data-platform team mailing list archive

[Merge] soss/+source/charmed-kafka:dpe-6097-3.6.1-ubuntu1 into soss/+source/charmed-kafka:main

 

Iman Enami has proposed merging soss/+source/charmed-kafka:dpe-6097-3.6.1-ubuntu1 into soss/+source/charmed-kafka:main.

Commit message:
[DPE-6097] Release 3.6.1-ubuntu1

Requested reviews:
  Canonical Data Platform (data-platform)

For more details, see:
https://code.launchpad.net/~data-platform/soss/+source/charmed-kafka/+git/charmed-kafka/+merge/477808

Added [KAFKA-15513] patch
-- 
The attached diff has been truncated due to its size.
Your team Canonical Data Platform is requested to review the proposed merge of soss/+source/charmed-kafka:dpe-6097-3.6.1-ubuntu1 into soss/+source/charmed-kafka:main.
diff --git a/.asf.yaml b/.asf.yaml
new file mode 100644
index 0000000..13dc326
--- /dev/null
+++ b/.asf.yaml
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+notifications:
+  commits:      commits@xxxxxxxxxxxxxxxx
+  issues:       jira@xxxxxxxxxxxxxxxx
+  pullrequests: jira@xxxxxxxxxxxxxxxx
+  jira_options: link label
+
+# This list allows you to trigger builds on pull requests. It can have a maximum of 10 people.
+# https://cwiki.apache.org/confluence/pages/viewpage.action?spaceKey=INFRA&title=Git+-+.asf.yaml+features#Git.asf.yamlfeatures-JenkinsPRwhitelisting
+jenkins:
+  github_whitelist:
+    - gharris1727
+    - vcrfxia
+    - divijvaidya
+    - lucasbru
+    - yashmayya
+    - philipnee
+    - vamossagar12
+    - clolov
+    - fvaleri
+    - andymg3
+
+# This list allows you to triage pull requests. It can have a maximum of 10 people.
+# https://cwiki.apache.org/confluence/pages/viewpage.action?spaceKey=INFRA&title=Git+-+.asf.yaml+features#Git.asf.yamlfeatures-AssigningexternalcollaboratorswiththetriageroleonGitHub
+github:
+  collaborators:
+    - gharris1727
+    - vcrfxia
+    - divijvaidya
+    - lucasbru
+    - yashmayya
+    - philipnee
+    - vamossagar12
+    - clolov
+    - fvaleri
+    - andymg3
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000..fbf0d0d
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: 'Handle stale PRs'
+on:
+  schedule:
+    - cron: '30 3 * * *'    # Run at 3:30 UTC nightly
+  workflow_dispatch:
+    inputs:
+      dryRun:
+        description: 'Dry Run'
+        required: true
+        default: true
+        type: boolean
+      operationsPerRun:
+        description: 'Max GitHub API operations'
+        required: true
+        default: 30
+        type: number
+
+permissions:
+  issues: write
+  pull-requests: write
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v8
+        with:
+          debug-only: ${{ inputs.dryRun || false }}
+          operations-per-run: ${{ inputs.operationsPerRun || 100 }}
+          days-before-stale: 90
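+          # note: a negative days-before-close disables automatic closing by this action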
+          days-before-close: -1
+          stale-pr-label: 'stale'
+          stale-pr-message: >
+            This PR is being marked as stale since it has not had any activity in 90 days. If you
+            would like to keep this PR alive, please ask a committer for review. If the PR has
+            merge conflicts, please update it with the latest from trunk (or appropriate release branch).
+            <p>
+            If this PR is no longer valid or desired, please feel free to close it. If no activity
+            occurs in the next 30 days, it will be automatically closed.
+          
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f466af2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,61 @@
+dist
+*classes
+*.class
+target/
+build/
+build_eclipse/
+out/
+.gradle/
+.vscode/
+lib_managed/
+src_managed/
+project/boot/
+project/plugins/project/
+patch-process/*
+.idea
+.svn
+.classpath
+/.metadata
+/.recommenders
+*~
+*#
+.#*
+rat.out
+TAGS
+*.iml
+.project
+.settings
+*.ipr
+*.iws
+.vagrant
+Vagrantfile.local
+/logs
+.DS_Store
+
+config/server-*
+config/zookeeper-*
+core/data/*
+gradle/wrapper/*.jar
+gradlew.bat
+
+results
+tests/results
+.ducktape
+tests/.ducktape
+tests/venv
+.cache
+
+docs/generated/
+
+.release-settings.json
+
+kafkatest.egg-info/
+systest/
+*.swp
+jmh-benchmarks/generated
+jmh-benchmarks/src/main/generated
+**/.jqwik-database
+**/src/generated
+**/src/generated-test
+
+storage/kafka-tiered-storage/
diff --git a/.launchpad.yaml b/.launchpad.yaml
new file mode 100644
index 0000000..a08b665
--- /dev/null
+++ b/.launchpad.yaml
@@ -0,0 +1,260 @@
+pipeline:
+  - prepare
+  - build
+  - tests
+  - release
+
+jobs:
+  prepare:
+    series: jammy
+    architectures: amd64
+    environment:
+      BASE_BUILD_DIR: /home/kafka/kafka-build
+      ARTIFACTORY_BUILDING_URL: "https://canonical.jfrog.io/artifactory/dataplatform-kafka/"
+      ARTIFACTORY_STAGING_URL: "https://canonical.jfrog.io/artifactory/dataplatform-kafka-staging/"
+    packages:
+      - git
+    run: |-
+      echo -e "##################################"
+      echo -e "PREPARE"
+      echo -e "##################################"
+      
+      # try to read branch name (works only locally)
+      BRANCH_NAME=$(git branch --show-current)
+      COMMIT_ID=$(git rev-parse HEAD)
+      
+      # check if branch name is valid
+      if [ -z "$BRANCH_NAME" ]
+      then
+        # get branch revision id from git HEAD file
+        echo "No branch name given from git command! Try to get it from .git folder"
+        git_rev=$(cat .git/HEAD)
+        while read line; do
+          current_rev=$( echo $line | awk -F ' ' '{print $1}' )
+          branch_name=$( echo $line | awk -F ' ' '{print $2}' | awk -F '/' '{print $NF}' )
+          if [[ $current_rev = $git_rev ]]
+          then
+            export BRANCH_NAME=$branch_name
+          fi
+        done < .git/packed-refs
+      fi
+            
+      # create kafka-build
+      mkdir -p ${BASE_BUILD_DIR}
+      
+      # move current project to the home dir 
+      cp -r * "${BASE_BUILD_DIR}/."
+      
+      cd ${BASE_BUILD_DIR}
+      
+      # create release name
+      BUILD_VERSION=$(date '+%Y%m%d%H%M%S')
+      KAFKA_VERSION=$(cat gradle.properties | grep version\= | cut -d "=" -f2)
+      if [ ! -f "./lp/PATCH_VERSION" ]; 
+      then 
+        CANONICAL_PATCH_VERSION="ubuntu0"
+      else
+        CANONICAL_PATCH_VERSION=$(cat ./lp/PATCH_VERSION)
+      fi
+      VERSION="$KAFKA_VERSION-$CANONICAL_PATCH_VERSION"
+           
+      [ ! -z "$BRANCH_NAME" ] && echo "Current branch: $BRANCH_NAME"
+      if [ -z "$RELEASE" ]; then
+        if [[ "$BRANCH_NAME" != "lp-"* ]]; then
+          RELEASE=false
+        else
+          RELEASE=true
+        fi
+      fi
+
+      mkdir -p logs
+      METADATA_FILE=${BASE_BUILD_DIR}/logs/metadata.yaml
+      echo -e "version: $VERSION\nrelease: $RELEASE\nbranch: $BRANCH_NAME\ncommit_id: $COMMIT_ID\nbuild: $BUILD_VERSION" > $METADATA_FILE
+      
+      echo "********"
+      echo "Metadata"
+      echo "********"
+      cat $METADATA_FILE
+      
+      # copy gradle repo conf
+      mkdir -p ~/.gradle; cp ./lp/init.gradle ~/.gradle
+
+      CREDENTIALS_FILE="credentials.yaml"
+
+      if [[ "$RELEASE" = "true" ]]; then
+        export ARTIFACTORY_URL="${ARTIFACTORY_BUILDING_URL}"
+      else
+        export ARTIFACTORY_URL="${ARTIFACTORY_STAGING_URL}"
+      fi
+            
+      echo "Selected artifactory: $ARTIFACTORY_URL"
+      echo "Release artifact: $RELEASE"
+      
+      # check artifactory credentials
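+      # PIP_INDEX_URL is expected to look like https://<user>:<password>@<host>/..., so the credentials can be split out below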
+      [ -z "PIP_INDEX_URL" ] && exit 1 
+      [ ! -z "$PIP_INDEX_URL" ] && echo "Env variable exists :) "
+      [ ! -z "$PIP_INDEX_URL" ] && export ARTIFACTORY_USER=$(echo "${PIP_INDEX_URL#https://}"; | awk -F '@' '{print $1}' | awk -F ':' '{print $1}')
+      [ ! -z "$PIP_INDEX_URL" ] && export ARTIFACTORY_PASSWORD=$(echo "${PIP_INDEX_URL#https://}"; | awk -F '@' '{print $1}' | awk -F ':' '{print $2}')
+
+      echo -e "url: ${ARTIFACTORY_URL}\nuser: ${ARTIFACTORY_USER}\npassword: ${ARTIFACTORY_PASSWORD}" > $CREDENTIALS_FILE
+
+  build:
+    series: jammy
+    architectures: amd64
+    environment:
+      BASE_BUILD_DIR: /home/kafka/kafka-build
+      HTTPS_PROXY: 10.10.10.1:8222
+      HTTP_PROXY: 10.10.10.1:8222
+      JAVA_OPTS: "-Dhttp.proxyHost=10.10.10.1 -Dhttp.proxyPort=8222 -Dhttps.proxyHost=10.10.10.1 -Dhttps.proxyPort=8222 -Dhttp.nonProxyHosts=localhost"
+      NO_PROXY: localhost
+    packages:
+      - build-essential
+      - wget
+      - openjdk-18-jdk-headless
+      - tcpdump
+    snaps:
+      - name: yq
+        classic: true
+    run: |-
+      echo -e "##################################"
+      echo -e "BUILD"
+      echo -e "##################################"
+
+      METADATA_FILE=${BASE_BUILD_DIR}/logs/metadata.yaml
+      CREDENTIALS_FILE=${BASE_BUILD_DIR}/credentials.yaml
+
+      VERSION=$(cat $METADATA_FILE | yq .version)
+      RELEASE=$(cat $METADATA_FILE | yq .release)
+      BRANCH=$(cat $METADATA_FILE | yq .branch)
+      COMMIT_ID=$(cat $METADATA_FILE | yq .commit_id)
+      
+      export ARTIFACTORY_URL=$(cat $CREDENTIALS_FILE | yq .url)
+      export ARTIFACTORY_USER=$(cat $CREDENTIALS_FILE | yq .user)
+      export ARTIFACTORY_PASSWORD=$(cat $CREDENTIALS_FILE | yq .password)
+      
+      echo "ARTIFACTORY USERNAME: ${ARTIFACTORY_USER}"
+      
+      export GRADLE_OPTS="-Dgradle.wrapperUser=${ARTIFACTORY_USER} -Dgradle.wrapperPassword=${ARTIFACTORY_PASSWORD}"
+
+      if [ -n "$MODE" ] &&  [ "$MODE" = "dev" ];
+      then 
+        export JAVA_OPTS=""
+        tcpdump -U -i any -w ${BASE_BUILD_DIR}/logs/dump.pcap &   
+        sleep 5
+        TCPDUMP_PID=$(ps -e | pgrep tcpdump)  
+        echo "TCPDUMP RUNNING AT: ${TCPDUMP_PID}"  
+      fi 
+    
+      mkdir -p ~/.m2/repository  
+          
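+      # releaseTarGz builds the distribution tarball (picked up from core/build/distributions by the release job); publishToMavenLocal fills ~/.m2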
+      EXTRA_ARGS="-Pversion=${VERSION} -PcommitId=${COMMIT_ID} -PskipSigning=true -PmavenUrl=file:///root/.m2/repository releaseTarGz publishToMavenLocal"    
+      
+      export _JAVA_OPTIONS=$JAVA_OPTS            
+      pushd ${BASE_BUILD_DIR} && ./gradlew $EXTRA_ARGS && popd
+
+      mv ~/.m2 ${BASE_BUILD_DIR}/.
+      
+      if [ -n "$TCPDUMP_PID" ];
+      then
+        sleep 5
+        echo "Killing tcpdump process: ${TCPDUMP_PID}"
+        kill -9 "${TCPDUMP_PID}"
+      fi
+
+  tests:
+    series: jammy
+    architectures: amd64
+    environment:
+      BASE_BUILD_DIR: /home/kafka/kafka-build
+      HTTPS_PROXY: 10.10.10.1:8222
+      HTTP_PROXY: 10.10.10.1:8222
+      JAVA_OPTS: "-Dhttp.proxyHost=10.10.10.1 -Dhttp.proxyPort=8222 -Dhttps.proxyHost=10.10.10.1 -Dhttps.proxyPort=8222 -Dhttp.nonProxyHosts=localhost"
+      NO_PROXY: localhost
+    packages:
+      - build-essential
+      - wget
+      - openjdk-18-jdk-headless
+    snaps:
+      - name: yq
+        classic: true
+    run: |-
+      echo -e "##################################"
+      echo -e "TESTS"
+      echo -e "##################################"
+
+      METADATA_FILE=${BASE_BUILD_DIR}/logs/metadata.yaml
+      CREDENTIALS_FILE=${BASE_BUILD_DIR}/credentials.yaml
+
+      VERSION=$(cat $METADATA_FILE | yq .version)
+      RELEASE=$(cat $METADATA_FILE | yq .release)
+      BRANCH=$(cat $METADATA_FILE | yq .branch)
+      COMMIT_ID=$(cat $METADATA_FILE | yq .commit_id)
+
+      export ARTIFACTORY_URL=$(cat $CREDENTIALS_FILE | yq .url)
+      export ARTIFACTORY_USER=$(cat $CREDENTIALS_FILE | yq .user)
+      export ARTIFACTORY_PASSWORD=$(cat $CREDENTIALS_FILE | yq .password)
+      
+      echo "ARTIFACTORY USERNAME: ${ARTIFACTORY_USER}"
+      
+      export GRADLE_OPTS="-Dgradle.wrapperUser=${ARTIFACTORY_USER} -Dgradle.wrapperPassword=${ARTIFACTORY_PASSWORD}"
+
+      if [ -n "$MODE" ] &&  [ "$MODE" = "dev" ];
+      then 
+        export JAVA_OPTS=""
+      fi
+
+      EXTRA_ARGS="-Pversion=${VERSION} -PcommitId=${COMMIT_ID} test"    
+      
+      export _JAVA_OPTIONS=$JAVA_OPTS            
+      # OUTCOME=$( (pushd ${BASE_BUILD_DIR} && ./gradlew $EXTRA_ARGS && popd | echo "success") || echo "failed" )
+      
+      echo "Tests outcome: $OUTCOME"
+
+  release:
+    series: jammy
+    architectures: amd64
+    environment:
+      BASE_BUILD_DIR: /home/kafka/kafka-build
+    packages:
+      - zip
+    run: |-
+      echo -e "##################################"
+      echo -e "PACKAGE RELEASE ARTIFACTS"
+      echo -e "##################################"
+
+      METADATA_FILE=${BASE_BUILD_DIR}/logs/metadata.yaml
+
+      VERSION=$(cat $METADATA_FILE | yq .version)
+      BUILD_VERSION=$(cat $METADATA_FILE | yq .build)
+
+      # copy the tarball to the launchpad build directory
+      cp ${BASE_BUILD_DIR}/core/build/distributions/*${VERSION}.tgz ./
+      
+      # compute the checksum
+      FILE=$(find . -maxdepth 1 -regex "./kafka_[0-9\.-]+\-${VERSION}.tgz" | tail -n1 )
+      TARBALL="${FILE%.*}-${BUILD_VERSION}.${FILE##*.}"
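+      # append the build timestamp before the extension, e.g. kafka_<scala>-<version>.tgz -> kafka_<scala>-<version>-<build>.tgz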
+      mv $FILE $TARBALL
+      
+      echo "Distribution: ${TARBALL}"
+      sha512sum "${TARBALL}" > "${TARBALL}.sha512"
+      
+      PROJECT_ROOT="${PWD}"
+
+      # .m2 repos
+      pushd $BASE_BUILD_DIR/.m2
+      zip -r repository.zip repository
+      mv repository.zip ${PROJECT_ROOT}/.
+      popd
+      
+      # Logs 
+      pushd $BASE_BUILD_DIR
+      zip -r logs.zip logs
+      mv logs.zip ${PROJECT_ROOT}/logs_$VERSION.zip
+      popd
+
+    output:
+      paths:
+        - kafka_*.tgz
+        - kafka_*.tgz.sha512
+        - repository.zip
+        - logs_*.zip
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..07fd857
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,11 @@
+## Contributing to Kafka
+
+*Before opening a pull request*, review the [Contributing](https://kafka.apache.org/contributing.html) and [Contributing Code Changes](https://cwiki.apache.org/confluence/display/KAFKA/Contributing+Code+Changes) pages.
+
+These pages list the steps that are required before creating a PR.
+
+When you contribute code, you affirm that the contribution is your original work and that you
+license the work to the project under the project's open source license. Whether or not you
+state this explicitly, by submitting any copyrighted material via pull request, email, or
+other means you agree to license the material under the project's open source license and
+warrant that you have the legal authority to do so.
diff --git a/HEADER b/HEADER
new file mode 100644
index 0000000..8853bce
--- /dev/null
+++ b/HEADER
@@ -0,0 +1,14 @@
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..d5d8909
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,195 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+def doValidation() {
+  // Run all the tasks associated with `check` except for `test` - the latter is executed via `doTest`
+  sh """
+    ./retry_zinc ./gradlew -PscalaVersion=$SCALA_VERSION clean check -x test \
+        --profile --continue -PxmlSpotBugsReport=true -PkeepAliveMode="session"
+  """
+}
+
+def isChangeRequest(env) {
+  env.CHANGE_ID != null && !env.CHANGE_ID.isEmpty()
+}
+
+def doTest(env, target = "test") {
+  sh """./gradlew -PscalaVersion=$SCALA_VERSION ${target} \
+      --profile --continue -PkeepAliveMode="session" -PtestLoggingEvents=started,passed,skipped,failed \
+      -PignoreFailures=true -PmaxParallelForks=2 -PmaxTestRetries=1 -PmaxTestRetryFailures=10"""
+  junit '**/build/test-results/**/TEST-*.xml'
+}
+
+def doStreamsArchetype() {
+  echo 'Verify that Kafka Streams archetype compiles'
+
+  sh '''
+    ./gradlew streams:publishToMavenLocal clients:publishToMavenLocal connect:json:publishToMavenLocal connect:api:publishToMavenLocal \
+         || { echo 'Could not publish kafka-streams.jar (and dependencies) locally to Maven'; exit 1; }
+  '''
+
+  VERSION = sh(script: 'grep "^version=" gradle.properties | cut -d= -f 2', returnStdout: true).trim()
+
+  dir('streams/quickstart') {
+    sh '''
+      mvn clean install -Dgpg.skip  \
+          || { echo 'Could not `mvn install` streams quickstart archetype'; exit 1; }
+    '''
+
+    dir('test-streams-archetype') {
+      // Note the double quotes for variable interpolation
+      sh """ 
+        echo "Y" | mvn archetype:generate \
+            -DarchetypeCatalog=local \
+            -DarchetypeGroupId=org.apache.kafka \
+            -DarchetypeArtifactId=streams-quickstart-java \
+            -DarchetypeVersion=${VERSION} \
+            -DgroupId=streams.examples \
+            -DartifactId=streams.examples \
+            -Dversion=0.1 \
+            -Dpackage=myapps \
+            || { echo 'Could not create new project using streams quickstart archetype'; exit 1; }
+      """
+
+      dir('streams.examples') {
+        sh '''
+          mvn compile \
+              || { echo 'Could not compile streams quickstart archetype project'; exit 1; }
+        '''
+      }
+    }
+  }
+}
+
+def tryStreamsArchetype() {
+  try {
+    doStreamsArchetype()
+  } catch(err) {
+    echo 'Failed to build Kafka Streams archetype, marking this build UNSTABLE'
+    currentBuild.result = 'UNSTABLE'
+  }
+}
+
+
+pipeline {
+  agent none
+  
+  options {
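+    // abortPrevious: newer PR builds abort older in-flight ones; branch builds are left to finish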
+    disableConcurrentBuilds(abortPrevious: isChangeRequest(env))
+  }
+  
+  stages {
+    stage('Build') {
+      parallel {
+
+        stage('JDK 8 and Scala 2.12') {
+          agent { label 'ubuntu' }
+          tools {
+            jdk 'jdk_1.8_latest'
+            maven 'maven_3_latest'
+          }
+          options {
+            timeout(time: 8, unit: 'HOURS') 
+            timestamps()
+          }
+          environment {
+            SCALA_VERSION=2.12
+          }
+          steps {
+            doValidation()
+            doTest(env)
+            tryStreamsArchetype()
+          }
+        }
+
+        stage('JDK 11 and Scala 2.13') {
+          agent { label 'ubuntu' }
+          tools {
+            jdk 'jdk_11_latest'
+          }
+          options {
+            timeout(time: 8, unit: 'HOURS') 
+            timestamps()
+          }
+          environment {
+            SCALA_VERSION=2.13
+          }
+          steps {
+            doValidation()
+            doTest(env)
+            echo 'Skipping Kafka Streams archetype test for Java 11'
+          }
+        }
+
+        stage('JDK 17 and Scala 2.13') {
+          agent { label 'ubuntu' }
+          tools {
+            jdk 'jdk_17_latest'
+          }
+          options {
+            timeout(time: 8, unit: 'HOURS') 
+            timestamps()
+          }
+          environment {
+            SCALA_VERSION=2.13
+          }
+          steps {
+            doValidation()
+            doTest(env)
+            echo 'Skipping Kafka Streams archetype test for Java 17'
+          }
+        }
+
+        stage('JDK 20 and Scala 2.13') {
+          agent { label 'ubuntu' }
+          tools {
+            jdk 'jdk_20_latest'
+          }
+          options {
+            timeout(time: 8, unit: 'HOURS')
+            timestamps()
+          }
+          environment {
+            SCALA_VERSION=2.13
+          }
+          steps {
+            doValidation()
+            doTest(env)
+            echo 'Skipping Kafka Streams archetype test for Java 20'
+          }
+        }
+      }
+    }
+  }
+  
+  post {
+    always {
+      script {
+        if (!isChangeRequest(env)) {
+          node('ubuntu') {
+            step([$class: 'Mailer',
+                 notifyEveryUnstableBuild: true,
+                 recipients: "dev@xxxxxxxxxxxxxxxx",
+                 sendToIndividuals: false])
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/LICENSE-binary b/LICENSE-binary
new file mode 100644
index 0000000..58ade0e
--- /dev/null
+++ b/LICENSE-binary
@@ -0,0 +1,337 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+-------------------------------------------------------------------------------
+This project bundles some components that are also licensed under the Apache
+License Version 2.0:
+
+audience-annotations-0.12.0
+caffeine-2.9.3
+commons-beanutils-1.9.4
+commons-cli-1.4
+commons-collections-3.2.2
+commons-digester-2.1
+commons-io-2.11.0
+commons-lang3-3.8.1
+commons-logging-1.2
+commons-validator-1.7
+error_prone_annotations-2.10.0
+jackson-annotations-2.13.5
+jackson-core-2.13.5
+jackson-databind-2.13.5
+jackson-dataformat-csv-2.13.5
+jackson-datatype-jdk8-2.13.5
+jackson-jaxrs-base-2.13.5
+jackson-jaxrs-json-provider-2.13.5
+jackson-module-jaxb-annotations-2.13.5
+jackson-module-scala_2.13-2.13.5
+jackson-module-scala_2.12-2.13.5
+jakarta.validation-api-2.0.2
+javassist-3.29.2-GA
+jetty-client-9.4.52.v20230823
+jetty-continuation-9.4.52.v20230823
+jetty-http-9.4.52.v20230823
+jetty-io-9.4.52.v20230823
+jetty-security-9.4.52.v20230823
+jetty-server-9.4.52.v20230823
+jetty-servlet-9.4.52.v20230823
+jetty-servlets-9.4.52.v20230823
+jetty-util-9.4.52.v20230823
+jetty-util-ajax-9.4.52.v20230823
+jose4j-0.9.3
+lz4-java-1.8.0
+maven-artifact-3.8.8
+metrics-core-4.1.12.1
+metrics-core-2.2.0
+netty-buffer-4.1.100.Final
+netty-codec-4.1.100.Final
+netty-common-4.1.100.Final
+netty-handler-4.1.100.Final
+netty-resolver-4.1.100.Final
+netty-transport-4.1.100.Final
+netty-transport-classes-epoll-4.1.100.Final
+netty-transport-native-epoll-4.1.100.Final
+netty-transport-native-unix-common-4.1.100.Final
+plexus-utils-3.3.1
+reflections-0.10.2
+reload4j-1.2.25
+rocksdbjni-7.9.2
+scala-collection-compat_2.13-2.10.0
+scala-library-2.13.11
+scala-logging_2.13-3.9.4
+scala-reflect-2.13.11
+scala-java8-compat_2.13-1.0.2
+snappy-java-1.1.10.5
+swagger-annotations-2.2.8
+zookeeper-3.8.3
+zookeeper-jute-3.8.3
+
+===============================================================================
+This product bundles various third-party components under other open source
+licenses. This section summarizes those components and their licenses.
+See licenses/ for text of these licenses.
+
+---------------------------------------
+Eclipse Distribution License - v 1.0
+see: licenses/eclipse-distribution-license-1.0
+
+jakarta.activation-api-1.2.2
+jakarta.xml.bind-api-2.3.3
+
+---------------------------------------
+Eclipse Public License - v 2.0
+see: licenses/eclipse-public-license-2.0
+
+jakarta.annotation-api-1.3.5
+jakarta.ws.rs-api-2.1.6
+hk2-api-2.6.1
+hk2-locator-2.6.1
+hk2-utils-2.6.1
+osgi-resource-locator-1.0.3
+aopalliance-repackaged-2.6.1
+jakarta.inject-2.6.1
+jersey-client-2.39.1
+jersey-common-2.39.1
+jersey-container-servlet-2.39.1
+jersey-container-servlet-core-2.39.1
+jersey-hk2-2.39.1
+jersey-server-2.39.1
+
+---------------------------------------
+CDDL 1.1 + GPLv2 with classpath exception
+see: licenses/CDDL+GPL-1.1
+
+javax.activation-api-1.2.0
+javax.annotation-api-1.3.2
+javax.servlet-api-3.1.0
+javax.ws.rs-api-2.1.1
+jaxb-api-2.3.1
+activation-1.1.1
+
+---------------------------------------
+MIT License
+
+argparse4j-0.7.0, see: licenses/argparse-MIT
+checker-qual-3.19.0, see: licenses/checker-qual-MIT
+jopt-simple-5.0.4, see: licenses/jopt-simple-MIT
+slf4j-api-1.7.36, see: licenses/slf4j-MIT
+slf4j-reload4j-1.7.36, see: licenses/slf4j-MIT
+pcollections-4.0.1, see: licenses/pcollections-MIT
+
+---------------------------------------
+BSD 2-Clause
+
+zstd-jni-1.5.5-1 see: licenses/zstd-jni-BSD-2-clause
+
+---------------------------------------
+BSD 3-Clause
+
+jline-3.22.0, see: licenses/jline-BSD-3-clause
+jsr305-3.0.2, see: licenses/jsr305-BSD-3-clause
+paranamer-2.8, see: licenses/paranamer-BSD-3-clause
+
+---------------------------------------
+Do What The F*ck You Want To Public License
+see: licenses/DWTFYWTPL
+
+reflections-0.10.2
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..230cf75
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,23 @@
+Apache Kafka
+Copyright 2023 The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (https://www.apache.org/).
+
+This distribution has a binary dependency on jersey, which is available under the CDDL
+License. The source code of jersey can be found at https://github.com/jersey/jersey/.
+
+This distribution has a binary test dependency on jqwik, which is available under
+the Eclipse Public License 2.0. The source code can be found at
+https://github.com/jlink/jqwik.
+
+The streams-scala (streams/streams-scala) module was donated by Lightbend and the original code was copyrighted by them:
+Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
+Copyright (C) 2017-2018 Alexis Seigneurin.
+
+This project contains the following code copied from Apache Hadoop:
+clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java
+Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license.
+
+This project contains the following code copied from Apache Hive:
+streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java
\ No newline at end of file
diff --git a/NOTICE-binary b/NOTICE-binary
new file mode 100644
index 0000000..a50c86d
--- /dev/null
+++ b/NOTICE-binary
@@ -0,0 +1,856 @@
+Apache Kafka
+Copyright 2021 The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (https://www.apache.org/).
+
+This distribution has a binary dependency on jersey, which is available under the CDDL
+License. The source code of jersey can be found at https://github.com/jersey/jersey/.
+
+This distribution has a binary test dependency on jqwik, which is available under
+the Eclipse Public License 2.0. The source code can be found at
+https://github.com/jlink/jqwik.
+
+The streams-scala (streams/streams-scala) module was donated by Lightbend and the original code was copyrighted by them:
+Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
+Copyright (C) 2017-2018 Alexis Seigneurin.
+
+This project contains the following code copied from Apache Hadoop:
+clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java
+Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license.
+
+This project contains the following code copied from Apache Hive:
+streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java
+
+// ------------------------------------------------------------------
+// NOTICE file corresponding to the section 4d of The Apache License,
+// Version 2.0, in this case for
+// ------------------------------------------------------------------
+
+# Notices for Eclipse GlassFish
+
+This content is produced and maintained by the Eclipse GlassFish project.
+
+* Project home: https://projects.eclipse.org/projects/ee4j.glassfish
+
+## Trademarks
+
+Eclipse GlassFish, and GlassFish are trademarks of the Eclipse Foundation.
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Public License v. 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made
+available under the following Secondary Licenses when the conditions for such
+availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU
+General Public License, version 2 with the GNU Classpath Exception which is
+available at https://www.gnu.org/software/classpath/license.html.
+
+SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/glassfish-ha-api
+* https://github.com/eclipse-ee4j/glassfish-logging-annotation-processor
+* https://github.com/eclipse-ee4j/glassfish-shoal
+* https://github.com/eclipse-ee4j/glassfish-cdi-porting-tck
+* https://github.com/eclipse-ee4j/glassfish-jsftemplating
+* https://github.com/eclipse-ee4j/glassfish-hk2-extra
+* https://github.com/eclipse-ee4j/glassfish-hk2
+* https://github.com/eclipse-ee4j/glassfish-fighterfish
+
+## Third-party Content
+
+This project leverages the following third party content.
+
+None
+
+## Cryptography
+
+Content may contain encryption software. The country in which you are currently
+may have restrictions on the import, possession, and use, and/or re-export to
+another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import,
+possession, or use, and re-export of encryption software, to see if this is
+permitted.
+
+
+Apache Yetus - Audience Annotations
+Copyright 2015-2017 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+Apache Commons CLI
+Copyright 2001-2017 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+Apache Commons Lang
+Copyright 2001-2018 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+# Jackson JSON processor
+
+Jackson is a high-performance, Free/Open Source JSON processing library.
+It was originally written by Tatu Saloranta (tatu.saloranta@xxxxxx), and has
+been in development since 2007.
+It is currently developed by a community of developers, as well as supported
+commercially by FasterXML.com.
+
+## Licensing
+
+Jackson core and extension components may be licensed under different licenses.
+To find the details that apply to this artifact see the accompanying LICENSE file.
+For more information, including possible other licensing options, contact
+FasterXML.com (http://fasterxml.com).
+
+## Credits
+
+A list of contributors may be found from the CREDITS file, which is included
+in some artifacts (usually source distributions); but is always available
+from the source code management (SCM) system the project uses.
+
+
+# Notices for Eclipse Project for JAF
+
+This content is produced and maintained by the Eclipse Project for JAF project.
+
+* Project home: https://projects.eclipse.org/projects/ee4j.jaf
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Distribution License v. 1.0,
+which is available at http://www.eclipse.org/org/documents/edl-v10.php.
+
+SPDX-License-Identifier: BSD-3-Clause
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/jaf
+
+## Third-party Content
+
+This project leverages the following third party content.
+
+JUnit (4.12)
+
+* License: Eclipse Public License
+
+
+# Notices for Jakarta Annotations
+
+This content is produced and maintained by the Jakarta Annotations project.
+
+ * Project home: https://projects.eclipse.org/projects/ee4j.ca
+
+## Trademarks
+
+Jakarta Annotations is a trademark of the Eclipse Foundation.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Public License v. 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made
+available under the following Secondary Licenses when the conditions for such
+availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU
+General Public License, version 2 with the GNU Classpath Exception which is
+available at https://www.gnu.org/software/classpath/license.html.
+
+SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+ * https://github.com/eclipse-ee4j/common-annotations-api
+
+## Third-party Content
+
+## Cryptography
+
+Content may contain encryption software. The country in which you are currently
+may have restrictions on the import, possession, and use, and/or re-export to
+another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import,
+possession, or use, and re-export of encryption software, to see if this is
+permitted.
+
+
+# Notices for the Jakarta RESTful Web Services Project
+
+This content is produced and maintained by the **Jakarta RESTful Web Services**
+project.
+
+* Project home: https://projects.eclipse.org/projects/ee4j.jaxrs
+
+## Trademarks
+
+**Jakarta RESTful Web Services** is a trademark of the Eclipse Foundation.
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Public License v. 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made
+available under the following Secondary Licenses when the conditions for such
+availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU
+General Public License, version 2 with the GNU Classpath Exception which is
+available at https://www.gnu.org/software/classpath/license.html.
+
+SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/jaxrs-api
+
+## Third-party Content
+
+This project leverages the following third party content.
+
+javaee-api (7.0)
+
+* License: Apache-2.0 AND W3C
+
+JUnit (4.11)
+
+* License: Common Public License 1.0
+
+Mockito (2.16.0)
+
+* Project: http://site.mockito.org
+* Source: https://github.com/mockito/mockito/releases/tag/v2.16.0
+
+## Cryptography
+
+Content may contain encryption software. The country in which you are currently
+may have restrictions on the import, possession, and use, and/or re-export to
+another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import,
+possession, or use, and re-export of encryption software, to see if this is
+permitted.
+
+
+# Notices for Eclipse Project for JAXB
+
+This content is produced and maintained by the Eclipse Project for JAXB project.
+
+* Project home: https://projects.eclipse.org/projects/ee4j.jaxb
+
+## Trademarks
+
+Eclipse Project for JAXB is a trademark of the Eclipse Foundation.
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Distribution License v. 1.0 which is available
+at http://www.eclipse.org/org/documents/edl-v10.php.
+
+SPDX-License-Identifier: BSD-3-Clause
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/jaxb-api
+
+## Third-party Content
+
+This project leverages the following third party content.
+
+None
+
+## Cryptography
+
+Content may contain encryption software. The country in which you are currently
+may have restrictions on the import, possession, and use, and/or re-export to
+another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import,
+possession, or use, and re-export of encryption software, to see if this is
+permitted.
+
+
+# Notice for Jersey
+This content is produced and maintained by the Eclipse Jersey project.
+
+*  Project home: https://projects.eclipse.org/projects/ee4j.jersey
+
+## Trademarks
+Eclipse Jersey is a trademark of the Eclipse Foundation.
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Public License v. 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made
+available under the following Secondary Licenses when the conditions for such
+availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU
+General Public License, version 2 with the GNU Classpath Exception which is
+available at https://www.gnu.org/software/classpath/license.html.
+
+SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
+
+## Source Code
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/jersey
+
+## Third-party Content
+
+Angular JS, v1.6.6
+* License MIT (http://www.opensource.org/licenses/mit-license.php)
+* Project: http://angularjs.org
+* Copyright: (c) 2010-2017 Google, Inc.
+
+aopalliance Version 1
+* License: all the source code provided by AOP Alliance is Public Domain.
+* Project: http://aopalliance.sourceforge.net
+* Copyright: Material in the public domain is not protected by copyright
+
+Bean Validation API 2.0.2
+* License: Apache License, 2.0
+* Project: http://beanvalidation.org/1.1/
+* Copyright: 2009, Red Hat, Inc. and/or its affiliates, and individual contributors by the @authors tag.
+
+Hibernate Validator CDI, 6.1.2.Final
+* License: Apache License, 2.0
+* Project: https://beanvalidation.org/
+* Repackaged in org.glassfish.jersey.server.validation.internal.hibernate
+
+Bootstrap v3.3.7
+* License: MIT license (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+* Project: http://getbootstrap.com
+* Copyright: 2011-2016 Twitter, Inc
+
+Google Guava Version 18.0
+* License: Apache License, 2.0
+* Copyright (C) 2009 The Guava Authors
+
+javax.inject Version: 1
+* License: Apache License, 2.0
+* Copyright (C) 2009 The JSR-330 Expert Group
+
+Javassist Version 3.25.0-GA
+* License: Apache License, 2.0
+* Project: http://www.javassist.org/
+* Copyright (C) 1999- Shigeru Chiba. All Rights Reserved.
+
+Jackson JAX-RS Providers Version 2.10.1
+* License: Apache License, 2.0
+* Project: https://github.com/FasterXML/jackson-jaxrs-providers
+* Copyright: (c) 2009-2011 FasterXML, LLC. All rights reserved unless otherwise indicated.
+
+jQuery v1.12.4
+* License: jquery.org/license
+* Project: jquery.org
+* Copyright: (c) jQuery Foundation
+
+jQuery Barcode plugin 0.3
+* License: MIT & GPL (http://www.opensource.org/licenses/mit-license.php & http://www.gnu.org/licenses/gpl.html)
+* Project:  http://www.pasella.it/projects/jQuery/barcode
+* Copyright: (c) 2009 Antonello Pasella antonello.pasella@xxxxxxxxx
+
+JSR-166 Extension - JEP 266
+* License: CC0
+* No copyright
+* Written by Doug Lea with assistance from members of JCP JSR-166 Expert Group and released to the public domain, as explained at http://creativecommons.org/publicdomain/zero/1.0/
+
+KineticJS, v4.7.1
+* License: MIT license (http://www.opensource.org/licenses/mit-license.php)
+* Project: http://www.kineticjs.com, https://github.com/ericdrowell/KineticJS
+* Copyright: Eric Rowell
+
+org.objectweb.asm Version 8.0
+* License: Modified BSD (http://asm.objectweb.org/license.html)
+* Copyright (c) 2000-2011 INRIA, France Telecom. All rights reserved.
+
+org.osgi.core version 6.0.0
+* License: Apache License, 2.0
+* Copyright (c) OSGi Alliance (2005, 2008). All Rights Reserved.
+
+org.glassfish.jersey.server.internal.monitoring.core
+* License: Apache License, 2.0
+* Copyright (c) 2015-2018 Oracle and/or its affiliates. All rights reserved.
+* Copyright 2010-2013 Coda Hale and Yammer, Inc.
+
+W3.org documents
+* License: W3C License
+* Copyright: Copyright (c) 1994-2001 World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University). All Rights Reserved. http://www.w3.org/Consortium/Legal/
+
+
+==============================================================
+ Jetty Web Container
+ Copyright 1995-2018 Mort Bay Consulting Pty Ltd.
+==============================================================
+
+The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd
+unless otherwise noted.
+
+Jetty is dual licensed under both
+
+  * The Apache 2.0 License
+    http://www.apache.org/licenses/LICENSE-2.0.html
+
+      and
+
+  * The Eclipse Public 1.0 License
+    http://www.eclipse.org/legal/epl-v10.html
+
+Jetty may be distributed under either license.
+
+------
+Eclipse
+
+The following artifacts are EPL.
+ * org.eclipse.jetty.orbit:org.eclipse.jdt.core
+
+The following artifacts are EPL and ASL2.
+ * org.eclipse.jetty.orbit:javax.security.auth.message
+
+
+The following artifacts are EPL and CDDL 1.0.
+ * org.eclipse.jetty.orbit:javax.mail.glassfish
+
+
+------
+Oracle
+
+The following artifacts are CDDL + GPLv2 with classpath exception.
+https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html
+
+ * javax.servlet:javax.servlet-api
+ * javax.annotation:javax.annotation-api
+ * javax.transaction:javax.transaction-api
+ * javax.websocket:javax.websocket-api
+
+------
+Oracle OpenJDK
+
+If ALPN is used to negotiate HTTP/2 connections, then the following
+artifacts may be included in the distribution or downloaded when ALPN
+module is selected.
+
+ * java.sun.security.ssl
+
+These artifacts replace/modify OpenJDK classes. The modifications
+are hosted on GitHub and both modified and original are under GPL v2 with
+classpath exceptions.
+http://openjdk.java.net/legal/gplv2+ce.html
+
+
+------
+OW2
+
+The following artifacts are licensed by the OW2 Foundation according to the
+terms of http://asm.ow2.org/license.html
+
+org.ow2.asm:asm-commons
+org.ow2.asm:asm
+
+
+------
+Apache
+
+The following artifacts are ASL2 licensed.
+
+org.apache.taglibs:taglibs-standard-spec
+org.apache.taglibs:taglibs-standard-impl
+
+
+------
+MortBay
+
+The following artifacts are ASL2 licensed.  Based on selected classes from
+following Apache Tomcat jars, all ASL2 licensed.
+
+org.mortbay.jasper:apache-jsp
+  org.apache.tomcat:tomcat-jasper
+  org.apache.tomcat:tomcat-juli
+  org.apache.tomcat:tomcat-jsp-api
+  org.apache.tomcat:tomcat-el-api
+  org.apache.tomcat:tomcat-jasper-el
+  org.apache.tomcat:tomcat-api
+  org.apache.tomcat:tomcat-util-scan
+  org.apache.tomcat:tomcat-util
+
+org.mortbay.jasper:apache-el
+  org.apache.tomcat:tomcat-jasper-el
+  org.apache.tomcat:tomcat-el-api
+
+
+------
+Mortbay
+
+The following artifacts are CDDL + GPLv2 with classpath exception.
+
+https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html
+
+org.eclipse.jetty.toolchain:jetty-schemas
+
+------
+Assorted
+
+The UnixCrypt.java code implements the one way cryptography used by
+Unix systems for simple password protection.  Copyright 1996 Aki Yoshida,
+modified April 2001  by Iris Van den Broeke, Daniel Deville.
+Permission to use, copy, modify and distribute UnixCrypt
+for non-commercial or commercial purposes and without fee is
+granted provided that the copyright notice appears in all copies.
+
+
+Apache log4j
+Copyright 2007 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+Maven Artifact
+Copyright 2001-2019 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+This product includes software developed by the Indiana University
+  Extreme! Lab (http://www.extreme.indiana.edu/).
+
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
+
+This product includes software developed by
+ThoughtWorks (http://www.thoughtworks.com).
+
+This product includes software developed by
+javolution (http://javolution.org/).
+
+This product includes software developed by
+Rome (https://rome.dev.java.net/).
+
+
+Scala
+Copyright (c) 2002-2020 EPFL
+Copyright (c) 2011-2020 Lightbend, Inc.
+
+Scala includes software developed at
+LAMP/EPFL (https://lamp.epfl.ch/) and
+Lightbend, Inc. (https://www.lightbend.com/).
+
+Licensed under the Apache License, Version 2.0 (the "License").
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+This software includes projects with other licenses -- see `doc/LICENSE.md`.
+
+
+Apache ZooKeeper - Server
+Copyright 2008-2021 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+Apache ZooKeeper - Jute
+Copyright 2008-2021 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
+                            The Netty Project
+                            =================
+
+Please visit the Netty web site for more information:
+
+  * https://netty.io/
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+  https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+Also, please refer to each LICENSE.<component>.txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+-------------------------------------------------------------------------------
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+    * license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+    * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+    * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+    * http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified portion of 'Webbit', an event based
+WebSocket and HTTP server, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+    * https://github.com/joewalnes/webbit
+
+This product contains a modified portion of 'SLF4J', a simple logging
+facade for Java, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.slf4j.txt (MIT License)
+  * HOMEPAGE:
+    * https://www.slf4j.org/
+
+This product contains a modified portion of 'Apache Harmony', an open source
+Java SE, which can be obtained at:
+
+  * NOTICE:
+    * license/NOTICE.harmony.txt
+  * LICENSE:
+    * license/LICENSE.harmony.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://archive.apache.org/dist/harmony/
+
+This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+and decompression library written by Matthew J. Francis. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jbzip2.txt (MIT License)
+  * HOMEPAGE:
+    * https://code.google.com/p/jbzip2/
+
+This product contains a modified portion of 'libdivsufsort', a C API library to construct
+the suffix array and the Burrows-Wheeler transformed string for any input string of
+a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.libdivsufsort.txt (MIT License)
+  * HOMEPAGE:
+    * https://github.com/y-256/libdivsufsort
+
+This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jctools.txt (ASL2 License)
+  * HOMEPAGE:
+    * https://github.com/JCTools/JCTools
+
+This product optionally depends on 'JZlib', a re-implementation of zlib in
+pure Java, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jzlib.txt (BSD style License)
+  * HOMEPAGE:
+    * http://www.jcraft.com/jzlib/
+
+This product optionally depends on 'Compress-LZF', a Java library for encoding and
+decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/ning/compress
+
+This product optionally depends on 'lz4', a LZ4 Java compression
+and decompression library written by Adrien Grand. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.lz4.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/jpountz/lz4-java
+
+This product optionally depends on 'lzma-java', a LZMA Java compression
+and decompression library, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.lzma-java.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/jponge/lzma-java
+
+This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+and decompression library written by William Kinney. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jfastlz.txt (MIT License)
+  * HOMEPAGE:
+    * https://code.google.com/p/jfastlz/
+
+This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+interchange format, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.protobuf.txt (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/google/protobuf
+
+This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+a temporary self-signed X.509 certificate when the JVM does not provide the
+equivalent functionality.  It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.bouncycastle.txt (MIT License)
+  * HOMEPAGE:
+    * https://www.bouncycastle.org/
+
+This product optionally depends on 'Snappy', a compression library produced
+by Google Inc, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.snappy.txt (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/google/snappy
+
+This product optionally depends on 'JBoss Marshalling', an alternative Java
+serialization API, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jboss-marshalling.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/jboss-remoting/jboss-marshalling
+
+This product optionally depends on 'Caliper', Google's micro-
+benchmarking framework, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.caliper.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/google/caliper
+
+This product optionally depends on 'Apache Commons Logging', a logging
+framework, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.commons-logging.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://commons.apache.org/logging/
+
+This product optionally depends on 'Apache Log4J', a logging framework, which
+can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.log4j.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://logging.apache.org/log4j/
+
+This product optionally depends on 'Aalto XML', an ultra-high performance
+non-blocking XML processor, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://wiki.fasterxml.com/AaltoHome
+
+This product contains a modified version of 'HPACK', a Java implementation of
+the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.hpack.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/twitter/hpack
+
+This product contains a modified version of 'HPACK', a Python implementation of
+the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.hyper-hpack.txt (MIT License)
+  * HOMEPAGE:
+    * https://github.com/python-hyper/hpack/
+
+This product contains a modified version of 'HPACK', a C implementation of
+the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.nghttp2-hpack.txt (MIT License)
+  * HOMEPAGE:
+    * https://github.com/nghttp2/nghttp2/
+
+This product contains a modified portion of 'Apache Commons Lang', a Java library
+that provides utilities for the java.lang API, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.commons-lang.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://commons.apache.org/proper/commons-lang/
+
+
+This product contains the Maven wrapper scripts from 'Maven Wrapper', which provide an easy way to ensure a user has everything necessary to run the Maven build.
+
+  * LICENSE:
+    * license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/takari/maven-wrapper
+
+This product contains the dnsinfo.h header file, which provides a way to retrieve the system DNS configuration on macOS.
+This private header is also used by Apple's open source
+mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/).
+
+  * LICENSE:
+    * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0)
+  * HOMEPAGE:
+    * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h
\ No newline at end of file
diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..552a4d0
--- /dev/null
+++ b/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,14 @@
+*More detailed description of your change,
+if necessary. The PR title and PR message become
+the squashed commit message, so use a separate
+comment to ping reviewers.*
+
+*Summary of testing strategy (including rationale)
+for the feature or bug fix. Unit and/or integration
+tests are expected for any behaviour change and
+system tests should be considered for larger changes.*
+
+### Committer Checklist (excluded from commit message)
+- [ ] Verify design and implementation 
+- [ ] Verify test coverage and CI build status
+- [ ] Verify documentation (including upgrade notes)
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..49dd49e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,291 @@
+Apache Kafka
+=================
+See our [web site](https://kafka.apache.org) for details on the project.
+
+You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed.
+
+We build and test Apache Kafka with Java 8, 11, 17 and 20. We set the `release` parameter in javac and scalac
+to `8` to ensure the generated binaries are compatible with Java 8 or higher (independently of the Java version
+used for compilation). Java 8 support has been deprecated since Apache Kafka 3.0 and will be removed in Apache
+Kafka 4.0 (see [KIP-750](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=181308223) for more details).
+
+Scala 2.12 and 2.13 are supported and 2.13 is used by default. Scala 2.12 support has been deprecated since
+Apache Kafka 3.0 and will be removed in Apache Kafka 4.0 (see [KIP-751](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=181308218)
+for more details). See below for how to use a specific Scala version or all of the supported Scala versions.
+
+### Build a jar and run it ###
+    ./gradlew jar
+
+Follow the instructions in https://kafka.apache.org/quickstart
+
+### Build source jar ###
+    ./gradlew srcJar
+
+### Build aggregated javadoc ###
+    ./gradlew aggregatedJavadoc
+
+### Build javadoc and scaladoc ###
+    ./gradlew javadoc
+    ./gradlew javadocJar # builds a javadoc jar for each module
+    ./gradlew scaladoc
+    ./gradlew scaladocJar # builds a scaladoc jar for each module
+    ./gradlew docsJar # builds both (if applicable) javadoc and scaladoc jars for each module
+
+### Run unit/integration tests ###
+    ./gradlew test # runs both unit and integration tests
+    ./gradlew unitTest
+    ./gradlew integrationTest
+    
+### Force re-running tests without code change ###
+    ./gradlew test --rerun
+    ./gradlew unitTest --rerun
+    ./gradlew integrationTest --rerun
+
+### Running a particular unit/integration test ###
+    ./gradlew clients:test --tests RequestResponseTest
+
+### Repeatedly running a particular unit/integration test ###
+    I=0; while ./gradlew clients:test --tests RequestResponseTest --rerun --fail-fast; do (( I=$I+1 )); echo "Completed run: $I"; sleep 1; done
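+
+This loops until the test fails, printing the number of completed passing runs after each iteration.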
+
+### Running a particular test method within a unit/integration test ###
+    ./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic
+    ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testTimeToNextUpdate
+
+### Running a particular unit/integration test with log4j output ###
+Change the log4j setting in either `clients/src/test/resources/log4j.properties` or `core/src/test/resources/log4j.properties`
+
+    ./gradlew clients:test --tests RequestResponseTest
+
+### Specifying test retries ###
+By default, each failed test is retried once up to a maximum of five retries per test run. Tests are retried at the end of the test task. Adjust these parameters in the following way:
+
+    ./gradlew test -PmaxTestRetries=1 -PmaxTestRetryFailures=5
+    
+See [Test Retry Gradle Plugin](https://github.com/gradle/test-retry-gradle-plugin) for more details.
+
+### Generating test coverage reports ###
+Generate coverage reports for the whole project:
+
+    ./gradlew reportCoverage -PenableTestCoverage=true -Dorg.gradle.parallel=false
+
+Generate coverage for a single module, for example:
+
+    ./gradlew clients:reportCoverage -PenableTestCoverage=true -Dorg.gradle.parallel=false
+    
+### Building a binary release gzipped tar ball ###
+    ./gradlew clean releaseTarGz
+
+The release file can be found inside `./core/build/distributions/`.
+
+### Building auto generated messages ###
+When switching between branches, it is sometimes only necessary to rebuild the RPC auto-generated message data, since the
+generated code can fail to compile after code changes. To rebuild it, run:
+ 
+    ./gradlew processMessages processTestMessages
+
+### Running a Kafka broker in KRaft mode ###
+
+    KAFKA_CLUSTER_ID="$(./bin/kafka-storage.sh random-uuid)"
+    ./bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
+    ./bin/kafka-server-start.sh config/kraft/server.properties
+
+### Running a Kafka broker in ZooKeeper mode ###
+
+    ./bin/zookeeper-server-start.sh config/zookeeper.properties
+    ./bin/kafka-server-start.sh config/server.properties
+
+### Cleaning the build ###
+    ./gradlew clean
+
+### Running a task with one of the Scala versions available (2.12.x or 2.13.x) ###
+*Note that if building the jars with a version other than 2.13.x, you need to set the `SCALA_VERSION` variable or change it in `bin/kafka-run-class.sh` to run the quick start.*
+
+You can pass either the major version (e.g. 2.12) or the full version (e.g. 2.12.7):
+
+    ./gradlew -PscalaVersion=2.12 jar
+    ./gradlew -PscalaVersion=2.12 test
+    ./gradlew -PscalaVersion=2.12 releaseTarGz
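+
+For instance, after building the jars with Scala 2.12, the quick start scripts could be run as follows (the exact Scala patch version and broker address here are illustrative assumptions):
+
+    SCALA_VERSION=2.12.15 ./bin/kafka-topics.sh --list --bootstrap-server localhost:9092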
+
+### Running a task with all the scala versions enabled by default ###
+
+Invoke the `gradlewAll` script followed by the task(s):
+
+    ./gradlewAll test
+    ./gradlewAll jar
+    ./gradlewAll releaseTarGz
+
+### Running a task for a specific project ###
+This is for `core`, `examples` and `clients`:
+
+    ./gradlew core:jar
+    ./gradlew core:test
+
+Streams has multiple sub-projects, but you can run all the tests:
+
+    ./gradlew :streams:testAll
+
+### Listing all gradle tasks ###
+    ./gradlew tasks
+
+### Building IDE project ###
+*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).*
+
+    ./gradlew eclipse
+    ./gradlew idea
+
+The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default
+build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory, and we don't use Gradle's build directory
+in order to avoid known issues with this configuration.
+
+### Publishing the jar for all versions of Scala and for all projects to maven ###
+The recommended command is:
+
+    ./gradlewAll publish
+
+For backwards compatibility, the following also works:
+
+    ./gradlewAll uploadArchives
+
+Please note that for this to work you should create/update `${GRADLE_USER_HOME}/gradle.properties` (typically, `~/.gradle/gradle.properties`) and assign the following variables:
+
+    mavenUrl=
+    mavenUsername=
+    mavenPassword=
+    signing.keyId=
+    signing.password=
+    signing.secretKeyRingFile=
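+
+For example, a local file-based repository could be configured as follows (every value below is an illustrative placeholder, not a real endpoint or credential):
+
+    mavenUrl=file:///tmp/local-maven-repo
+    mavenUsername=example-user
+    mavenPassword=example-password
+    signing.keyId=ABCDEF12
+    signing.password=example-passphrase
+    signing.secretKeyRingFile=/home/example-user/.gnupg/secring.gpg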
+
+### Publishing the streams quickstart archetype artifact to maven ###
+For the Streams archetype project, one cannot use Gradle to upload to Maven; instead, the `mvn deploy` command needs to be called from the quickstart folder:
+
+    cd streams/quickstart
+    mvn deploy
+
+Please note that for this to work you should create/update your user Maven settings (typically, `${USER_HOME}/.m2/settings.xml`) to assign the following variables:
+
+    <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+                           https://maven.apache.org/xsd/settings-1.0.0.xsd">
+    ...                           
+    <servers>
+       ...
+       <server>
+          <id>apache.snapshots.https</id>
+          <username>${maven_username}</username>
+          <password>${maven_password}</password>
+       </server>
+       <server>
+          <id>apache.releases.https</id>
+          <username>${maven_username}</username>
+          <password>${maven_password}</password>
+        </server>
+        ...
+     </servers>
+     ...
+
+
+### Installing ALL the jars to the local Maven repository ###
+The recommended command to build for both Scala 2.12 and 2.13 is:
+
+    ./gradlewAll publishToMavenLocal
+
+For backwards compatibility, the following also works:
+
+    ./gradlewAll install
+
+### Installing specific projects to the local Maven repository ###
+
+    ./gradlew -PskipSigning=true :streams:publishToMavenLocal
+    
+If needed, you can specify the Scala version with `-PscalaVersion=2.13`.
+
+### Building the test jar ###
+    ./gradlew testJar
+
+### Running code quality checks ###
+There are two code quality analysis tools that we regularly run: spotbugs and checkstyle.
+
+#### Checkstyle ####
+Checkstyle enforces a consistent coding style in Kafka.
+You can run checkstyle using:
+
+    ./gradlew checkstyleMain checkstyleTest
+
+The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the
+subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails.
+
+#### Spotbugs ####
+Spotbugs uses static analysis to look for bugs in the code.
+You can run spotbugs using:
+
+    ./gradlew spotbugsMain spotbugsTest -x test
+
+The spotbugs warnings will be found in `reports/spotbugs/main.html` and `reports/spotbugs/test.html` files in the subproject build
+directories. Use `-PxmlSpotBugsReport=true` to generate an XML report instead of an HTML one.
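+
+For example, to produce XML reports while skipping the tests themselves (combining the command and flag above):
+
+    ./gradlew spotbugsMain spotbugsTest -x test -PxmlSpotBugsReport=true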
+
+### JMH microbenchmarks ###
+We use [JMH](https://openjdk.java.net/projects/code-tools/jmh/) to write microbenchmarks that produce reliable results in the JVM.
+    
+See [jmh-benchmarks/README.md](https://github.com/apache/kafka/blob/trunk/jmh-benchmarks/README.md) for details on how to run the microbenchmarks.
+
+### Dependency Analysis ###
+
+The gradle [dependency debugging documentation](https://docs.gradle.org/current/userguide/viewing_debugging_dependencies.html) mentions using the `dependencies` or `dependencyInsight` tasks to debug dependencies for the root project or individual subprojects.
+
+Alternatively, use the `allDeps` or `allDepInsight` tasks for recursively iterating through all subprojects:
+
+    ./gradlew allDeps
+
+    ./gradlew allDepInsight --configuration runtimeClasspath --dependency com.fasterxml.jackson.core:jackson-databind
+
+These take the same arguments as the built-in variants.
+
+### Determining if any dependencies could be updated ###
+    ./gradlew dependencyUpdates
+
+### Common build options ###
+
+The following options should be set with a `-P` switch, for example `./gradlew -PmaxParallelForks=1 test`.
+
+* `commitId`: sets the build commit ID, as `.git/HEAD` might not be correct if there are local commits added for build purposes.
+* `mavenUrl`: sets the URL of the maven deployment repository (`file://path/to/repo` can be used to point to a local repository).
+* `maxParallelForks`: maximum number of test processes to start in parallel. Defaults to the number of processors available to the JVM.
+* `maxScalacThreads`: maximum number of worker threads for the scalac backend. Defaults to the lower of `8` and the number of processors
+available to the JVM. The value must be between 1 and 16 (inclusive).
+* `ignoreFailures`: ignore test failures from junit
+* `showStandardStreams`: shows standard out and standard error of the test JVM(s) on the console.
+* `skipSigning`: skips signing of artifacts.
+* `testLoggingEvents`: unit test events to be logged, separated by comma. For example `./gradlew -PtestLoggingEvents=started,passed,skipped,failed test`.
+* `xmlSpotBugsReport`: enable XML reports for spotBugs. This also disables HTML reports as only one can be enabled at a time.
+* `maxTestRetries`: maximum number of retries for a failing test case.
+* `maxTestRetryFailures`: maximum number of test failures before retrying is disabled for subsequent tests.
+* `enableTestCoverage`: enables test coverage plugins and tasks, including bytecode enhancement of classes required to track said
+coverage. Note that this introduces some overhead when running tests, which is why it's disabled by default (the overhead
+varies, but 15-20% is a reasonable estimate).
+* `keepAliveMode`: configures the keep alive mode for the Gradle compilation daemon; reuse improves start-up time. The value should
+be one of `daemon` or `session` (the default is `daemon`). `daemon` keeps the daemon alive until it's explicitly stopped, while
+`session` keeps it alive until the end of the build session. This currently only affects the Scala compiler; see
+https://github.com/gradle/gradle/pull/21034 for a PR that attempts to do the same for the Java compiler.
+* `scalaOptimizerMode`: configures the optimizing behavior of the scala compiler; the value should be one of `none`, `method`, `inline-kafka` or
+`inline-scala` (the default is `inline-kafka`). `none` is the scala compiler default, which only eliminates unreachable code. `method` also
+includes method-local optimizations. `inline-kafka` adds inlining of methods within the kafka packages. Finally, `inline-scala` also
+includes inlining of methods within the scala library (which avoids lambda allocations for methods like `Option.exists`). `inline-scala` is
+only safe if the Scala library version is the same at compile time and runtime. Since we cannot guarantee this for all cases (for example, users
+may depend on the kafka jar for integration tests where they may include a scala library with a different version), we don't enable it by
+default. See https://www.lightbend.com/blog/scala-inliner-optimizer for more details, and the example invocation after this list.
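+
+As a hypothetical combined invocation of the two compiler-related options above (the task and option values are illustrative, not recommendations):
+
+    ./gradlew -PkeepAliveMode=session -PscalaOptimizerMode=method core:compileScala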
+
+### Running system tests ###
+
+See [tests/README.md](tests/README.md).
+
+### Running in Vagrant ###
+
+See [vagrant/README.md](vagrant/README.md).
+
+### Contribution ###
+
+Apache Kafka is interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html).
+
+To contribute, follow the instructions here:
+ * https://kafka.apache.org/contributing.html 
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..a053be2
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'socket'
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# General config
+enable_dns = false
+# Override to false when bringing up a cluster on AWS
+enable_hostmanager = true
+enable_jmx = false
+num_zookeepers = 1
+num_brokers = 3
+num_workers = 0 # Generic workers that get the code, but don't start any services
+ram_megabytes = 1280
+base_box = "ubuntu/trusty64"
+
+# EC2
+ec2_access_key = ENV['AWS_ACCESS_KEY']
+ec2_secret_key = ENV['AWS_SECRET_KEY']
+ec2_session_token = ENV['AWS_SESSION_TOKEN']
+ec2_keypair_name = nil
+ec2_keypair_file = nil
+
+ec2_region = "us-east-1"
+ec2_az = nil # If nil, the availability zone is chosen by AWS
+ec2_ami = "ami-29ebb519"
+ec2_instance_type = "m3.medium"
+ec2_spot_instance = ENV['SPOT_INSTANCE'] ? ENV['SPOT_INSTANCE'] == 'true' : true
+ec2_spot_max_price = "0.113"  # On-demand price for instance type
+ec2_user = "ubuntu"
+ec2_instance_name_prefix = "kafka-vagrant"
+ec2_security_groups = nil
+ec2_subnet_id = nil
+# Only override this by setting it to false if you're running in a VPC and you
+# are running Vagrant from within that VPC as well.
+ec2_associate_public_ip = nil
+ec2_iam_instance_profile_name = nil
+
+ebs_volume_type = 'gp3'
+
+jdk_major = '8'
+jdk_full = '8u202-linux-x64'
+
+local_config_file = File.join(File.dirname(__FILE__), "Vagrantfile.local")
+if File.exists?(local_config_file) then
+  eval(File.read(local_config_file), binding, "Vagrantfile.local")
+end
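+# As an illustration (not part of the upstream file), a Vagrantfile.local that
+# overrides the defaults above might contain:
+#   num_brokers = 1
+#   num_workers = 2
+#   enable_jmx = true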
+
+# override any instance type set by Vagrantfile.local or above via an environment variable
+if ENV['INSTANCE_TYPE'] then
+  ec2_instance_type = ENV['INSTANCE_TYPE']
+end
+
+# choose the EBS volume size based on the (possibly overridden) instance type
+if ec2_instance_type.start_with?("m3") then
+  ebs_volume_size = 20
+else
+  ebs_volume_size = 40
+end
+
+# TODO(ksweeney): RAM requirements are not empirical and can probably be significantly lowered.
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.hostmanager.enabled = enable_hostmanager
+  config.hostmanager.manage_host = enable_dns
+  config.hostmanager.include_offline = false
+
+  ## Provider-specific global configs
+  config.vm.provider :virtualbox do |vb,override|
+    override.vm.box = base_box
+
+    override.hostmanager.ignore_private_ip = false
+
+    # Brokers started with the standard script currently set Xms and Xmx to 1G,
+    # plus we need some extra head room.
+    vb.customize ["modifyvm", :id, "--memory", ram_megabytes.to_s]
+
+    if Vagrant.has_plugin?("vagrant-cachier")
+      override.cache.scope = :box
+    end
+  end
+
+  config.vm.provider :aws do |aws,override|
+    # The "box" is specified as an AMI
+    override.vm.box = "dummy"
+    override.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
+
+    cached_addresses = {}
+    # Use a custom resolver that SSH's into the machine and finds the IP address
+    # directly. This lets us get at the private IP address directly, avoiding
+    # some issues with using the default IP resolver, which uses the public IP
+    # address.
+    override.hostmanager.ip_resolver = proc do |vm, resolving_vm|
+      if !cached_addresses.has_key?(vm.name)
+        state_id = vm.state.id
+        if state_id != :not_created && state_id != :stopped && vm.communicate.ready?
+          contents = ''
+          vm.communicate.execute("/sbin/ifconfig eth0 | grep 'inet addr' | tail -n 1 | egrep -o '[0-9\.]+' | head -n 1 2>&1") do |type, data|
+            contents << data
+          end
+          cached_addresses[vm.name] = contents.split("\n").first[/(\d+\.\d+\.\d+\.\d+)/, 1]
+        else
+          cached_addresses[vm.name] = nil
+        end
+      end
+      cached_addresses[vm.name]
+    end
+
+    override.ssh.username = ec2_user
+    override.ssh.private_key_path = ec2_keypair_file
+
+    aws.access_key_id = ec2_access_key
+    aws.secret_access_key = ec2_secret_key
+    aws.session_token = ec2_session_token
+    aws.keypair_name = ec2_keypair_name
+
+    aws.region = ec2_region
+    aws.availability_zone = ec2_az
+    aws.instance_type = ec2_instance_type
+
+    aws.ami = ec2_ami
+    aws.security_groups = ec2_security_groups
+    aws.subnet_id = ec2_subnet_id
+    aws.block_device_mapping = [{ 'DeviceName' => '/dev/sda1', 'Ebs.VolumeType' => ebs_volume_type, 'Ebs.VolumeSize' => ebs_volume_size }]
+    # If a subnet is specified, default to turning on a public IP unless the
+    # user explicitly specifies the option. Without a public IP, Vagrant won't
+    # be able to SSH into the hosts unless Vagrant is also running in the VPC.
+    if ec2_associate_public_ip.nil?
+      aws.associate_public_ip = true unless ec2_subnet_id.nil?
+    else
+      aws.associate_public_ip = ec2_associate_public_ip
+    end
+    aws.region_config ec2_region do |region|
+      region.spot_instance = ec2_spot_instance
+      region.spot_max_price = ec2_spot_max_price
+    end
+    aws.iam_instance_profile_name = ec2_iam_instance_profile_name
+
+    # Exclude some directories that can grow very large from syncing
+    override.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__exclude: ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/']
+  end
+
+  def name_node(node, name, ec2_instance_name_prefix)
+    node.vm.hostname = name
+    node.vm.provider :aws do |aws|
+      aws.tags = {
+        'Name' => ec2_instance_name_prefix + "-" + Socket.gethostname + "-" + name,
+        'JenkinsBuildUrl' => ENV['BUILD_URL']
+      }
+    end
+  end
+
+  def assign_local_ip(node, ip_address)
+    node.vm.provider :virtualbox do |vb,override|
+      override.vm.network :private_network, ip: ip_address
+    end
+  end
+
+  ## Cluster definition
+  zookeepers = []
+  (1..num_zookeepers).each { |i|
+    name = "zk" + i.to_s
+    zookeepers.push(name)
+    config.vm.define name do |zookeeper|
+      name_node(zookeeper, name, ec2_instance_name_prefix)
+      ip_address = "192.168.50." + (10 + i).to_s
+      assign_local_ip(zookeeper, ip_address)
+      zookeeper.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
+      zk_jmx_port = enable_jmx ? (8000 + i).to_s : ""
+      zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers, zk_jmx_port]
+    end
+  }
+
+  (1..num_brokers).each { |i|
+    name = "broker" + i.to_s
+    config.vm.define name do |broker|
+      name_node(broker, name, ec2_instance_name_prefix)
+      ip_address = "192.168.50." + (50 + i).to_s
+      assign_local_ip(broker, ip_address)
+      # We need to be careful about what we list as the publicly routable
+      # address since this is registered in ZK and handed out to clients. If
+      # host DNS isn't setup, we shouldn't use hostnames -- IP addresses must be
+      # used to support clients running on the host.
+      zookeeper_connect = zookeepers.map{ |zk_addr| zk_addr + ":2181"}.join(",")
+      broker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
+      kafka_jmx_port = enable_jmx ? (9000 + i).to_s : ""
+      broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect, kafka_jmx_port]
+    end
+  }
+
+  (1..num_workers).each { |i|
+    name = "worker" + i.to_s
+    config.vm.define name do |worker|
+      name_node(worker, name, ec2_instance_name_prefix)
+      ip_address = "192.168.50." + (100 + i).to_s
+      assign_local_ip(worker, ip_address)
+      worker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
+    end
+  }
+
+end
diff --git a/bin/connect-distributed.sh b/bin/connect-distributed.sh
new file mode 100755
index 0000000..b8088ad
--- /dev/null
+++ b/bin/connect-distributed.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+        echo "USAGE: $0 [-daemon] connect-distributed.properties"
+        exit 1
+fi
+
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
+fi
+
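+# Provide a default heap size unless the caller has already set KAFKA_HEAP_OPTS.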
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+  export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'}
+
+COMMAND=$1
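+# If the first argument is -daemon, forward the flag to kafka-run-class.sh
+# (which runs the class in the background) and drop it from the argument list.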
+case $COMMAND in
+  -daemon)
+    EXTRA_ARGS="-daemon "$EXTRA_ARGS
+    shift
+    ;;
+  *)
+    ;;
+esac
+
+exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"
diff --git a/bin/connect-mirror-maker.sh b/bin/connect-mirror-maker.sh
new file mode 100755
index 0000000..8e2b2e1
--- /dev/null
+++ b/bin/connect-mirror-maker.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+        echo "USAGE: $0 [-daemon] mm2.properties"
+        exit 1
+fi
+
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+  export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'}
+
+COMMAND=$1
+case $COMMAND in
+  -daemon)
+    EXTRA_ARGS="-daemon "$EXTRA_ARGS
+    shift
+    ;;
+  *)
+    ;;
+esac
+
+exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@"
diff --git a/bin/connect-plugin-path.sh b/bin/connect-plugin-path.sh
new file mode 100755
index 0000000..5074206
--- /dev/null
+++ b/bin/connect-plugin-path.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+  export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
+fi
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ConnectPluginPath "$@"
diff --git a/bin/connect-standalone.sh b/bin/connect-standalone.sh
new file mode 100755
index 0000000..441069f
--- /dev/null
+++ b/bin/connect-standalone.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+        echo "USAGE: $0 [-daemon] connect-standalone.properties"
+        exit 1
+fi
+
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+  export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'}
+
+COMMAND=$1
+case $COMMAND in
+  -daemon)
+    EXTRA_ARGS="-daemon "$EXTRA_ARGS
+    shift
+    ;;
+  *)
+    ;;
+esac
+
+exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
diff --git a/bin/kafka-acls.sh b/bin/kafka-acls.sh
new file mode 100755
index 0000000..8fa6554
--- /dev/null
+++ b/bin/kafka-acls.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@"
diff --git a/bin/kafka-broker-api-versions.sh b/bin/kafka-broker-api-versions.sh
new file mode 100755
index 0000000..4f560a0
--- /dev/null
+++ b/bin/kafka-broker-api-versions.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@"
diff --git a/bin/kafka-cluster.sh b/bin/kafka-cluster.sh
new file mode 100755
index 0000000..f09858c
--- /dev/null
+++ b/bin/kafka-cluster.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ClusterTool "$@"
diff --git a/bin/kafka-configs.sh b/bin/kafka-configs.sh
new file mode 100755
index 0000000..2f9eb8c
--- /dev/null
+++ b/bin/kafka-configs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@"
diff --git a/bin/kafka-console-consumer.sh b/bin/kafka-console-consumer.sh
new file mode 100755
index 0000000..dbaac2b
--- /dev/null
+++ b/bin/kafka-console-consumer.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@"
diff --git a/bin/kafka-console-producer.sh b/bin/kafka-console-producer.sh
new file mode 100755
index 0000000..e5187b8
--- /dev/null
+++ b/bin/kafka-console-producer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
diff --git a/bin/kafka-consumer-groups.sh b/bin/kafka-consumer-groups.sh
new file mode 100755
index 0000000..feb063d
--- /dev/null
+++ b/bin/kafka-consumer-groups.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@"
diff --git a/bin/kafka-consumer-perf-test.sh b/bin/kafka-consumer-perf-test.sh
new file mode 100755
index 0000000..4eebe87
--- /dev/null
+++ b/bin/kafka-consumer-perf-test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ConsumerPerformance "$@"
diff --git a/bin/kafka-delegation-tokens.sh b/bin/kafka-delegation-tokens.sh
new file mode 100755
index 0000000..9f8bb13
--- /dev/null
+++ b/bin/kafka-delegation-tokens.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.DelegationTokenCommand "$@"
diff --git a/bin/kafka-delete-records.sh b/bin/kafka-delete-records.sh
new file mode 100755
index 0000000..e9db8f9
--- /dev/null
+++ b/bin/kafka-delete-records.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.DeleteRecordsCommand "$@"
diff --git a/bin/kafka-dump-log.sh b/bin/kafka-dump-log.sh
new file mode 100755
index 0000000..a97ea7d
--- /dev/null
+++ b/bin/kafka-dump-log.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@"
diff --git a/bin/kafka-e2e-latency.sh b/bin/kafka-e2e-latency.sh
new file mode 100755
index 0000000..32d1063
--- /dev/null
+++ b/bin/kafka-e2e-latency.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.EndToEndLatency "$@"
diff --git a/bin/kafka-features.sh b/bin/kafka-features.sh
new file mode 100755
index 0000000..8d90a06
--- /dev/null
+++ b/bin/kafka-features.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.FeatureCommand "$@"
diff --git a/bin/kafka-get-offsets.sh b/bin/kafka-get-offsets.sh
new file mode 100755
index 0000000..993a202
--- /dev/null
+++ b/bin/kafka-get-offsets.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.GetOffsetShell "$@"
diff --git a/bin/kafka-jmx.sh b/bin/kafka-jmx.sh
new file mode 100755
index 0000000..88b3874
--- /dev/null
+++ b/bin/kafka-jmx.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.JmxTool "$@"
diff --git a/bin/kafka-leader-election.sh b/bin/kafka-leader-election.sh
new file mode 100755
index 0000000..88baef3
--- /dev/null
+++ b/bin/kafka-leader-election.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@"
diff --git a/bin/kafka-log-dirs.sh b/bin/kafka-log-dirs.sh
new file mode 100755
index 0000000..9894d69
--- /dev/null
+++ b/bin/kafka-log-dirs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.LogDirsCommand "$@"
diff --git a/bin/kafka-metadata-quorum.sh b/bin/kafka-metadata-quorum.sh
new file mode 100755
index 0000000..3b25c7d
--- /dev/null
+++ b/bin/kafka-metadata-quorum.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.MetadataQuorumCommand "$@"
diff --git a/bin/kafka-metadata-shell.sh b/bin/kafka-metadata-shell.sh
new file mode 100755
index 0000000..289f0c1
--- /dev/null
+++ b/bin/kafka-metadata-shell.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.shell.MetadataShell "$@"
diff --git a/bin/kafka-mirror-maker.sh b/bin/kafka-mirror-maker.sh
new file mode 100755
index 0000000..981f271
--- /dev/null
+++ b/bin/kafka-mirror-maker.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@"
diff --git a/bin/kafka-producer-perf-test.sh b/bin/kafka-producer-perf-test.sh
new file mode 100755
index 0000000..73a6288
--- /dev/null
+++ b/bin/kafka-producer-perf-test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@"
diff --git a/bin/kafka-reassign-partitions.sh b/bin/kafka-reassign-partitions.sh
new file mode 100755
index 0000000..4c7f1bc
--- /dev/null
+++ b/bin/kafka-reassign-partitions.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@"
diff --git a/bin/kafka-replica-verification.sh b/bin/kafka-replica-verification.sh
new file mode 100755
index 0000000..1df5639
--- /dev/null
+++ b/bin/kafka-replica-verification.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ReplicaVerificationTool "$@"
diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh
new file mode 100755
index 0000000..9ab96d7
--- /dev/null
+++ b/bin/kafka-run-class.sh
@@ -0,0 +1,347 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+  echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
+  exit 1
+fi
+
+# CYGWIN == 1 if Cygwin is detected, else 0.
+if [[ $(uname -a) =~ "CYGWIN" ]]; then
+  CYGWIN=1
+else
+  CYGWIN=0
+fi
+
+if [ -z "$INCLUDE_TEST_JARS" ]; then
+  INCLUDE_TEST_JARS=false
+fi
+
+# Exclude jars not necessary for running commands.
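+# Besides the usual test/source/javadoc jars, connect-file jars are also
+# excluded here (the file connector is disabled in this distribution).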
+regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc|connect-file.*\.jar)$"
+should_include_file() {
+  if [ "$INCLUDE_TEST_JARS" = true ]; then
+    return 0
+  fi
+  file=$1
+  if [ -z "$(echo "$file" | grep -E "$regex")" ] ; then
+    return 0
+  else
+    return 1
+  fi
+}
+
+base_dir=$(dirname $0)/..
+
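+# Resolve the Scala version: default to 2.13.11, overridden by the
+# scalaVersion entry in gradle.properties when run from a source checkout.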
+if [ -z "$SCALA_VERSION" ]; then
+  SCALA_VERSION=2.13.11
+  if [[ -f "$base_dir/gradle.properties" ]]; then
+    SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2`
+  fi
+fi
+
+if [ -z "$SCALA_BINARY_VERSION" ]; then
+  SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
+fi
+
+# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
+shopt -s nullglob
+if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
+  for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
+  do
+    CLASSPATH="$CLASSPATH:$dir/*"
+  done
+fi
+
+for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
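+# UPGRADE_KAFKA_STREAMS_TEST_VERSION is set by the Streams upgrade system
+# tests; when present, client/streams jars are taken from the release
+# installed under /opt instead of the local build output.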
+if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
+  clients_lib_dir=$(dirname $0)/../clients/build/libs
+  streams_lib_dir=$(dirname $0)/../streams/build/libs
+  streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
+else
+  clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
+  streams_lib_dir=$clients_lib_dir
+  streams_dependant_clients_lib_dir=$streams_lib_dir
+fi
+
+
+for file in "$clients_lib_dir"/kafka-clients*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+for file in "$streams_lib_dir"/kafka-streams*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
+  for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
+  do
+    if should_include_file "$file"; then
+      CLASSPATH="$CLASSPATH":"$file"
+    fi
+  done
+else
+  VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
+  SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, i.e., the bug-fix digit
+  for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
+  do
+    if should_include_file "$file"; then
+      CLASSPATH="$file":"$CLASSPATH"
+    fi
+  done
+  if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
+    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
+    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
+  fi
+  if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
+    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
+    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
+  fi
+fi
+
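+# rocksdb and hamcrest jars from the streams dependant libs are added
+# unconditionally (no test-jar filtering).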
+for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar;
+do
+  CLASSPATH="$CLASSPATH":"$file"
+done
+
+for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar;
+do
+  CLASSPATH="$CLASSPATH":"$file"
+done
+
+for file in "$base_dir"/shell/build/libs/kafka-shell*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+for dir in "$base_dir"/shell/build/dependant-libs-${SCALA_VERSION}*;
+do
+  CLASSPATH="$CLASSPATH:$dir/*"
+done
+
+for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
+do
+  CLASSPATH="$CLASSPATH:$dir/*"
+done
+
+for file in "$base_dir"/trogdor/build/libs/trogdor-*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+for dir in "$base_dir"/trogdor/build/dependant-libs-${SCALA_VERSION}*;
+do
+  CLASSPATH="$CLASSPATH:$dir/*"
+done
+
+for cc_pkg in "api" "transforms" "runtime" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"
+do
+  for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
+  do
+    if should_include_file "$file"; then
+      CLASSPATH="$CLASSPATH":"$file"
+    fi
+  done
+  if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
+    CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
+  fi
+done
+
+# classpath addition for release
+for file in "$base_dir"/libs/*;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+
+for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
+do
+  if should_include_file "$file"; then
+    CLASSPATH="$CLASSPATH":"$file"
+  fi
+done
+shopt -u nullglob
+
+if [ -z "$CLASSPATH" ] ; then
+  echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
+  exit 1
+fi
+
+# JMX settings
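+# Note: these defaults enable remote JMX with authentication and SSL disabled;
+# only expose a JMX port on trusted networks.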
+if [ -z "$KAFKA_JMX_OPTS" ]; then
+  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false  -Dcom.sun.management.jmxremote.ssl=false "
+fi
+
+# JMX port to use
+if [ -n "$JMX_PORT" ]; then
+  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
+  if ! echo "$KAFKA_JMX_OPTS" | grep -qF -- '-Dcom.sun.management.jmxremote.rmi.port=' ; then
+    # If unset, set the RMI port to address issues with monitoring Kafka running in containers
+    KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
+  fi
+fi
+
+# Log directory to use
+if [ "x$LOG_DIR" = "x" ]; then
+  LOG_DIR="$base_dir/logs"
+fi
+
+# Log4j settings
+if [ -z "$KAFKA_LOG4J_OPTS" ]; then
+  # Log to console by default: these scripts run as command-line tools.
+  LOG4J_DIR="$base_dir/config/tools-log4j.properties"
+  # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
+  (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
+  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
+else
+  # create logs directory
+  if [ ! -d "$LOG_DIR" ]; then
+    mkdir -p "$LOG_DIR"
+  fi
+fi
+
+# If Cygwin is detected, LOG_DIR is converted to Windows format.
+(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
+KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
+
+# Generic jvm settings you want to add
+if [ -z "$KAFKA_OPTS" ]; then
+  KAFKA_OPTS=""
+fi
+
+# Set Debug options if enabled
+if [ "x$KAFKA_DEBUG" != "x" ]; then
+
+    # Use default ports
+    DEFAULT_JAVA_DEBUG_PORT="5005"
+
+    if [ -z "$JAVA_DEBUG_PORT" ]; then
+        JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
+    fi
+
+    # Use the defaults if JAVA_DEBUG_OPTS was not set
+    DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
+    if [ -z "$JAVA_DEBUG_OPTS" ]; then
+        JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
+    fi
+
+    echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
+    KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
+fi
+
+# Which java to use
+if [ -z "$JAVA_HOME" ]; then
+  JAVA="java"
+else
+  JAVA="$JAVA_HOME/bin/java"
+fi
+
+# Memory options
+if [ -z "$KAFKA_HEAP_OPTS" ]; then
+  KAFKA_HEAP_OPTS="-Xmx256M"
+fi
+
+# JVM performance options
+# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
+if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
+  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
+fi
+
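+# Consume leading -name/-loggc/-daemon options; parsing stops at the first
+# other argument (the class name), and the remainder is handed to the JVM.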
+while [ $# -gt 0 ]; do
+  COMMAND=$1
+  case $COMMAND in
+    -name)
+      DAEMON_NAME=$2
+      CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
+      shift 2
+      ;;
+    -loggc)
+      if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
+        GC_LOG_ENABLED="true"
+      fi
+      shift
+      ;;
+    -daemon)
+      DAEMON_MODE="true"
+      shift
+      ;;
+    *)
+      break
+      ;;
+  esac
+done
+
+# GC options
+GC_FILE_SUFFIX='-gc.log'
+GC_LOG_FILE_NAME=''
+if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
+  GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
+
+  # The first segment of the version number, which is '1' for releases before Java 9
+  # it then becomes '9', '10', ...
+  # Some examples of the first line of `java --version`:
+  # 8 -> java version "1.8.0_152"
+  # 9.0.4 -> java version "9.0.4"
+  # 10 -> java version "10" 2018-03-20
+  # 10.0.1 -> java version "10.0.1" 2018-04-17
+  # We need to match to the end of the line to prevent sed from printing the characters that do not match
+  JAVA_MAJOR_VERSION=$("$JAVA" -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
+  if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
+    KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=100M"
+  else
+    KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
+  fi
+fi
+
+# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
+# Syntax used on the right side is native Bash string manipulation; for more details see
+# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
+CLASSPATH=${CLASSPATH#:}
+
+# If Cygwin is detected, classpath is converted to Windows format.
+(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
+
+# Launch mode
+if [ "x$DAEMON_MODE" = "xtrue" ]; then
+  nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
+else
+  exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@"
+fi
diff --git a/bin/kafka-server-start.sh b/bin/kafka-server-start.sh
new file mode 100755
index 0000000..5a53126
--- /dev/null
+++ b/bin/kafka-server-start.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+	echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
+	exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
+fi
+
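+# ${EXTRA_ARGS-...} (no colon) keeps an explicitly set but empty EXTRA_ARGS,
+# so EXTRA_ARGS="" can be used to suppress the default flags.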
+EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+  -daemon)
+    EXTRA_ARGS="-daemon $EXTRA_ARGS"
+    shift
+    ;;
+  *)
+    ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
diff --git a/bin/kafka-server-stop.sh b/bin/kafka-server-stop.sh
new file mode 100755
index 0000000..437189f
--- /dev/null
+++ b/bin/kafka-server-stop.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
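+
+# Send SIGTERM by default for a graceful shutdown; override with e.g.
+# SIGNAL=KILL to force-stop.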
+SIGNAL=${SIGNAL:-TERM}
+
+OSNAME=$(uname -s)
+if [[ "$OSNAME" == "OS/390" ]]; then
+    if [ -z "$JOBNAME" ]; then
+        JOBNAME="KAFKSTRT"
+    fi
+    PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}')
+elif [[ "$OSNAME" == "OS400" ]]; then
+    PIDS=$(ps -Af | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $2}')
+else
+    PIDS=$(ps ax | grep ' kafka\.Kafka ' | grep java | grep -v grep | awk '{print $1}')
+fi
+
+if [ -z "$PIDS" ]; then
+  echo "No kafka server to stop"
+  exit 1
+else
+  kill -s $SIGNAL $PIDS
+fi
diff --git a/bin/kafka-storage.sh b/bin/kafka-storage.sh
new file mode 100755
index 0000000..eef9342
--- /dev/null
+++ b/bin/kafka-storage.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.StorageTool "$@"
diff --git a/bin/kafka-streams-application-reset.sh b/bin/kafka-streams-application-reset.sh
new file mode 100755
index 0000000..26ab766
--- /dev/null
+++ b/bin/kafka-streams-application-reset.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.StreamsResetter "$@"
diff --git a/bin/kafka-topics.sh b/bin/kafka-topics.sh
new file mode 100755
index 0000000..ad6a2d4
--- /dev/null
+++ b/bin/kafka-topics.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"
diff --git a/bin/kafka-transactions.sh b/bin/kafka-transactions.sh
new file mode 100755
index 0000000..6fb5233
--- /dev/null
+++ b/bin/kafka-transactions.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.TransactionsCommand "$@"
diff --git a/bin/kafka-verifiable-consumer.sh b/bin/kafka-verifiable-consumer.sh
new file mode 100755
index 0000000..852847d
--- /dev/null
+++ b/bin/kafka-verifiable-consumer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@"
diff --git a/bin/kafka-verifiable-producer.sh b/bin/kafka-verifiable-producer.sh
new file mode 100755
index 0000000..b59bae7
--- /dev/null
+++ b/bin/kafka-verifiable-producer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@"
diff --git a/bin/trogdor.sh b/bin/trogdor.sh
new file mode 100755
index 0000000..3324c4e
--- /dev/null
+++ b/bin/trogdor.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+    cat <<EOF
+The Trogdor fault injector.
+
+Usage:
+  $0 [action] [options]
+
+Actions:
+  agent: Run the trogdor agent.
+  coordinator: Run the trogdor coordinator.
+  client: Run the client which communicates with the trogdor coordinator.
+  agent-client: Run the client which communicates with the trogdor agent.
+  help: This help message.
+EOF
+}
+
+if [[ $# -lt 1 ]]; then
+    usage
+    exit 0
+fi
+action="${1}"
+shift
+CLASS=""
+case ${action} in
+    agent) CLASS="org.apache.kafka.trogdor.agent.Agent";;
+    coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";;
+    client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";;
+    agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";;
+    help) usage; exit 0;;
+    *)  echo "Unknown action '${action}'.  Type '$0 help' for help."; exit 1;;
+esac
+
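+# Trogdor classes ship in test jars; kafka-run-class.sh only recognizes the
+# literal value "true" for INCLUDE_TEST_JARS, so set exactly that.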
+export INCLUDE_TEST_JARS=true
+exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@"
diff --git a/bin/windows/connect-distributed.bat b/bin/windows/connect-distributed.bat
new file mode 100644
index 0000000..0535085
--- /dev/null
+++ b/bin/windows/connect-distributed.bat
@@ -0,0 +1,34 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem    http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+	echo USAGE: %0 connect-distributed.properties
+	EXIT /B 1
+)
+
+SetLocal
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %*
+EndLocal
diff --git a/bin/windows/connect-plugin-path.bat b/bin/windows/connect-plugin-path.bat
new file mode 100644
index 0000000..3f64a82
--- /dev/null
+++ b/bin/windows/connect-plugin-path.bat
@@ -0,0 +1,21 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+	set KAFKA_HEAP_OPTS=-Xms256M -Xmx2G
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ConnectPluginPath %*
\ No newline at end of file
diff --git a/bin/windows/connect-standalone.bat b/bin/windows/connect-standalone.bat
new file mode 100644
index 0000000..12ebb21
--- /dev/null
+++ b/bin/windows/connect-standalone.bat
@@ -0,0 +1,34 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem    http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+	echo USAGE: %0 connect-standalone.properties
+	EXIT /B 1
+)
+
+SetLocal
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %*
+EndLocal
diff --git a/bin/windows/kafka-acls.bat b/bin/windows/kafka-acls.bat
new file mode 100644
index 0000000..8f0be85
--- /dev/null
+++ b/bin/windows/kafka-acls.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %*
diff --git a/bin/windows/kafka-broker-api-versions.bat b/bin/windows/kafka-broker-api-versions.bat
new file mode 100644
index 0000000..f7ec72d
--- /dev/null
+++ b/bin/windows/kafka-broker-api-versions.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %*
diff --git a/bin/windows/kafka-cluster.bat b/bin/windows/kafka-cluster.bat
new file mode 100644
index 0000000..b7c3166
--- /dev/null
+++ b/bin/windows/kafka-cluster.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ClusterTool %*
diff --git a/bin/windows/kafka-configs.bat b/bin/windows/kafka-configs.bat
new file mode 100644
index 0000000..3792a5d
--- /dev/null
+++ b/bin/windows/kafka-configs.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %*
diff --git a/bin/windows/kafka-console-consumer.bat b/bin/windows/kafka-console-consumer.bat
new file mode 100644
index 0000000..bbbd336
--- /dev/null
+++ b/bin/windows/kafka-console-consumer.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %*
+EndLocal
diff --git a/bin/windows/kafka-console-producer.bat b/bin/windows/kafka-console-producer.bat
new file mode 100644
index 0000000..e1834bc
--- /dev/null
+++ b/bin/windows/kafka-console-producer.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %*
+EndLocal
diff --git a/bin/windows/kafka-consumer-groups.bat b/bin/windows/kafka-consumer-groups.bat
new file mode 100644
index 0000000..e027b9e
--- /dev/null
+++ b/bin/windows/kafka-consumer-groups.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %*
diff --git a/bin/windows/kafka-consumer-perf-test.bat b/bin/windows/kafka-consumer-perf-test.bat
new file mode 100644
index 0000000..17e17d3
--- /dev/null
+++ b/bin/windows/kafka-consumer-perf-test.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ConsumerPerformance %*
+EndLocal
diff --git a/bin/windows/kafka-delegation-tokens.bat b/bin/windows/kafka-delegation-tokens.bat
new file mode 100644
index 0000000..596a37c
--- /dev/null
+++ b/bin/windows/kafka-delegation-tokens.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.DelegationTokenCommand %*
diff --git a/bin/windows/kafka-delete-records.bat b/bin/windows/kafka-delete-records.bat
new file mode 100644
index 0000000..a883ec7
--- /dev/null
+++ b/bin/windows/kafka-delete-records.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.DeleteRecordsCommand %*
diff --git a/bin/windows/kafka-dump-log.bat b/bin/windows/kafka-dump-log.bat
new file mode 100644
index 0000000..3a1473d
--- /dev/null
+++ b/bin/windows/kafka-dump-log.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.DumpLogSegments %*
diff --git a/bin/windows/kafka-e2e-latency.bat b/bin/windows/kafka-e2e-latency.bat
new file mode 100644
index 0000000..c26545d
--- /dev/null
+++ b/bin/windows/kafka-e2e-latency.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.EndToEndLatency %*
diff --git a/bin/windows/kafka-features.bat b/bin/windows/kafka-features.bat
new file mode 100644
index 0000000..a5933fa
--- /dev/null
+++ b/bin/windows/kafka-features.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.FeatureCommand %*
diff --git a/bin/windows/kafka-get-offsets.bat b/bin/windows/kafka-get-offsets.bat
new file mode 100644
index 0000000..08b8e27
--- /dev/null
+++ b/bin/windows/kafka-get-offsets.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.GetOffsetShell %*
diff --git a/bin/windows/kafka-jmx.bat b/bin/windows/kafka-jmx.bat
new file mode 100644
index 0000000..72eb8fa
--- /dev/null
+++ b/bin/windows/kafka-jmx.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.JmxTool %*
diff --git a/bin/windows/kafka-leader-election.bat b/bin/windows/kafka-leader-election.bat
new file mode 100644
index 0000000..0432a99
--- /dev/null
+++ b/bin/windows/kafka-leader-election.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.LeaderElectionCommand %*
diff --git a/bin/windows/kafka-log-dirs.bat b/bin/windows/kafka-log-dirs.bat
new file mode 100644
index 0000000..850003c
--- /dev/null
+++ b/bin/windows/kafka-log-dirs.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.LogDirsCommand %*
diff --git a/bin/windows/kafka-metadata-quorum.bat b/bin/windows/kafka-metadata-quorum.bat
new file mode 100644
index 0000000..7942115
--- /dev/null
+++ b/bin/windows/kafka-metadata-quorum.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.MetadataQuorumCommand %*
diff --git a/bin/windows/kafka-mirror-maker.bat b/bin/windows/kafka-mirror-maker.bat
new file mode 100644
index 0000000..a1fae45
--- /dev/null
+++ b/bin/windows/kafka-mirror-maker.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %*
diff --git a/bin/windows/kafka-producer-perf-test.bat b/bin/windows/kafka-producer-perf-test.bat
new file mode 100644
index 0000000..917d211
--- /dev/null
+++ b/bin/windows/kafka-producer-perf-test.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %*
+EndLocal
diff --git a/bin/windows/kafka-reassign-partitions.bat b/bin/windows/kafka-reassign-partitions.bat
new file mode 100644
index 0000000..62b710d
--- /dev/null
+++ b/bin/windows/kafka-reassign-partitions.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %*
diff --git a/bin/windows/kafka-replica-verification.bat b/bin/windows/kafka-replica-verification.bat
new file mode 100644
index 0000000..a64d6f0
--- /dev/null
+++ b/bin/windows/kafka-replica-verification.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ReplicaVerificationTool %*
diff --git a/bin/windows/kafka-run-class.bat b/bin/windows/kafka-run-class.bat
new file mode 100755
index 0000000..42903fb
--- /dev/null
+++ b/bin/windows/kafka-run-class.bat
@@ -0,0 +1,191 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
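+rem Delayed expansion is needed so that !VAR! references inside the IF/FOR blocks
+rem below pick up values assigned within the same block (e.g. SCALA_BINARY_VERSION).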
+
+IF [%1] EQU [] (
+	echo USAGE: %0 classname [opts]
+	EXIT /B 1
+)
+
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+IF ["%SCALA_VERSION%"] EQU [""] (
+  set SCALA_VERSION=2.13.11
+)
+
+IF ["%SCALA_BINARY_VERSION%"] EQU [""] (
+  for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do (
+    set FIRST=%%a
+    set SECOND=%%b
+    if ["!SECOND!"] EQU [""] (
+      set SCALA_BINARY_VERSION=!FIRST!
+    ) else (
+      set SCALA_BINARY_VERSION=!FIRST!.!SECOND!
+    )
+  )
+)
+
+rem Classpath addition for kafka-core dependencies
+for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for kafka-examples
+for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for kafka-clients
+for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for kafka-streams
+for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for kafka-streams-examples
+for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do (
+	call :concat "%%i"
+)
+
+for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for kafka tools
+for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do (
+	call :concat "%%i"
+)
+
+for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
+	call :concat "%%i"
+)
+
+for %%p in (api runtime file json tools) do (
+	for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do (
+		call :concat "%%i"
+	)
+	if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" (
+		call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*"
+	)
+)
+
+rem Classpath addition for release
+for %%i in ("%BASE_DIR%\libs\*") do (
+	call :concat "%%i"
+)
+
+rem Classpath addition for core
+for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do (
+	call :concat "%%i"
+)
+
+rem JMX settings
+IF ["%KAFKA_JMX_OPTS%"] EQU [""] (
+	set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false  -Dcom.sun.management.jmxremote.ssl=false
+)
+
+rem JMX port to use
+IF ["%JMX_PORT%"] NEQ [""] (
+	set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT%
+)
+
+rem Log directory to use
+IF ["%LOG_DIR%"] EQU [""] (
+    set LOG_DIR=%BASE_DIR%/logs
+)
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
+) ELSE (
+  rem create logs directory
+  IF not exist "%LOG_DIR%" (
+      mkdir "%LOG_DIR%"
+  )
+)
+
+set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%"
+
+rem Generic jvm settings you want to add
+IF ["%KAFKA_OPTS%"] EQU [""] (
+	set KAFKA_OPTS=
+)
+
+set DEFAULT_JAVA_DEBUG_PORT=5005
+set DEFAULT_DEBUG_SUSPEND_FLAG=n
+rem Set Debug options if enabled
+IF ["%KAFKA_DEBUG%"] NEQ [""] (
+
+	IF ["%JAVA_DEBUG_PORT%"] EQU [""] (
+		set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT%
+	)
+
+	IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] (
+		set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG%
+	)
+	set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT!
+
+	IF ["%JAVA_DEBUG_OPTS%"] EQU [""] (
+		set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS!
+	)
+
+	echo Enabling Java debug options: !JAVA_DEBUG_OPTS!
+	set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS!
+)
+
+rem Which java to use
+IF ["%JAVA_HOME%"] EQU [""] (
+	set JAVA=java
+) ELSE (
+	set JAVA="%JAVA_HOME%/bin/java"
+)
+
+rem Memory options
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+	set KAFKA_HEAP_OPTS=-Xmx256M
+)
+
+rem JVM performance options
+IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] (
+	set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true
+)
+
+IF not defined CLASSPATH (
+	echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll'
+	EXIT /B 2
+)
+
+set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp "%CLASSPATH%" %KAFKA_OPTS% %*
+rem echo.
+rem echo %COMMAND%
+rem echo.
+%COMMAND%
+
+goto :eof
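+rem :concat appends a single path to CLASSPATH, quoting each entry so that
+rem installation paths containing spaces keep working.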
+:concat
+IF not defined CLASSPATH (
+  set CLASSPATH="%~1"
+) ELSE (
+  set CLASSPATH=%CLASSPATH%;"%~1"
+)
diff --git a/bin/windows/kafka-server-start.bat b/bin/windows/kafka-server-start.bat
new file mode 100644
index 0000000..8624eda
--- /dev/null
+++ b/bin/windows/kafka-server-start.bat
@@ -0,0 +1,38 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+	echo USAGE: %0 server.properties
+	EXIT /B 1
+)
+
+SetLocal
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+    set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
+)
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+    rem detect OS architecture
+    wmic os get osarchitecture | find /i "32-bit" >nul 2>&1
+    IF NOT ERRORLEVEL 1 (
+        rem 32-bit OS
+        set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+    ) ELSE (
+        rem 64-bit OS
+        set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
+    )
+)
+"%~dp0kafka-run-class.bat" kafka.Kafka %*
+EndLocal
diff --git a/bin/windows/kafka-server-stop.bat b/bin/windows/kafka-server-stop.bat
new file mode 100644
index 0000000..676577c
--- /dev/null
+++ b/bin/windows/kafka-server-stop.bat
@@ -0,0 +1,18 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
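+rem Kill any process whose command line mentions kafka.Kafka, excluding the wmic
+rem query itself (its own command line would otherwise match the pattern too).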
+wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete
+rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM
diff --git a/bin/windows/kafka-storage.bat b/bin/windows/kafka-storage.bat
new file mode 100644
index 0000000..4a0e458
--- /dev/null
+++ b/bin/windows/kafka-storage.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.StorageTool %*
diff --git a/bin/windows/kafka-streams-application-reset.bat b/bin/windows/kafka-streams-application-reset.bat
new file mode 100644
index 0000000..77ffc7d
--- /dev/null
+++ b/bin/windows/kafka-streams-application-reset.bat
@@ -0,0 +1,23 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+        set KAFKA_HEAP_OPTS=-Xmx512M
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.StreamsResetter %*
+EndLocal
diff --git a/bin/windows/kafka-topics.bat b/bin/windows/kafka-topics.bat
new file mode 100644
index 0000000..677b09d
--- /dev/null
+++ b/bin/windows/kafka-topics.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %*
diff --git a/bin/windows/kafka-transactions.bat b/bin/windows/kafka-transactions.bat
new file mode 100644
index 0000000..9bb7585
--- /dev/null
+++ b/bin/windows/kafka-transactions.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.TransactionsCommand %*
diff --git a/bin/windows/zookeeper-server-start.bat b/bin/windows/zookeeper-server-start.bat
new file mode 100644
index 0000000..f201a58
--- /dev/null
+++ b/bin/windows/zookeeper-server-start.bat
@@ -0,0 +1,30 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+	echo USAGE: %0 zookeeper.properties
+	EXIT /B 1
+)
+
+SetLocal
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+    set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
+)
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+    set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+)
+"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %*
+EndLocal
diff --git a/bin/windows/zookeeper-server-stop.bat b/bin/windows/zookeeper-server-stop.bat
new file mode 100644
index 0000000..8b57dd8
--- /dev/null
+++ b/bin/windows/zookeeper-server-stop.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete
diff --git a/bin/windows/zookeeper-shell.bat b/bin/windows/zookeeper-shell.bat
new file mode 100644
index 0000000..f1c86c4
--- /dev/null
+++ b/bin/windows/zookeeper-shell.bat
@@ -0,0 +1,22 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+	echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]
+	EXIT /B 1
+)
+
+"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %*
diff --git a/bin/zookeeper-security-migration.sh b/bin/zookeeper-security-migration.sh
new file mode 100755
index 0000000..722bde7
--- /dev/null
+++ b/bin/zookeeper-security-migration.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@"
diff --git a/bin/zookeeper-server-start.sh b/bin/zookeeper-server-start.sh
new file mode 100755
index 0000000..bd9c114
--- /dev/null
+++ b/bin/zookeeper-server-start.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+	echo "USAGE: $0 [-daemon] zookeeper.properties"
+	exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+    export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'}
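+# Note: ${EXTRA_ARGS-default} (no colon) only substitutes the default when the
+# variable is unset; an explicitly empty EXTRA_ARGS is preserved.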
+
+COMMAND=$1
+case $COMMAND in
+  -daemon)
+     EXTRA_ARGS="-daemon "$EXTRA_ARGS
+     shift
+     ;;
+ *)
+     ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"
diff --git a/bin/zookeeper-server-stop.sh b/bin/zookeeper-server-stop.sh
new file mode 100755
index 0000000..11665f3
--- /dev/null
+++ b/bin/zookeeper-server-stop.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+SIGNAL=${SIGNAL:-TERM}
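+# The signal can be overridden from the environment, e.g. SIGNAL=KILL ./zookeeper-server-stop.sh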
+
+OSNAME=$(uname -s)
+if [[ "$OSNAME" == "OS/390" ]]; then
+    if [ -z $JOBNAME ]; then
+        JOBNAME="ZKEESTRT"
+    fi
+    PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}')
+elif [[ "$OSNAME" == "OS400" ]]; then
+    PIDS=$(ps -Af | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $2}')
+else
+    PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')
+fi
+
+if [ -z "$PIDS" ]; then
+  echo "No zookeeper server to stop"
+  exit 1
+else
+  kill -s $SIGNAL $PIDS
+fi
diff --git a/bin/zookeeper-shell.sh b/bin/zookeeper-shell.sh
new file mode 100755
index 0000000..2f1d0f2
--- /dev/null
+++ b/bin/zookeeper-shell.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+	echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]"
+	exit 1
+fi
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@"
diff --git a/build.gradle b/build.gradle
new file mode 100644
index 0000000..69f1d40
--- /dev/null
+++ b/build.gradle
@@ -0,0 +1,3154 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import org.ajoberstar.grgit.Grgit
+import org.gradle.api.JavaVersion
+
+import java.nio.charset.StandardCharsets
+
+buildscript {
+  repositories {
+    mavenCentral()
+  }
+  apply from: "$rootDir/gradle/dependencies.gradle"
+
+  dependencies {
+    // For Apache Rat plugin to ignore non-Git files
+    classpath "org.ajoberstar.grgit:grgit-core:$versions.grgit"
+  }
+}
+
+plugins {
+  id 'com.github.ben-manes.versions' version '0.47.0'
+  id 'idea'
+  id 'jacoco'
+  id 'java-library'
+  id 'org.owasp.dependencycheck' version '8.2.1'
+  id 'org.nosphere.apache.rat' version "0.8.0"
+  id "io.swagger.core.v3.swagger-gradle-plugin" version "2.2.8"
+
+  id "com.github.spotbugs" version '5.0.13' apply false
+  id 'org.scoverage' version '7.0.1' apply false
+  id 'com.github.johnrengelman.shadow' version '8.1.1' apply false
+  id 'com.diffplug.spotless' version '6.14.0' apply false // 6.14.1 and newer require Java 11 at compile time, so we can't upgrade until AK 4.0
+}
+
+ext {
+  gradleVersion = versions.gradle
+  minJavaVersion = 8
+  buildVersionFileName = "kafka-version.properties"
+
+  defaultMaxHeapSize = "2g"
+  defaultJvmArgs = ["-Xss4m", "-XX:+UseParallelGC"]
+
+  // "JEP 403: Strongly Encapsulate JDK Internals" causes some tests to fail when they try
+  // to access internals (often via mocking libraries). We use `--add-opens` as a workaround
+  // for now and we'll fix it properly (where possible) via KAFKA-13275.
+  if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_16))
+    defaultJvmArgs.addAll(
+      "--add-opens=java.base/java.io=ALL-UNNAMED",
+      "--add-opens=java.base/java.lang=ALL-UNNAMED",
+      "--add-opens=java.base/java.nio=ALL-UNNAMED",
+      "--add-opens=java.base/java.nio.file=ALL-UNNAMED",
+      "--add-opens=java.base/java.util=ALL-UNNAMED",
+      "--add-opens=java.base/java.util.concurrent=ALL-UNNAMED",
+      "--add-opens=java.base/java.util.regex=ALL-UNNAMED",
+      "--add-opens=java.base/java.util.stream=ALL-UNNAMED",
+      "--add-opens=java.base/java.text=ALL-UNNAMED",
+      "--add-opens=java.base/java.time=ALL-UNNAMED",
+      "--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED"
+    )
+
+  maxTestForks = project.hasProperty('maxParallelForks') ? maxParallelForks.toInteger() : Runtime.runtime.availableProcessors()
+  maxScalacThreads = project.hasProperty('maxScalacThreads') ? maxScalacThreads.toInteger() :
+      Math.min(Runtime.runtime.availableProcessors(), 8)
+  userIgnoreFailures = project.hasProperty('ignoreFailures') ? ignoreFailures : false
+
+  userMaxTestRetries = project.hasProperty('maxTestRetries') ? maxTestRetries.toInteger() : 0
+  userMaxTestRetryFailures = project.hasProperty('maxTestRetryFailures') ? maxTestRetryFailures.toInteger() : 0
+
+  skipSigning = project.hasProperty('skipSigning') && skipSigning.toBoolean()
+  shouldSign = !skipSigning && !version.endsWith("SNAPSHOT")
+
+  mavenUrl = project.hasProperty('mavenUrl') ? project.mavenUrl : ''
+  mavenUsername = project.hasProperty('mavenUsername') ? project.mavenUsername : ''
+  mavenPassword = project.hasProperty('mavenPassword') ? project.mavenPassword : ''
+
+  userShowStandardStreams = project.hasProperty("showStandardStreams") ? showStandardStreams : null
+
+  userTestLoggingEvents = project.hasProperty("testLoggingEvents") ? Arrays.asList(testLoggingEvents.split(",")) : null
+
+  userEnableTestCoverage = project.hasProperty("enableTestCoverage") ? enableTestCoverage : false
+
+  userKeepAliveModeString = project.hasProperty("keepAliveMode") ? keepAliveMode : "daemon"
+  userKeepAliveMode = KeepAliveMode.values().find(m -> m.name().toLowerCase().equals(userKeepAliveModeString))
+  if (userKeepAliveMode == null) {
+    def keepAliveValues = KeepAliveMode.values().collect(m -> m.name.toLowerCase())
+    throw new GradleException("Unexpected value for keepAliveMode property. Expected one of $keepAliveValues, but received: $userKeepAliveModeString")
+  }
+
+  // See README.md for details on this option and the reasoning for the default
+  userScalaOptimizerMode = project.hasProperty("scalaOptimizerMode") ? scalaOptimizerMode : "inline-kafka"
+  def scalaOptimizerValues = ["none", "method", "inline-kafka", "inline-scala"]
+  if (!scalaOptimizerValues.contains(userScalaOptimizerMode))
+    throw new GradleException("Unexpected value for scalaOptimizerMode property. Expected one of $scalaOptimizerValues, but received: $userScalaOptimizerMode")
+
+  generatedDocsDir = new File("${project.rootDir}/docs/generated")
+  repo = file("$rootDir/.git").isDirectory() ? Grgit.open(currentDir: project.getRootDir()) : null
+
+  commitId = determineCommitId()
+}
+
+allprojects {
+
+  repositories {
+    mavenCentral()
+  }
+
+  dependencyUpdates {
+    revision="release"
+    resolutionStrategy {
+      componentSelection { rules ->
+        rules.all { ComponentSelection selection ->
+          boolean rejected = ['snap', 'alpha', 'beta', 'rc', 'cr', 'm'].any { qualifier ->
+            selection.candidate.version ==~ /(?i).*[.-]${qualifier}[.\d-]*/
+          }
+          if (rejected) {
+            selection.reject('Release candidate')
+          }
+        }
+      }
+    }
+  }
+
+  configurations.all {
+    // zinc is the Scala incremental compiler; it has a configuration for its own dependencies
+    // that are unrelated to the project dependencies, so we should not change them
+    if (name != "zinc") {
+      resolutionStrategy {
+        force(
+          // be explicit about the javassist dependency version instead of relying on the transitive version
+          libs.javassist,
+          // ensure we have a single version in the classpath despite transitive dependencies
+          libs.scalaLibrary,
+          libs.scalaReflect,
+          libs.jacksonAnnotations,
+          // be explicit about the Netty dependency version instead of relying on the version set by
+          // ZooKeeper (potentially older and containing CVEs)
+          libs.nettyHandler,
+          libs.nettyTransportNativeEpoll,
+          // be explicit about the reload4j version instead of relying on the transitive versions
+          libs.log4j
+        )
+      }
+    }
+  }
+  task printAllDependencies(type: DependencyReportTask) {}
+}
+
+def determineCommitId() {
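+  // Resolution order: an explicit -PcommitId property wins, then the git HEAD of
+  // the working tree, falling back to "unknown" when building outside a git checkout.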
+  def takeFromHash = 16
+  if (project.hasProperty('commitId')) {
+    commitId.take(takeFromHash)
+  } else if (repo != null) {
+    repo.head().id.take(takeFromHash)
+  } else {
+    "unknown"
+  }
+}
+
+apply from: file('wrapper.gradle')
+
+if (repo != null) {
+  rat {
+    dependsOn subprojects.collect {
+      it.tasks.matching {
+        it.name == "processMessages" || it.name == "processTestMessages"
+      }
+    }
+
+    verbose.set(true)
+    reportDir.set(project.file('build/rat'))
+    stylesheet.set(file('gradle/resources/rat-output-to-html.xsl'))
+
+    // Exclude everything under the directory that git should be ignoring via .gitignore or that isn't checked in. These
+    // restrict us only to files that are checked in or are staged.
+    excludes = new ArrayList<String>(repo.clean(ignore: false, directories: true, dryRun: true))
+    // And some of the files that we have checked in should also be excluded from this check
+    excludes.addAll([
+        '**/.git/**',
+        '**/build/**',
+        'CONTRIBUTING.md',
+        'PULL_REQUEST_TEMPLATE.md',
+        'gradlew',
+        'gradlew.bat',
+        'gradle/wrapper/gradle-wrapper.properties',
+        'trogdor/README.md',
+        '**/README.md',
+        '**/id_rsa',
+        '**/id_rsa.pub',
+        'checkstyle/suppressions.xml',
+        'streams/quickstart/java/src/test/resources/projects/basic/goal.txt',
+        'streams/streams-scala/logs/*',
+        'licenses/*',
+        '**/generated/**',
+        'clients/src/test/resources/serializedData/*'
+    ])
+  }
+} else {
+  rat.enabled = false
+}
+println("Starting build with version $version (commit id ${commitId == null ? "null" : commitId.take(8)}) using Gradle $gradleVersion, Java ${JavaVersion.current()} and Scala ${versions.scala}")
+println("Build properties: maxParallelForks=$maxTestForks, maxScalacThreads=$maxScalacThreads, maxTestRetries=$userMaxTestRetries")
+
+subprojects {
+
+  // enable running :dependencies task recursively on all subprojects
+  // eg: ./gradlew allDeps
+  task allDeps(type: DependencyReportTask) {}
+  // enable running :dependencyInsight task recursively on all subprojects
+  // eg: ./gradlew allDepInsight --configuration runtime --dependency com.fasterxml.jackson.core:jackson-databind
+  task allDepInsight(type: DependencyInsightReportTask) {showingAllVariants = false} doLast {}
+
+  apply plugin: 'java-library'
+  apply plugin: 'checkstyle'
+  apply plugin: "com.github.spotbugs"
+
+  // We use the shadow plugin for the jmh-benchmarks module and the `-all` jar can get pretty large, so
+  // don't publish it
+  def shouldPublish = !project.name.equals('jmh-benchmarks')
+
+  if (shouldPublish) {
+    apply plugin: 'maven-publish'
+    apply plugin: 'signing'
+
+    // Add aliases for the task names used by the maven plugin for backwards compatibility
+    // The maven plugin was replaced by the maven-publish plugin in Gradle 7.0
+    tasks.register('install').configure { dependsOn(publishToMavenLocal) }
+    tasks.register('uploadArchives').configure { dependsOn(publish) }
+  }
+
+  // apply the eclipse plugin only to subprojects that hold code. 'connect' is just a folder.
+  if (!project.name.equals('connect')) {
+    apply plugin: 'eclipse'
+    fineTuneEclipseClasspathFile(eclipse, project)
+  }
+
+  java {
+    consistentResolution {
+      // resolve the compileClasspath and then "inject" the result of resolution as strict constraints into the runtimeClasspath
+      useCompileClasspathVersions()
+    }
+  }
+
+  tasks.withType(JavaCompile) {
+    options.encoding = 'UTF-8'
+    options.compilerArgs << "-Xlint:all"
+    // temporary exclusions until all the warnings are fixed
+    if (!project.path.startsWith(":connect"))
+      options.compilerArgs << "-Xlint:-rawtypes"
+    options.compilerArgs << "-Xlint:-serial"
+    options.compilerArgs << "-Xlint:-try"
+    options.compilerArgs << "-Werror"
+
+    // --release is the recommended way to select the target release, but it's only supported from Java 9
+    // onwards, so we also set --source and --target via `sourceCompatibility` and `targetCompatibility` a couple of lines below
+    if (JavaVersion.current().isJava9Compatible())
+      options.release = minJavaVersion
+    // --source/--target 8 is deprecated in Java 20, suppress warning until Java 8 support is dropped in Kafka 4.0
+    if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_20))
+      options.compilerArgs << "-Xlint:-options"
+  }
+
+  // We should only set this if Java version is < 9 (--release is recommended for >= 9), but the Scala plugin for IntelliJ sets
+  // `-target` incorrectly if this is unset
+  sourceCompatibility = minJavaVersion
+  targetCompatibility = minJavaVersion
+
+  if (shouldPublish) {
+
+    publishing {
+      repositories {
+        // To test locally, invoke gradlew with `-PmavenUrl=file:///some/local/path`
+        maven {
+          url = mavenUrl
+          credentials {
+            username = mavenUsername
+            password = mavenPassword
+          }
+        }
+      }
+      publications {
+        mavenJava(MavenPublication) {
+          from components.java
+
+          afterEvaluate {
+            ["srcJar", "javadocJar", "scaladocJar", "testJar", "testSrcJar"].forEach { taskName ->
+              def task = tasks.findByName(taskName)
+              if (task != null)
+                artifact task
+            }
+
+            artifactId = archivesBaseName
+            pom {
+              name = 'Apache Kafka'
+              url = 'https://kafka.apache.org'
+              licenses {
+                license {
+                  name = 'The Apache License, Version 2.0'
+                  url = 'http://www.apache.org/licenses/LICENSE-2.0.txt'
+                  distribution = 'repo'
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+
+    if (shouldSign) {
+      signing {
+        sign publishing.publications.mavenJava
+      }
+    }
+  }
+
+  // Remove the relevant project name once it's converted to JUnit 5
+  def shouldUseJUnit5 = !(["runtime"].contains(it.project.name))
+
+  def testLoggingEvents = ["passed", "skipped", "failed"]
+  def testShowStandardStreams = false
+  def testExceptionFormat = 'full'
+  // Gradle built-in logging only supports sending test output to stdout, which generates a lot
+  // of noise, especially for passing tests. We really only want output for failed tests. This
+  // hooks into the output and logs it (so we don't have to buffer it all in memory) and only
+  // saves the output for failing tests. Directory and filenames are such that you can, e.g.,
+  // create a Jenkins rule to collect failed test output.
+  def logTestStdout = {
+    def testId = { TestDescriptor descriptor ->
+      "${descriptor.className}.${descriptor.name}".toString()
+    }
+
+    def logFiles = new HashMap<String, File>()
+    def logStreams = new HashMap<String, FileOutputStream>()
+    beforeTest { TestDescriptor td ->
+      def tid = testId(td)
+      // truncate the file name if it's too long
+      def logFile = new File(
+              "${projectDir}/build/reports/testOutput/${tid.substring(0, Math.min(tid.size(),240))}.test.stdout"
+      )
+      logFile.parentFile.mkdirs()
+      logFiles.put(tid, logFile)
+      logStreams.put(tid, new FileOutputStream(logFile))
+    }
+    onOutput { TestDescriptor td, TestOutputEvent toe ->
+      def tid = testId(td)
+      // Some output can happen outside the context of a specific test (e.g. at the class level)
+      // and beforeTest/afterTest seems to not be invoked for these cases (and similarly, there's
+      // a TestDescriptor hierarchy that includes the thread executing the test, Gradle tasks,
+      // etc). We see some of these in practice and it seems like something buggy in the Gradle
+      // test runner since we see it *before* any tests and it is frequently not related to any
+      // code in the test (best guess is that it is tail output from last test). We won't have
+      // an output file for these, so simply ignore them. If they become critical for debugging,
+      // they can be seen with showStandardStreams.
+      if (td.name == td.className || td.className == null) {
+        // silently ignore output unrelated to specific test methods
+        return
+      } else if (logStreams.get(tid) == null) {
+        println "WARNING: unexpectedly got output for a test [${tid}]" +
+                " that we didn't previously see in the beforeTest hook." +
+                " Message for debugging: [" + toe.message + "]."
+        return
+      }
+      try {
+        logStreams.get(tid).write(toe.message.getBytes(StandardCharsets.UTF_8))
+      } catch (Exception e) {
+        println "ERROR: Failed to write output for test ${tid}"
+        e.printStackTrace()
+      }
+    }
+    afterTest { TestDescriptor td, TestResult tr ->
+      def tid = testId(td)
+      try {
+        logStreams.get(tid).close()
+        if (tr.resultType != TestResult.ResultType.FAILURE) {
+          logFiles.get(tid).delete()
+        } else {
+          def file = logFiles.get(tid)
+          println "${tid} failed, log available in ${file}"
+        }
+      } catch (Exception e) {
+        println "ERROR: Failed to close stdout file for ${tid}"
+        e.printStackTrace()
+      } finally {
+        logFiles.remove(tid)
+        logStreams.remove(tid)
+      }
+    }
+  }
+
+  // The suites are for running sets of tests in IDEs.
+  // Gradle will run each test class, so we exclude the suites to avoid redundantly running the tests twice.
+  def testsToExclude = ['**/*Suite.class']
+  // Exclude PowerMock tests when running with Java 16 or newer until a version of PowerMock that supports the relevant versions is released
+  // The relevant issues are https://github.com/powermock/powermock/issues/1094 and https://github.com/powermock/powermock/issues/1099
+  if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_16)) {
+    testsToExclude.addAll([
+      // connect tests
+      "**/KafkaConfigBackingStoreTest.*",
+      "**/StandaloneHerderTest.*",
+      "**/WorkerSinkTaskTest.*", "**/WorkerSinkTaskThreadedTest.*"
+    ])
+  }
+
+  test {
+    maxParallelForks = maxTestForks
+    ignoreFailures = userIgnoreFailures
+
+    maxHeapSize = defaultMaxHeapSize
+    jvmArgs = defaultJvmArgs
+
+    testLogging {
+      events = userTestLoggingEvents ?: testLoggingEvents
+      showStandardStreams = userShowStandardStreams ?: testShowStandardStreams
+      exceptionFormat = testExceptionFormat
+      displayGranularity = 0
+    }
+    logTestStdout.rehydrate(delegate, owner, this)()
+
+    exclude testsToExclude
+
+    if (shouldUseJUnit5)
+      useJUnitPlatform()
+
+    retry {
+      maxRetries = userMaxTestRetries
+      maxFailures = userMaxTestRetryFailures
+    }
+  }
+
+  task integrationTest(type: Test, dependsOn: compileJava) {
+    maxParallelForks = maxTestForks
+    ignoreFailures = userIgnoreFailures
+
+    // Increase heap size for integration tests
+    maxHeapSize = "2560m"
+    jvmArgs = defaultJvmArgs
+
+    testLogging {
+      events = userTestLoggingEvents ?: testLoggingEvents
+      showStandardStreams = userShowStandardStreams ?: testShowStandardStreams
+      exceptionFormat = testExceptionFormat
+      displayGranularity = 0
+    }
+    logTestStdout.rehydrate(delegate, owner, this)()
+
+    exclude testsToExclude
+
+    if (shouldUseJUnit5) {
+      if (project.name == 'streams') {
+        useJUnitPlatform {
+          includeTags "integration"
+          includeTags "org.apache.kafka.test.IntegrationTest"
+	  // Both engines are needed to run JUnit 4 tests alongside JUnit 5 tests.
+          // junit-vintage (JUnit 4) can be removed once the JUnit 4 migration is complete.
+          includeEngines "junit-vintage", "junit-jupiter"
+        }
+      } else {
+        useJUnitPlatform {
+          includeTags "integration"
+        }
+      }
+    } else {
+      useJUnit {
+        includeCategories 'org.apache.kafka.test.IntegrationTest'
+      }
+    }
+
+    retry {
+      maxRetries = userMaxTestRetries
+      maxFailures = userMaxTestRetryFailures
+    }
+  }
+
+  task unitTest(type: Test, dependsOn: compileJava) {
+    maxParallelForks = maxTestForks
+    ignoreFailures = userIgnoreFailures
+
+    maxHeapSize = defaultMaxHeapSize
+    jvmArgs = defaultJvmArgs
+
+    testLogging {
+      events = userTestLoggingEvents ?: testLoggingEvents
+      showStandardStreams = userShowStandardStreams ?: testShowStandardStreams
+      exceptionFormat = testExceptionFormat
+      displayGranularity = 0
+    }
+    logTestStdout.rehydrate(delegate, owner, this)()
+
+    exclude testsToExclude
+
+    if (shouldUseJUnit5) {
+      if (project.name == 'streams') {
+        useJUnitPlatform {
+          excludeTags "integration"
+          excludeTags "org.apache.kafka.test.IntegrationTest"
+          // Both engines are needed to run JUnit 4 tests alongside JUnit 5 tests.
+          // junit-vintage (JUnit 4) can be removed once the JUnit 4 migration is complete.
+          includeEngines "junit-vintage", "junit-jupiter"
+        }
+      } else {
+        useJUnitPlatform {
+          excludeTags "integration"
+        }
+      }
+    } else {
+      useJUnit {
+        excludeCategories 'org.apache.kafka.test.IntegrationTest'
+      }
+    }
+
+    retry {
+      maxRetries = userMaxTestRetries
+      maxFailures = userMaxTestRetryFailures
+    }
+  }
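+
+  // How a test ends up in integrationTest rather than unitTest (a sketch; the
+  // class below is hypothetical): under JUnit 5 the split is tag-based, e.g.
+  //
+  //   @org.junit.jupiter.api.Tag("integration")
+  //   public class FooIntegrationTest { ... }
+  //
+  // whereas JUnit 4 tests are selected via
+  // @Category(org.apache.kafka.test.IntegrationTest.class), matching the
+  // include/exclude rules above.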
+
+  // remove test output from all test types
+  tasks.withType(Test).all { t ->
+    cleanTest {
+      delete t.reports.junitXml.outputLocation
+      delete t.reports.html.outputLocation
+    }
+  }
+
+  jar {
+    from "$rootDir/LICENSE"
+    from "$rootDir/NOTICE"
+  }
+
+  task srcJar(type: Jar) {
+    archiveClassifier = 'sources'
+    from "$rootDir/LICENSE"
+    from "$rootDir/NOTICE"
+    from sourceSets.main.allSource
+  }
+
+  task javadocJar(type: Jar, dependsOn: javadoc) {
+    archiveClassifier = 'javadoc'
+    from "$rootDir/LICENSE"
+    from "$rootDir/NOTICE"
+    from javadoc.destinationDir
+  }
+
+  task docsJar(dependsOn: javadocJar)
+
+  javadoc {
+    options.charSet = 'UTF-8'
+    options.docEncoding = 'UTF-8'
+    options.encoding = 'UTF-8'
+    options.memberLevel = JavadocMemberLevel.PUBLIC  // Document only public members/API
+    // Turn off doclint for now, see https://blog.joda.org/2014/02/turning-off-doclint-in-jdk-8-javadoc.html for rationale
+    options.addStringOption('Xdoclint:none', '-quiet')
+
+    // The URL structure was changed to include the locale after Java 8
+    if (JavaVersion.current().isJava11Compatible())
+      options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/"
+    else
+      options.links "https://docs.oracle.com/javase/8/docs/api/"
+  }
+
+  task systemTestLibs(dependsOn: jar)
+
+  if (!sourceSets.test.allSource.isEmpty()) {
+    task testJar(type: Jar) {
+      archiveClassifier = 'test'
+      from "$rootDir/LICENSE"
+      from "$rootDir/NOTICE"
+      from sourceSets.test.output
+    }
+
+    task testSrcJar(type: Jar, dependsOn: testJar) {
+      archiveClassifier = 'test-sources'
+      from "$rootDir/LICENSE"
+      from "$rootDir/NOTICE"
+      from sourceSets.test.allSource
+    }
+
+  }
+
+  plugins.withType(ScalaPlugin) {
+
+    scala {
+      zincVersion = versions.zinc
+    }
+
+    task scaladocJar(type:Jar, dependsOn: scaladoc) {
+      archiveClassifier = 'scaladoc'
+      from "$rootDir/LICENSE"
+      from "$rootDir/NOTICE"
+      from scaladoc.destinationDir
+    }
+
+    //documentation task should also trigger building scala doc jar
+    docsJar.dependsOn scaladocJar
+
+  }
+
+  tasks.withType(ScalaCompile) {
+
+    scalaCompileOptions.keepAliveMode = userKeepAliveMode
+
+    scalaCompileOptions.additionalParameters = [
+      "-deprecation",
+      "-unchecked",
+      "-encoding", "utf8",
+      "-Xlog-reflective-calls",
+      "-feature",
+      "-language:postfixOps",
+      "-language:implicitConversions",
+      "-language:existentials",
+      "-Ybackend-parallelism", maxScalacThreads.toString(),
+      "-Xlint:constant",
+      "-Xlint:delayedinit-select",
+      "-Xlint:doc-detached",
+      "-Xlint:missing-interpolator",
+      "-Xlint:nullary-unit",
+      "-Xlint:option-implicit",
+      "-Xlint:package-object-classes",
+      "-Xlint:poly-implicit-overload",
+      "-Xlint:private-shadow",
+      "-Xlint:stars-align",
+      "-Xlint:type-parameter-shadow",
+      "-Xlint:unused"
+    ]
+
+    // See README.md for details on this option and the meaning of each value
+    if (userScalaOptimizerMode.equals("method"))
+      scalaCompileOptions.additionalParameters += ["-opt:l:method"]
+    else if (userScalaOptimizerMode.startsWith("inline-")) {
+      List<String> inlineFrom = ["-opt-inline-from:org.apache.kafka.**"]
+      if (project.name.equals('core'))
+        inlineFrom.add("-opt-inline-from:kafka.**")
+      if (userScalaOptimizerMode.equals("inline-scala"))
+        inlineFrom.add("-opt-inline-from:scala.**")
+
+      scalaCompileOptions.additionalParameters += ["-opt:l:inline"]
+      scalaCompileOptions.additionalParameters += inlineFrom
+    }
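+
+    // For example, assuming these modes are selected with a -PscalaOptimizerMode
+    // project property (wired to userScalaOptimizerMode earlier in this script),
+    // an inlining build would be invoked roughly as:
+    //   ./gradlew -PscalaOptimizerMode=inline-kafka :core:compileScala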
+
+    if (versions.baseScala != '2.12') {
+      scalaCompileOptions.additionalParameters += ["-opt-warnings", "-Xlint:strict-unsealed-patmat"]
+      // Scala 2.13.2 introduces compiler warning suppression, which is a prerequisite for -Xfatal-warnings
+      scalaCompileOptions.additionalParameters += ["-Xfatal-warnings"]
+    }
+
+    // these options are valid for Scala versions < 2.13 only
+    // Scala 2.13 removes them, see https://github.com/scala/scala/pull/6502 and https://github.com/scala/scala/pull/5969
+    if (versions.baseScala == '2.12') {
+      scalaCompileOptions.additionalParameters += [
+        "-Xlint:by-name-right-associative",
+        "-Xlint:nullary-override",
+        "-Xlint:unsound-match"
+      ]
+    }
+
+    // Scalac 2.12 `-release` requires Java 9 or higher, but Scala 2.13 doesn't have that restriction
+    if (versions.baseScala == "2.13" || JavaVersion.current().isJava9Compatible())
+      scalaCompileOptions.additionalParameters += ["-release", String.valueOf(minJavaVersion)]
+
+    configure(scalaCompileOptions.forkOptions) {
+      memoryMaximumSize = defaultMaxHeapSize
+      jvmArgs = defaultJvmArgs
+    }
+  }
+
+  checkstyle {
+    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
+    configProperties = checkstyleConfigProperties("import-control.xml")
+    toolVersion = versions.checkstyle
+  }
+
+  configure(checkstyleMain) {
+    group = 'Verification'
+    description = 'Run checkstyle on all main Java sources'
+  }
+
+  configure(checkstyleTest) {
+    group = 'Verification'
+    description = 'Run checkstyle on all test Java sources'
+  }
+
+  test.dependsOn('checkstyleMain', 'checkstyleTest')
+
+  spotbugs {
+    toolVersion = versions.spotbugs
+    excludeFilter = file("$rootDir/gradle/spotbugs-exclude.xml")
+    ignoreFailures = false
+  }
+  test.dependsOn('spotbugsMain')
+
+  tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
+    reports {
+      // Continue supporting `xmlFindBugsReport` for compatibility
+      xml.enabled(project.hasProperty('xmlSpotBugsReport') || project.hasProperty('xmlFindBugsReport'))
+      html.enabled(!project.hasProperty('xmlSpotBugsReport') && !project.hasProperty('xmlFindBugsReport'))
+    }
+    maxHeapSize = defaultMaxHeapSize
+    jvmArgs = defaultJvmArgs
+  }
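+
+  // For example, to produce the XML report instead of the default HTML one
+  // (both property names are checked above):
+  //   ./gradlew -PxmlSpotBugsReport=true :clients:spotbugsMain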
+
+  // Ignore core since it's a Scala project
+  if (it.path != ':core') {
+    if (userEnableTestCoverage) {
+      apply plugin: "jacoco"
+
+      jacoco {
+        toolVersion = versions.jacoco
+      }
+
+      // NOTE: the Jacoco Gradle plugin does not support "offline instrumentation", which means that classes mocked by PowerMock
+      // may report 0 coverage, since the source was modified after the initial instrumentation.
+      // See https://github.com/jacoco/jacoco/issues/51
+      jacocoTestReport {
+        dependsOn tasks.test
+        sourceSets sourceSets.main
+        reports {
+          html.required = true
+          xml.required = true
+          csv.required = false
+        }
+      }
+
+    }
+  }
+
+  if (userEnableTestCoverage) {
+    def coverageGen = it.path == ':core' ? 'reportScoverage' : 'jacocoTestReport'
+    task reportCoverage(dependsOn: [coverageGen])
+  }
+
+}
+
+gradle.taskGraph.whenReady { taskGraph ->
+  taskGraph.getAllTasks().findAll { it.name.contains('spotbugsScoverage') || it.name.contains('spotbugsTest') }.each { task ->
+    task.enabled = false
+  }
+}
+
+def fineTuneEclipseClasspathFile(eclipse, project) {
+  eclipse.classpath.file {
+    beforeMerged { cp ->
+      cp.entries.clear()
+      // For the core project, add the directories defined under test/scala as separate source directories
+      if (project.name.equals('core')) {
+        cp.entries.add(new org.gradle.plugins.ide.eclipse.model.SourceFolder("src/test/scala/integration", null))
+        cp.entries.add(new org.gradle.plugins.ide.eclipse.model.SourceFolder("src/test/scala/other", null))
+        cp.entries.add(new org.gradle.plugins.ide.eclipse.model.SourceFolder("src/test/scala/unit", null))
+      }
+    }
+    whenMerged { cp ->
+      // For the core project, exclude the separate sub-directories defined under test/scala. These are added as source dirs above.
+      if (project.name.equals('core')) {
+        cp.entries.findAll { it.kind == "src" && it.path.equals("src/test/scala") }*.excludes = ["integration/", "other/", "unit/"]
+      }
+      /*
+       * Set all Eclipse build output to go to the 'build_eclipse' directory. This ensures that Gradle and Eclipse use different
+       * build output directories, and also avoids the Eclipse default of 'bin', which clashes with some of our script directories.
+       * https://discuss.gradle.org/t/eclipse-generated-files-should-be-put-in-the-same-place-as-the-gradle-generated-files/6986/2
+       */
+      cp.entries.findAll { it.kind == "output" }*.path = "build_eclipse"
+      /*
+       * Some projects have explicitly added test output dependencies. These are required for the Gradle build but not
+       * in Eclipse, since the dependent projects are added as dependencies there. So clean these up from the generated classpath.
+       */
+      cp.entries.removeAll { it.kind == "lib" && it.path.matches(".*/build/(classes|resources)/test") }
+    }
+  }
+}
+
+def checkstyleConfigProperties(configFileName) {
+  [importControlFile: "$rootDir/checkstyle/$configFileName",
+   suppressionsFile: "$rootDir/checkstyle/suppressions.xml",
+   headerFile: "$rootDir/checkstyle/java.header"]
+}
+
+// Aggregates all jacoco results into the root project directory
+if (userEnableTestCoverage) {
+  task jacocoRootReport(type: org.gradle.testing.jacoco.tasks.JacocoReport) {
+    def javaProjects = subprojects.findAll { it.path != ':core' }
+
+    description = 'Generates an aggregate report from all subprojects'
+    dependsOn(javaProjects.test)
+
+    additionalSourceDirs.from = javaProjects.sourceSets.main.allSource.srcDirs
+    sourceDirectories.from = javaProjects.sourceSets.main.allSource.srcDirs
+    classDirectories.from = javaProjects.sourceSets.main.output
+    executionData.from = javaProjects.jacocoTestReport.executionData
+
+    reports {
+      html.required = true
+      xml.required = true
+    }
+    // workaround to ignore projects that don't have any tests at all
+    onlyIf = { true }
+    doFirst {
+      executionData = files(executionData.findAll { it.exists() })
+    }
+  }
+}
+
+if (userEnableTestCoverage) {
+  task reportCoverage(dependsOn: ['jacocoRootReport', 'core:reportCoverage'])
+}
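+
+// A usage sketch (the -PenableTestCoverage property name is an assumption,
+// inferred from the userEnableTestCoverage flag used throughout this script):
+//   ./gradlew -PenableTestCoverage=true reportCoverage
+// fans out to jacocoTestReport for the Java projects and reportScoverage for
+// :core, then aggregates the Jacoco results via jacocoRootReport above.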
+
+def connectPkgs = [
+    'connect:api',
+    'connect:basic-auth-extension',
+    'connect:file',
+    'connect:json',
+    'connect:runtime',
+    'connect:test-plugins',
+    'connect:transforms',
+    'connect:mirror',
+    'connect:mirror-client'
+]
+
+tasks.create(name: "jarConnect", dependsOn: connectPkgs.collect { it + ":jar" }) {}
+
+tasks.create(name: "testConnect", dependsOn: connectPkgs.collect { it + ":test" }) {}
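+
+// e.g. `./gradlew jarConnect` builds the jars of every Connect module listed in
+// connectPkgs in one shot, and `./gradlew testConnect` runs their test suites.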
+
+project(':core') {
+  apply plugin: 'scala'
+
+  // scaladoc generation is configured at the sub-module level with an artifacts
+  // block (see streams-scala). Disabling it here ensures that, if scaladoc
+  // generation is invoked explicitly for the `core` module, the generated jar
+  // doesn't include scaladoc files, since the `core` module has no public APIs.
+  scaladoc {
+    enabled = false
+  }
+  if (userEnableTestCoverage)
+    apply plugin: "org.scoverage"
+  archivesBaseName = "kafka_${versions.baseScala}"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    // `core` is often used in users' tests, so define the following dependencies as `api` for backwards compatibility,
+    // even though the `core` module doesn't expose any public API
+    api project(':clients')
+    api libs.scalaLibrary
+
+    implementation project(':server-common')
+    implementation project(':group-coordinator')
+    implementation project(':metadata')
+    implementation project(':storage:api')
+    implementation project(':tools:tools-api')
+    implementation project(':raft')
+    implementation project(':storage')
+
+    implementation libs.argparse4j
+    implementation libs.commonsValidator
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonModuleScala
+    implementation libs.jacksonDataformatCsv
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.joptSimple
+    implementation libs.jose4j
+    implementation libs.metrics
+    implementation libs.scalaCollectionCompat
+    implementation libs.scalaJava8Compat
+    // only needed transitively, but set it explicitly to ensure it has the same version as scala-library
+    implementation libs.scalaReflect
+    implementation libs.scalaLogging
+    implementation libs.slf4jApi
+    implementation(libs.zookeeper) {
+      // Dropwizard Metrics are required by ZooKeeper as of v3.6.0,
+      // but the library should *not* be used in Kafka code
+      implementation libs.dropwizardMetrics
+      exclude module: 'slf4j-log4j12'
+      exclude module: 'log4j'
+      // Both Kafka and ZooKeeper use slf4j. ZooKeeper moved from log4j to logback in v3.8.0, but Kafka relies on reload4j.
+      // We remove ZooKeeper's dependency on logback so that we have a single logging backend.
+      exclude module: 'logback-classic'
+      exclude module: 'logback-core'
+    }
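+
+    // To confirm these exclusions take effect, the resolved dependency tree can
+    // be inspected with Gradle's built-in dependencies report, e.g.:
+    //   ./gradlew :core:dependencies --configuration runtimeClasspath
+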
+    // ZooKeeperMain depends on commons-cli but declares the dependency as `provided`
+    implementation libs.commonsCli
+
+    compileOnly libs.log4j
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':group-coordinator').sourceSets.test.output
+    testImplementation project(':metadata').sourceSets.test.output
+    testImplementation project(':raft').sourceSets.test.output
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation project(':storage:api').sourceSets.test.output
+    testImplementation libs.bcpkix
+    testImplementation libs.mockitoCore
+    testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc.
+    testImplementation(libs.apacheda) {
+      exclude group: 'xml-apis', module: 'xml-apis'
+      // `mina-core` is a transitive dependency of both `apacheds` and `apacheda`.
+      // It is safer to take it from `apacheds`, since that is the implementation.
+      exclude module: 'mina-core'
+    }
+    testImplementation libs.apachedsCoreApi
+    testImplementation libs.apachedsInterceptorKerberos
+    testImplementation libs.apachedsProtocolShared
+    testImplementation libs.apachedsProtocolKerberos
+    testImplementation libs.apachedsProtocolLdap
+    testImplementation libs.apachedsLdifPartition
+    testImplementation libs.apachedsMavibotPartition
+    testImplementation libs.apachedsJdbmPartition
+    testImplementation libs.junitJupiter
+    testImplementation libs.slf4jlog4j
+    testImplementation(libs.jfreechart) {
+      exclude group: 'junit', module: 'junit'
+    }
+    testImplementation libs.caffeine
+
+    generator project(':generator')
+  }
+
+  if (userEnableTestCoverage) {
+    scoverage {
+      scoverageVersion = versions.scoverage
+      reportDir = file("${rootProject.buildDir}/scoverage")
+      highlighting = false
+      minimumRate = 0.0
+    }
+  }
+
+  configurations {
+    // Manually exclude some unnecessary dependencies
+    implementation.exclude module: 'javax'
+    implementation.exclude module: 'jline'
+    implementation.exclude module: 'jms'
+    implementation.exclude module: 'jmxri'
+    implementation.exclude module: 'jmxtools'
+    implementation.exclude module: 'mail'
+    // To prevent a UniqueResourceException due to the same resource existing in both
+    // org.apache.directory.api/api-all and org.apache.directory.api/api-ldap-schema-data
+    testImplementation.exclude module: 'api-ldap-schema-data'
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "kafka.internals.generated",
+             "-o", "src/generated/java/kafka/internals/generated",
+             "-i", "src/main/resources/common/message",
+             "-m", "MessageDataGenerator"
+    ]
+    inputs.dir("src/main/resources/common/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/kafka/internals/generated")
+  }
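+
+  // i.e. each message schema under src/main/resources/common/message is turned
+  // into Java sources under src/generated/java, which the scala srcDirs in the
+  // sourceSets block below pick up for joint compilation.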
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  task genProtocolErrorDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.common.protocol.Errors'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "protocol_errors.html").newOutputStream()
+  }
+
+  task genProtocolTypesDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.common.protocol.types.Type'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "protocol_types.html").newOutputStream()
+  }
+
+  task genProtocolApiKeyDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.common.protocol.ApiKeys'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "protocol_api_keys.html").newOutputStream()
+  }
+
+  task genProtocolMessageDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.common.protocol.Protocol'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "protocol_messages.html").newOutputStream()
+  }
+
+  task genAdminClientConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.clients.admin.AdminClientConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "admin_client_config.html").newOutputStream()
+  }
+
+  task genProducerConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.clients.producer.ProducerConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "producer_config.html").newOutputStream()
+  }
+
+  task genConsumerConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.clients.consumer.ConsumerConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "consumer_config.html").newOutputStream()
+  }
+
+  task genKafkaConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'kafka.server.KafkaConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "kafka_config.html").newOutputStream()
+  }
+
+  task genTopicConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.storage.internals.log.LogConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "topic_config.html").newOutputStream()
+  }
+
+  task genConsumerMetricsDocs(type: JavaExec) {
+    classpath = sourceSets.test.runtimeClasspath
+    mainClass = 'org.apache.kafka.clients.consumer.internals.ConsumerMetrics'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "consumer_metrics.html").newOutputStream()
+  }
+
+  task genProducerMetricsDocs(type: JavaExec) {
+    classpath = sourceSets.test.runtimeClasspath
+    mainClass = 'org.apache.kafka.clients.producer.internals.ProducerMetrics'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "producer_metrics.html").newOutputStream()
+  }
+
+  task siteDocsTar(dependsOn: ['genProtocolErrorDocs', 'genProtocolTypesDocs', 'genProtocolApiKeyDocs', 'genProtocolMessageDocs',
+                               'genAdminClientConfigDocs', 'genProducerConfigDocs', 'genConsumerConfigDocs',
+                               'genKafkaConfigDocs', 'genTopicConfigDocs',
+                               ':connect:runtime:genConnectConfigDocs', ':connect:runtime:genConnectTransformationDocs',
+                               ':connect:runtime:genConnectPredicateDocs',
+                               ':connect:runtime:genSinkConnectorConfigDocs', ':connect:runtime:genSourceConnectorConfigDocs',
+                               ':streams:genStreamsConfigDocs', 'genConsumerMetricsDocs', 'genProducerMetricsDocs',
+                               ':connect:runtime:genConnectMetricsDocs', ':connect:runtime:genConnectOpenAPIDocs',
+                               ':connect:mirror:genMirrorSourceConfigDocs', ':connect:mirror:genMirrorCheckpointConfigDocs',
+                               ':connect:mirror:genMirrorHeartbeatConfigDocs', ':connect:mirror:genMirrorConnectorConfigDocs',
+                               ':storage:genRemoteLogManagerConfigDoc', ':storage:genRemoteLogMetadataManagerConfigDoc'], type: Tar) {
+    archiveClassifier = 'site-docs'
+    compression = Compression.GZIP
+    from project.file("$rootDir/docs")
+    into 'site-docs'
+    duplicatesStrategy 'exclude'
+  }
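+
+  // Running `./gradlew :core:siteDocsTar` regenerates all of the config,
+  // protocol and metrics docs above and bundles $rootDir/docs into a gzipped
+  // site-docs tarball (by Gradle's archive conventions it lands under
+  // core/build/distributions).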
+
+  tasks.create(name: "releaseTarGz", dependsOn: configurations.archives.artifacts, type: Tar) {
+    into "kafka_${versions.baseScala}-${archiveVersion.get()}"
+    compression = Compression.GZIP
+    from(project.file("$rootDir/bin")) { into "bin/" }
+    from(project.file("$rootDir/config")) { into "config/" }
+    from(project.file("$rootDir/licenses")) { into "licenses/" }
+    from "$rootDir/LICENSE-binary" rename {String filename -> filename.replace("-binary", "")}
+    from "$rootDir/NOTICE-binary" rename {String filename -> filename.replace("-binary", "")}
+    from(configurations.runtimeClasspath) { into("libs/") }
+    from(configurations.archives.artifacts.files) { into("libs/") }
+    from(project.siteDocsTar) { into("site-docs/") }
+    from(project(':tools').jar) { into("libs/") }
+    from(project(':tools').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':trogdor').jar) { into("libs/") }
+    from(project(':trogdor').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':shell').jar) { into("libs/") }
+    from(project(':shell').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:api').jar) { into("libs/") }
+    from(project(':connect:api').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:runtime').jar) { into("libs/") }
+    from(project(':connect:runtime').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:transforms').jar) { into("libs/") }
+    from(project(':connect:transforms').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:json').jar) { into("libs/") }
+    from(project(':connect:json').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:file').jar) { into("libs/") }
+    from(project(':connect:file').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:basic-auth-extension').jar) { into("libs/") }
+    from(project(':connect:basic-auth-extension').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:mirror').jar) { into("libs/") }
+    from(project(':connect:mirror').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':connect:mirror-client').jar) { into("libs/") }
+    from(project(':connect:mirror-client').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':streams').jar) { into("libs/") }
+    from(project(':streams').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':streams:streams-scala').jar) { into("libs/") }
+    from(project(':streams:streams-scala').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':streams:test-utils').jar) { into("libs/") }
+    from(project(':streams:test-utils').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':streams:examples').jar) { into("libs/") }
+    from(project(':streams:examples').configurations.runtimeClasspath) { into("libs/") }
+    from(project(':tools:tools-api').jar) { into("libs/") }
+    from(project(':tools:tools-api').configurations.runtimeClasspath) { into("libs/") }
+    duplicatesStrategy 'exclude'
+  }
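+
+  // `./gradlew releaseTarGz` then assembles the full binary distribution:
+  // bin/, config/, licenses/, libs/ and site-docs/ laid out as above, in an
+  // archive whose name follows Gradle's <baseName>-<version> convention,
+  // e.g. kafka_2.13-<version>.tgz.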
+
+  jar {
+    dependsOn('copyDependantLibs')
+  }
+
+  jar.manifest {
+    attributes(
+      'Version': "${version}"
+    )
+  }
+
+  tasks.create(name: "copyDependantTestLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('*.jar')
+    }
+    into "$buildDir/dependant-testlibs"
+    // By default, Gradle does not handle test dependencies between the sub-projects,
+    // so explicitly include the clients project's test jar in dependant-testlibs.
+    from project(':clients').testJar
+    duplicatesStrategy 'exclude'
+  }
+
+  systemTestLibs.dependsOn('jar', 'testJar', 'copyDependantTestLibs')
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-core.xml")
+  }
+
+  sourceSets {
+    // Set java/scala source folders in the `scala` block to enable joint compilation
+    main {
+      java {
+        srcDirs = []
+      }
+      scala {
+        srcDirs = ["src/generated/java", "src/main/java", "src/main/scala"]
+      }
+    }
+    test {
+      java {
+        srcDirs = []
+      }
+      scala {
+        srcDirs = ["src/test/java", "src/test/scala"]
+      }
+    }
+  }
+}
+
+project(':metadata') {
+  archivesBaseName = "kafka-metadata"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation project(':raft')
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.metrics
+    compileOnly libs.log4j
+    testImplementation libs.junitJupiter
+    testImplementation libs.jqwik
+    testImplementation libs.hamcrest
+    testImplementation libs.mockitoCore
+    testImplementation libs.mockitoInline
+    testImplementation libs.slf4jlog4j
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':raft').sourceSets.test.output
+    testImplementation project(':server-common').sourceSets.test.output
+    generator project(':generator')
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.common.metadata",
+             "-o", "src/generated/java/org/apache/kafka/common/metadata",
+             "-i", "src/main/resources/common/metadata",
+             "-m", "MessageDataGenerator", "JsonConverterGenerator",
+             "-t", "MetadataRecordTypeGenerator", "MetadataJsonConvertersGenerator"
+           ]
+    inputs.dir("src/main/resources/common/metadata")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/common/metadata")
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated/java", "src/test/java"]
+      }
+    }
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-metadata.xml")
+  }
+}
+
+project(':group-coordinator') {
+  archivesBaseName = "kafka-group-coordinator"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation project(':metadata')
+    implementation libs.slf4jApi
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+
+    testRuntimeOnly libs.slf4jlog4j
+
+    generator project(':generator')
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated/java", "src/test/java"]
+      }
+    }
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.coordinator.group.generated",
+             "-o", "src/generated/java/org/apache/kafka/coordinator/group/generated",
+             "-i", "src/main/resources/common/message",
+             "-m", "MessageDataGenerator"
+    ]
+    inputs.dir("src/main/resources/common/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/coordinator/group/generated")
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+}
+
+project(':examples') {
+  archivesBaseName = "kafka-examples"
+
+  dependencies {
+    implementation project(':clients')
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-core.xml")
+  }
+}
+
+project(':generator') {
+  dependencies {
+    implementation libs.argparse4j
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.jacksonJaxrsJsonProvider
+    testImplementation libs.junitJupiter
+  }
+
+  javadoc {
+    enabled = false
+  }
+}
+
+project(':clients') {
+  archivesBaseName = "kafka-clients"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    implementation libs.zstd
+    implementation libs.lz4
+    implementation libs.snappy
+    implementation libs.slf4jApi
+
+    compileOnly libs.jacksonDatabind // for SASL/OAUTHBEARER bearer token parsing
+    compileOnly libs.jacksonJDK8Datatypes
+    compileOnly libs.jose4j          // for SASL/OAUTHBEARER JWT validation; only used by broker
+
+    testImplementation libs.bcpkix
+    testImplementation libs.jacksonJaxrsJsonProvider
+    testImplementation libs.jose4j
+    testImplementation libs.junitJupiter
+    testImplementation libs.log4j
+    testImplementation libs.mockitoInline
+
+    testRuntimeOnly libs.slf4jlog4j
+    testRuntimeOnly libs.jacksonDatabind
+    testRuntimeOnly libs.jacksonJDK8Datatypes
+
+    generator project(':generator')
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+        commitId: commitId,
+        version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
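+
+  // The receipt written above is a sorted key=value listing; for a hypothetical
+  // build it would contain something like:
+  //   commitId=<current git sha>
+  //   version=<project version>
+  // and the jar block below bundles it into the artifact under kafka/.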
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+        include "kafka/$buildVersionFileName"
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.common.message",
+             "-o", "src/generated/java/org/apache/kafka/common/message",
+             "-i", "src/main/resources/common/message",
+             "-t", "ApiMessageTypeGenerator",
+             "-m", "MessageDataGenerator", "JsonConverterGenerator"
+           ]
+    inputs.dir("src/main/resources/common/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/common/message")
+  }
+
+  task processTestMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.common.message",
+             "-o", "src/generated-test/java/org/apache/kafka/common/message",
+             "-i", "src/test/resources/common/message",
+             "-m", "MessageDataGenerator", "JsonConverterGenerator"
+           ]
+    inputs.dir("src/test/resources/common/message")
+        .withPropertyName("testMessages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated-test/java/org/apache/kafka/common/message")
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated-test/java", "src/test/java"]
+      }
+    }
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  compileTestJava.dependsOn 'processTestMessages'
+
+  javadoc {
+    include "**/org/apache/kafka/clients/admin/*"
+    include "**/org/apache/kafka/clients/consumer/*"
+    include "**/org/apache/kafka/clients/producer/*"
+    include "**/org/apache/kafka/common/*"
+    include "**/org/apache/kafka/common/acl/*"
+    include "**/org/apache/kafka/common/annotation/*"
+    include "**/org/apache/kafka/common/errors/*"
+    include "**/org/apache/kafka/common/header/*"
+    include "**/org/apache/kafka/common/metrics/*"
+    include "**/org/apache/kafka/common/metrics/stats/*"
+    include "**/org/apache/kafka/common/quota/*"
+    include "**/org/apache/kafka/common/resource/*"
+    include "**/org/apache/kafka/common/serialization/*"
+    include "**/org/apache/kafka/common/config/*"
+    include "**/org/apache/kafka/common/config/provider/*"
+    include "**/org/apache/kafka/common/security/auth/*"
+    include "**/org/apache/kafka/common/security/plain/*"
+    include "**/org/apache/kafka/common/security/scram/*"
+    include "**/org/apache/kafka/common/security/token/delegation/*"
+    include "**/org/apache/kafka/common/security/oauthbearer/*"
+    include "**/org/apache/kafka/common/security/oauthbearer/secured/*"
+    include "**/org/apache/kafka/server/authorizer/*"
+    include "**/org/apache/kafka/server/policy/*"
+    include "**/org/apache/kafka/server/quota/*"
+  }
+}
+
+project(':raft') {
+  archivesBaseName = "kafka-raft"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation libs.slf4jApi
+    implementation libs.jacksonDatabind
+
+    testImplementation project(':server-common')
+    testImplementation project(':clients')
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+    testImplementation libs.jqwik
+
+    testRuntimeOnly libs.slf4jlog4j
+
+    generator project(':generator')
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+        commitId: commitId,
+        version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.raft.generated",
+             "-o", "src/generated/java/org/apache/kafka/raft/generated",
+             "-i", "src/main/resources/common/message",
+             "-m", "MessageDataGenerator", "JsonConverterGenerator"]
+    inputs.dir("src/main/resources/common/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/raft/generated")
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated/java", "src/test/java"]
+      }
+    }
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+        include "kafka/$buildVersionFileName"
+    }
+  }
+
+  test {
+    useJUnitPlatform {
+      includeEngines 'jqwik', 'junit-jupiter'
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  javadoc {
+    enabled = false
+  }
+}
+
+project(':server-common') {
+  archivesBaseName = "kafka-server-common"
+
+  dependencies {
+    api project(':clients')
+    implementation libs.slf4jApi
+    implementation libs.metrics
+    implementation libs.joptSimple
+    implementation libs.jacksonDatabind
+    implementation libs.pcollections
+
+    testImplementation project(':clients')
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+    testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc.
+    testImplementation libs.hamcrest
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+              commitId: commitId,
+              version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/test/java"]
+      }
+    }
+  }
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+      include "kafka/$buildVersionFileName"
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-server-common.xml")
+  }
+}
+
+project(':storage:api') {
+  archivesBaseName = "kafka-storage-api"
+
+  dependencies {
+    implementation project(':clients')
+    implementation project(':server-common')
+    implementation libs.metrics
+    implementation libs.slf4jApi
+
+    testImplementation project(':clients')
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+              commitId: commitId,
+              version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/test/java"]
+      }
+    }
+  }
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+      include "kafka/$buildVersionFileName"
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  javadoc {
+    include "**/org/apache/kafka/server/log/remote/storage/*"
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-storage.xml")
+  }
+}
+
+project(':storage') {
+  archivesBaseName = "kafka-storage"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    implementation project(':storage:api')
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation libs.caffeine
+    implementation libs.slf4jApi
+    implementation libs.jacksonDatabind
+    implementation libs.metrics
+
+    testImplementation project(':clients')
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':core')
+    testImplementation project(':core').sourceSets.test.output
+    testImplementation project(':server-common')
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation libs.hamcrest
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+    testImplementation libs.bcpkix
+
+    testRuntimeOnly libs.slf4jlog4j
+
+    generator project(':generator')
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+              commitId: commitId,
+              version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.server.log.remote.metadata.storage.generated",
+             "-o", "src/generated/java/org/apache/kafka/server/log/remote/metadata/storage/generated",
+             "-i", "src/main/resources/message",
+             "-m", "MessageDataGenerator", "JsonConverterGenerator",
+             "-t", "MetadataRecordTypeGenerator", "MetadataJsonConvertersGenerator" ]
+    inputs.dir("src/main/resources/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/server/log/remote/metadata/storage/generated")
+  }
+
+  task genRemoteLogManagerConfigDoc(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "remote_log_manager_config.html").newOutputStream()
+  }
+
+  task genRemoteLogMetadataManagerConfigDoc(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "remote_log_metadata_manager_config.html").newOutputStream()
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated/java", "src/test/java"]
+      }
+    }
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+      include "kafka/$buildVersionFileName"
+    }
+  }
+
+  test {
+    useJUnitPlatform {
+      includeEngines 'junit-jupiter'
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-storage.xml")
+  }
+}
+
+project(':tools:tools-api') {
+  archivesBaseName = "kafka-tools-api"
+
+  dependencies {
+    implementation project(':clients')
+    testImplementation libs.junitJupiter
+  }
+
+  task createVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+              commitId: commitId,
+              version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/test/java"]
+      }
+    }
+  }
+
+  jar {
+    dependsOn createVersionFile
+    from("$buildDir") {
+      include "kafka/$buildVersionFileName"
+    }
+  }
+
+  clean.doFirst {
+    delete "$buildDir/kafka/"
+  }
+
+  javadoc {
+    include "**/org/apache/kafka/tools/api/*"
+  }
+}
+
+project(':tools') {
+  archivesBaseName = "kafka-tools"
+
+  dependencies {
+    implementation project(':clients')
+    implementation project(':server-common')
+    implementation project(':connect:api')
+    implementation project(':connect:runtime')
+    implementation project(':log4j-appender')
+    implementation project(':tools:tools-api')
+    implementation libs.argparse4j
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.slf4jApi
+    implementation libs.log4j
+    implementation libs.joptSimple
+
+    implementation libs.jose4j                    // for SASL/OAUTHBEARER JWT validation
+    implementation libs.jacksonJaxrsJsonProvider
+
+    testImplementation project(':clients')
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':core')
+    testImplementation project(':core').sourceSets.test.output
+    testImplementation project(':server-common')
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation project(':connect:api')
+    testImplementation project(':connect:runtime')
+    testImplementation project(':connect:runtime').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc.
+    testImplementation libs.mockitoJunitJupiter // supports MockitoExtension
+    testImplementation libs.bcpkix // required by the clients test module, but we have to specify it explicitly as gradle does not include the transitive test dependency automatically
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+}
+
+project(':trogdor') {
+  archivesBaseName = "trogdor"
+
+  dependencies {
+    implementation project(':clients')
+    implementation project(':log4j-appender')
+    implementation libs.argparse4j
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.slf4jApi
+    implementation libs.log4j
+
+    implementation libs.jacksonJaxrsJsonProvider
+    implementation libs.jerseyContainerServlet
+    implementation libs.jerseyHk2
+    implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.activation // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.jettyServer
+    implementation libs.jettyServlet
+    implementation libs.jettyServlets
+
+    testImplementation project(':clients')
+    testImplementation libs.junitJupiter
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc.
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+}
+
+project(':shell') {
+  archivesBaseName = "kafka-shell"
+
+  dependencies {
+    implementation libs.argparse4j
+    implementation libs.jacksonDatabind
+    implementation libs.jacksonJDK8Datatypes
+    implementation libs.jline
+    implementation libs.slf4jApi
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation project(':core')
+    implementation project(':log4j-appender')
+    implementation project(':metadata')
+    implementation project(':raft')
+
+    implementation libs.jose4j                    // for SASL/OAUTHBEARER JWT validation
+    implementation libs.jacksonJaxrsJsonProvider
+
+    testImplementation project(':clients')
+    testImplementation libs.junitJupiter
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('jline-*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      include('jline-*jar')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+}
+
+project(':streams') {
+  archivesBaseName = "kafka-streams"
+  ext.buildStreamsVersionFileName = "kafka-streams-version.properties"
+
+  configurations {
+    generator
+  }
+
+  dependencies {
+    api project(':clients')
+    // `org.rocksdb.Options` is part of the Kafka Streams public API via `RocksDBConfigSetter`
+    api libs.rocksDBJni
+
+    implementation libs.slf4jApi
+    implementation libs.jacksonAnnotations
+    implementation libs.jacksonDatabind
+
+    // testCompileOnly prevents streams from exporting a dependency on test-utils, which would cause a dependency cycle
+    testCompileOnly project(':streams:test-utils')
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':core')
+    testImplementation project(':tools')
+    testImplementation project(':core').sourceSets.test.output
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation libs.log4j
+    testImplementation libs.junitJupiter
+    testImplementation libs.junitVintageEngine
+    testImplementation libs.easymock
+    testImplementation libs.powermockJunit4
+    testImplementation libs.powermockEasymock
+    testImplementation libs.bcpkix
+    testImplementation libs.hamcrest
+    testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc.
+    testImplementation libs.mockitoJunitJupiter // supports MockitoExtension
+
+    testRuntimeOnly project(':streams:test-utils')
+    testRuntimeOnly libs.slf4jlog4j
+
+    generator project(':generator')
+  }
+
+  task processMessages(type:JavaExec) {
+    mainClass = "org.apache.kafka.message.MessageGenerator"
+    classpath = configurations.generator
+    args = [ "-p", "org.apache.kafka.streams.internals.generated",
+             "-o", "src/generated/java/org/apache/kafka/streams/internals/generated",
+             "-i", "src/main/resources/common/message",
+             "-m", "MessageDataGenerator"
+           ]
+    inputs.dir("src/main/resources/common/message")
+        .withPropertyName("messages")
+        .withPathSensitivity(PathSensitivity.RELATIVE)
+    outputs.cacheIf { true }
+    outputs.dir("src/generated/java/org/apache/kafka/streams/internals/generated")
+  }
+
+  sourceSets {
+    main {
+      java {
+        srcDirs = ["src/generated/java", "src/main/java"]
+      }
+    }
+    test {
+      java {
+        srcDirs = ["src/generated/java", "src/test/java"]
+      }
+    }
+  }
+
+  compileJava.dependsOn 'processMessages'
+  srcJar.dependsOn 'processMessages'
+
+  javadoc {
+    include "**/org/apache/kafka/streams/**"
+    exclude "**/org/apache/kafka/streams/internals/**", "**/org/apache/kafka/streams/**/internals/**"
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  task createStreamsVersionFile() {
+    def receiptFile = file("$buildDir/kafka/$buildStreamsVersionFileName")
+    inputs.property "commitId", commitId
+    inputs.property "version", version
+    outputs.file receiptFile
+
+    doLast {
+      def data = [
+              commitId: commitId,
+              version: version,
+      ]
+
+      receiptFile.parentFile.mkdirs()
+      def content = data.entrySet().collect { "$it.key=$it.value" }.sort().join("\n")
+      receiptFile.setText(content, "ISO-8859-1")
+    }
+  }
+
+  jar {
+    dependsOn 'createStreamsVersionFile'
+    from("$buildDir") {
+      include "kafka/$buildStreamsVersionFileName"
+    }
+    dependsOn 'copyDependantLibs'
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+
+  task genStreamsConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.streams.StreamsConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "streams_config.html").newOutputStream()
+  }
+
+  task testAll(
+    dependsOn: [
+            ':streams:test',
+            ':streams:test-utils:test',
+            ':streams:streams-scala:test',
+            ':streams:upgrade-system-tests-0100:test',
+            ':streams:upgrade-system-tests-0101:test',
+            ':streams:upgrade-system-tests-0102:test',
+            ':streams:upgrade-system-tests-0110:test',
+            ':streams:upgrade-system-tests-10:test',
+            ':streams:upgrade-system-tests-11:test',
+            ':streams:upgrade-system-tests-20:test',
+            ':streams:upgrade-system-tests-21:test',
+            ':streams:upgrade-system-tests-22:test',
+            ':streams:upgrade-system-tests-23:test',
+            ':streams:upgrade-system-tests-24:test',
+            ':streams:upgrade-system-tests-25:test',
+            ':streams:upgrade-system-tests-26:test',
+            ':streams:upgrade-system-tests-27:test',
+            ':streams:upgrade-system-tests-28:test',
+            ':streams:upgrade-system-tests-30:test',
+            ':streams:upgrade-system-tests-31:test',
+            ':streams:upgrade-system-tests-32:test',
+            ':streams:upgrade-system-tests-33:test',
+            ':streams:upgrade-system-tests-34:test',
+            ':streams:upgrade-system-tests-35:test',
+            ':streams:examples:test'
+    ]
+  )
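[Reviewer note: all of the above suites can be run in one invocation with ./gradlew :streams:testAll.]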
+}
+
+project(':streams:streams-scala') {
+  apply plugin: 'scala'
+  archivesBaseName = "kafka-streams-scala_${versions.baseScala}"
+  dependencies {
+    api project(':streams')
+
+    api libs.scalaLibrary
+    api libs.scalaCollectionCompat
+
+    testImplementation project(':core')
+    testImplementation project(':core').sourceSets.test.output
+    testImplementation project(':server-common').sourceSets.test.output
+    testImplementation project(':streams').sourceSets.test.output
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':streams:test-utils')
+
+    testImplementation libs.junitJupiter
+    testImplementation libs.easymock
+    testImplementation libs.hamcrest
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    include "**/org/apache/kafka/streams/scala/**"
+  }
+
+  scaladoc {
+    scalaDocOptions.additionalParameters = ["-no-link-warnings"]
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-streams*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+
+  // spotless 6.14 requires Java 11 at runtime
+  if (JavaVersion.current().isJava11Compatible()) {
+    apply plugin: 'com.diffplug.spotless'
+    spotless {
+      scala {
+        target '**/*.scala'
+        scalafmt("$versions.scalafmt").configFile('../../checkstyle/.scalafmt.conf').scalaMajorVersion(versions.baseScala)
+        licenseHeaderFile '../../checkstyle/java.header', 'package'
+      }
+    }
+  }
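[Reviewer note: with the plugin active, the standard Spotless entry points apply, e.g. ./gradlew :streams:streams-scala:spotlessCheck to verify Scala formatting and ...:spotlessApply to rewrite files in place (task names per the Spotless plugin's defaults).]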
+}
+
+project(':streams:test-utils') {
+  archivesBaseName = "kafka-streams-test-utils"
+
+  dependencies {
+    api project(':streams')
+    api project(':clients')
+
+    implementation libs.slf4jApi
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+    testImplementation libs.hamcrest
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    include "**/org/apache/kafka/streams/test/**"
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-streams*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+
+}
+
+project(':streams:examples') {
+  archivesBaseName = "kafka-streams-examples"
+
+  dependencies {
+    // this dependency should be removed after we unify data API
+    implementation(project(':connect:json')) {
+      // this transitive dependency is not used in Streams, and it breaks SBT builds
+      exclude module: 'javax.ws.rs-api'
+    }
+
+    implementation project(':streams')
+
+    implementation libs.slf4jlog4j
+
+    testImplementation project(':streams:test-utils')
+    testImplementation project(':clients').sourceSets.test.output // for org.apache.kafka.test.IntegrationTest
+    testImplementation libs.junitJupiter
+    testImplementation libs.hamcrest
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-streams*')
+    }
+    into "$buildDir/dependant-libs-${versions.scala}"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn 'copyDependantLibs'
+  }
+}
+
+project(':streams:upgrade-system-tests-0100') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-0100"
+
+  dependencies {
+    testImplementation(libs.kafkaStreams_0100) {
+      exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+      exclude group: 'log4j', module: 'log4j'
+    }
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
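[Reviewer note: the following project blocks, through upgrade-system-tests-35, are identical apart from the pinned kafka-streams artifact; each packages test shims against one historical release so the system tests can exercise rolling upgrades from that version.]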
+
+project(':streams:upgrade-system-tests-0101') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-0101"
+
+  dependencies {
+    testImplementation(libs.kafkaStreams_0101) {
+      exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+      exclude group: 'log4j', module: 'log4j'
+    }
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-0102') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-0102"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_0102
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-0110') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-0110"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_0110
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-10') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-10"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_10
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-11') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-11"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_11
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-20') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-20"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_20
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-21') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-21"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_21
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-22') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-22"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_22
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-23') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-23"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_23
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-24') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-24"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_24
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-25') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-25"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_25
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-26') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-26"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_26
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-27') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-27"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_27
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-28') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-28"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_28
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-30') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-30"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_30
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-31') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-31"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_31
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-32') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-32"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_32
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-33') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-33"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_33
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-34') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-34"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_34
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':streams:upgrade-system-tests-35') {
+  archivesBaseName = "kafka-streams-upgrade-system-tests-35"
+
+  dependencies {
+    testImplementation libs.kafkaStreams_35
+    testRuntimeOnly libs.junitJupiter
+  }
+
+  systemTestLibs {
+    dependsOn testJar
+  }
+}
+
+project(':jmh-benchmarks') {
+
+  apply plugin: 'com.github.johnrengelman.shadow'
+
+  shadowJar {
+    archiveBaseName = 'kafka-jmh-benchmarks'
+  }
+
+  dependencies {
+    implementation(project(':core')) {
+      // jmh requires jopt 4.x while `core` depends on 5.0; the two are not binary compatible
+      exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
+    }
+    implementation project(':server-common')
+    implementation project(':clients')
+    implementation project(':group-coordinator')
+    implementation project(':metadata')
+    implementation project(':storage')
+    implementation project(':streams')
+    implementation project(':connect:api')
+    implementation project(':connect:transforms')
+    implementation project(':clients').sourceSets.test.output
+    implementation project(':core').sourceSets.test.output
+    implementation project(':server-common').sourceSets.test.output
+
+    implementation libs.jmhCore
+    annotationProcessor libs.jmhGeneratorAnnProcess
+    implementation libs.jmhCoreBenchmarks
+    implementation libs.jacksonDatabind
+    implementation libs.metrics
+    implementation libs.mockitoCore
+    implementation libs.slf4jlog4j
+    implementation libs.scalaLibrary
+    implementation libs.scalaJava8Compat
+  }
+
+  tasks.withType(JavaCompile) {
+    // Suppress warning caused by code generated by jmh: `warning: [cast] redundant cast to long`
+    options.compilerArgs << "-Xlint:-cast"
+  }
+
+  jar {
+    manifest {
+      attributes "Main-Class": "org.openjdk.jmh.Main"
+    }
+  }
+
+  checkstyle {
+    configProperties = checkstyleConfigProperties("import-control-jmh-benchmarks.xml")
+  }
+
+  task jmh(type: JavaExec, dependsOn: [':jmh-benchmarks:clean', ':jmh-benchmarks:shadowJar']) {
+
+    mainClass = "-jar"
+
+    doFirst {
+      if (System.getProperty("jmhArgs")) {
+          args System.getProperty("jmhArgs").split(' ')
+      }
+      args = [shadowJar.archivePath, *args]
+    }
+  }
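[Reviewer note: in effect the task rebuilds the benchmark shadow jar and then runs `java -jar` on it; extra JMH options can be supplied as a system property, e.g. ./gradlew :jmh-benchmarks:jmh -DjmhArgs='-f 1 MyBenchmark' (the benchmark selector and flags here are illustrative).]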
+
+  javadoc {
+     enabled = false
+  }
+}
+
+project(':log4j-appender') {
+  archivesBaseName = "kafka-log4j-appender"
+
+  dependencies {
+    implementation project(':clients')
+    implementation libs.slf4jlog4j
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation libs.junitJupiter
+    testImplementation libs.hamcrest
+    testImplementation libs.mockitoCore
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+}
+
+project(':connect:api') {
+  archivesBaseName = "connect-api"
+
+  dependencies {
+    api project(':clients')
+    implementation libs.slf4jApi
+    implementation libs.jaxrsApi
+
+    testImplementation libs.junitJupiter
+    testRuntimeOnly libs.slf4jlog4j
+    testImplementation project(':clients').sourceSets.test.output
+  }
+
+  javadoc {
+    include "**/org/apache/kafka/connect/**" // needed for the `aggregatedJavadoc` task
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:transforms') {
+  archivesBaseName = "connect-transforms"
+
+  dependencies {
+    api project(':connect:api')
+
+    implementation libs.slf4jApi
+
+    testImplementation libs.easymock
+    testImplementation libs.junitJupiter
+
+    testRuntimeOnly libs.slf4jlog4j
+    testImplementation project(':clients').sourceSets.test.output
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:json') {
+  archivesBaseName = "connect-json"
+
+  dependencies {
+    api project(':connect:api')
+
+    api libs.jacksonDatabind
+    api libs.jacksonJDK8Datatypes
+
+    implementation libs.slf4jApi
+
+    testImplementation libs.easymock
+    testImplementation libs.junitJupiter
+
+    testRuntimeOnly libs.slf4jlog4j
+    testImplementation project(':clients').sourceSets.test.output
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:runtime') {
+  configurations {
+    swagger
+  }
+
+  archivesBaseName = "connect-runtime"
+
+  dependencies {
+    // connect-runtime is used in tests; the modules below are declared with `api` for backwards
+    // compatibility, even though applications should generally not depend on `connect-runtime`
+    api project(':connect:api')
+    api project(':clients')
+    api project(':connect:json')
+    api project(':connect:transforms')
+
+    implementation libs.slf4jApi
+    implementation libs.log4j
+    implementation libs.jose4j                    // for SASL/OAUTHBEARER JWT validation
+    implementation libs.jacksonAnnotations
+    implementation libs.jacksonJaxrsJsonProvider
+    implementation libs.jerseyContainerServlet
+    implementation libs.jerseyHk2
+    implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.activation // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.jettyServer
+    implementation libs.jettyServlet
+    implementation libs.jettyServlets
+    implementation libs.jettyClient
+    implementation libs.reflections
+    implementation libs.mavenArtifact
+    implementation libs.swaggerAnnotations
+
+    // We use this library to generate OpenAPI docs for the REST API, but we don't want or need it at compile
+    // or run time. So, we add it to a separate configuration, which we use later on during docs generation
+    swagger libs.swaggerJaxrs2
+
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':core')
+    testImplementation project(':metadata')
+    testImplementation project(':core').sourceSets.test.output
+    testImplementation project(':server-common')
+    testImplementation project(':connect:test-plugins')
+
+    testImplementation libs.easymock
+    testImplementation libs.junitJupiterApi
+    testImplementation libs.junitVintageEngine
+    testImplementation libs.powermockJunit4
+    testImplementation libs.powermockEasymock
+    testImplementation libs.mockitoInline
+    testImplementation libs.httpclient
+
+    testRuntimeOnly libs.slf4jlog4j
+    testRuntimeOnly libs.bcpkix
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      // No need to copy log4j since the module has an explicit dependency on that
+      include('slf4j-log4j12*')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+
+  task genConnectConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.runtime.distributed.DistributedConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "connect_config.html").newOutputStream()
+  }
+
+  task genSinkConnectorConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.runtime.SinkConnectorConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "sink_connector_config.html").newOutputStream()
+  }
+
+  task genSourceConnectorConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.runtime.SourceConnectorConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "source_connector_config.html").newOutputStream()
+  }
+
+  task genConnectTransformationDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.tools.TransformationDoc'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "connect_transforms.html").newOutputStream()
+  }
+
+  task genConnectPredicateDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.tools.PredicateDoc'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "connect_predicates.html").newOutputStream()
+  }
+
+  task genConnectMetricsDocs(type: JavaExec) {
+    classpath = sourceSets.test.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.runtime.ConnectMetrics'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "connect_metrics.html").newOutputStream()
+  }
+
+  task setVersionInOpenAPISpec(type: Copy) {
+    from "$rootDir/gradle/openapi.template"
+    into "$buildDir/resources/docs"
+    rename ('openapi.template', 'openapi.yaml')
+    expand(kafkaVersion: "$rootProject.version")
+  }
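[Reviewer note: the expand() call performs Groovy template expansion, which implies gradle/openapi.template carries a version placeholder that ends up in the copied openapi.yaml; roughly along the lines of (assumed template content, not shown in this diff):

    version: ${kafkaVersion}
]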
+
+  task genConnectOpenAPIDocs(type: io.swagger.v3.plugins.gradle.tasks.ResolveTask, dependsOn: setVersionInOpenAPISpec) {
+    classpath = sourceSets.main.runtimeClasspath
+
+    buildClasspath = classpath + configurations.swagger
+    outputFileName = 'connect_rest'
+    outputFormat = 'YAML'
+    prettyPrint = 'TRUE'
+    sortOutput = 'TRUE'
+    openApiFile = file("$buildDir/resources/docs/openapi.yaml")
+    resourcePackages = ['org.apache.kafka.connect.runtime.rest.resources']
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    outputDir = file(generatedDocsDir)
+  }
+
+}
+
+project(':connect:file') {
+  archivesBaseName = "connect-file"
+
+  dependencies {
+    implementation project(':connect:api')
+    implementation libs.slf4jApi
+
+    testImplementation libs.junitJupiter
+    testImplementation libs.mockitoCore
+
+    testRuntimeOnly libs.slf4jlog4j
+    testImplementation project(':clients').sourceSets.test.output
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:basic-auth-extension') {
+  archivesBaseName = "connect-basic-auth-extension"
+
+  dependencies {
+    implementation project(':connect:api')
+    implementation libs.slf4jApi
+    implementation libs.jaxrsApi
+    implementation libs.jaxAnnotationApi
+
+    testImplementation libs.bcpkix
+    testImplementation libs.mockitoCore
+    testImplementation libs.junitJupiter
+    testImplementation project(':clients').sourceSets.test.output
+
+    testRuntimeOnly libs.slf4jlog4j
+    testRuntimeOnly libs.jerseyContainerServlet
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:mirror') {
+  archivesBaseName = "connect-mirror"
+
+  dependencies {
+    implementation project(':connect:api')
+    implementation project(':connect:runtime')
+    implementation project(':connect:mirror-client')
+    implementation project(':clients')
+
+    implementation libs.argparse4j
+    implementation libs.jacksonAnnotations
+    implementation libs.slf4jApi
+    implementation libs.jacksonJaxrsJsonProvider
+    implementation libs.jerseyContainerServlet
+    implementation libs.jerseyHk2
+    implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.activation // Jersey dependency that was available in the JDK before Java 9
+    implementation libs.jettyServer
+    implementation libs.jettyServlet
+    implementation libs.jettyServlets
+    implementation libs.jettyClient
+    implementation libs.swaggerAnnotations
+
+    testImplementation libs.junitJupiter
+    testImplementation libs.log4j
+    testImplementation libs.mockitoCore
+    testImplementation project(':clients').sourceSets.test.output
+    testImplementation project(':connect:runtime').sourceSets.test.output
+    testImplementation project(':core')
+    testImplementation project(':core').sourceSets.test.output
+
+    testRuntimeOnly project(':connect:runtime')
+    testRuntimeOnly libs.slf4jlog4j
+    testRuntimeOnly libs.bcpkix
+  }
+
+  javadoc {
+    enabled = false
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  task genMirrorConnectorConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.mirror.MirrorConnectorConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "mirror_connector_config.html").newOutputStream()
+  }
+
+  task genMirrorSourceConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.mirror.MirrorSourceConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "mirror_source_config.html").newOutputStream()
+  }
+
+  task genMirrorCheckpointConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.mirror.MirrorCheckpointConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "mirror_checkpoint_config.html").newOutputStream()
+  }
+
+  task genMirrorHeartbeatConfigDocs(type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'org.apache.kafka.connect.mirror.MirrorHeartbeatConfig'
+    if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() }
+    standardOutput = new File(generatedDocsDir, "mirror_heartbeat_config.html").newOutputStream()
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:mirror-client') {
+  archivesBaseName = "connect-mirror-client"
+
+  dependencies {
+    implementation project(':clients')
+    implementation libs.slf4jApi
+
+    testImplementation libs.junitJupiter
+    testImplementation project(':clients').sourceSets.test.output
+
+    testRuntimeOnly libs.slf4jlog4j
+  }
+
+  javadoc {
+    enabled = true
+  }
+
+  tasks.create(name: "copyDependantLibs", type: Copy) {
+    from (configurations.testRuntimeClasspath) {
+      include('slf4j-log4j12*')
+      include('reload4j*jar')
+    }
+    from (configurations.runtimeClasspath) {
+      exclude('kafka-clients*')
+      exclude('connect-*')
+    }
+    into "$buildDir/dependant-libs"
+    duplicatesStrategy 'exclude'
+  }
+
+  jar {
+    dependsOn copyDependantLibs
+  }
+}
+
+project(':connect:test-plugins') {
+  archivesBaseName = "connect-test-plugins"
+
+  dependencies {
+    api project(':connect:api')
+
+    implementation project(':server-common')
+    implementation libs.slf4jApi
+    implementation libs.jacksonDatabind
+  }
+}
+
+task aggregatedJavadoc(type: Javadoc, dependsOn: compileJava) {
+  def projectsWithJavadoc = subprojects.findAll { it.javadoc.enabled }
+  source = projectsWithJavadoc.collect { it.sourceSets.main.allJava }
+  classpath = files(projectsWithJavadoc.collect { it.sourceSets.main.compileClasspath })
+  includes = projectsWithJavadoc.collectMany { it.javadoc.getIncludes() }
+  excludes = projectsWithJavadoc.collectMany { it.javadoc.getExcludes() }
+
+  options.charSet = 'UTF-8'
+  options.docEncoding = 'UTF-8'
+  options.encoding = 'UTF-8'
+  // Turn off doclint for now, see https://blog.joda.org/2014/02/turning-off-doclint-in-jdk-8-javadoc.html for rationale
+  options.addStringOption('Xdoclint:none', '-quiet')
+
+  // The URL structure was changed to include the locale after Java 8
+  if (JavaVersion.current().isJava11Compatible())
+    options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/"
+  else
+    options.links "https://docs.oracle.com/javase/8/docs/api/"
+}
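[Reviewer note: the combined API docs can then be generated from the root project with ./gradlew aggregatedJavadoc.]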
diff --git a/checkstyle/.scalafmt.conf b/checkstyle/.scalafmt.conf
new file mode 100644
index 0000000..a6fae4a
--- /dev/null
+++ b/checkstyle/.scalafmt.conf
@@ -0,0 +1,22 @@
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+version = 3.5.9
+runner.dialect = scala213
+docstrings.style = Asterisk
+docstrings.wrap = false
+maxColumn = 120
+continuationIndent.defnSite = 2
+assumeStandardLibraryStripMargin = true
+rewrite.rules = [SortImports, RedundantBraces, RedundantParens, SortModifiers]
\ No newline at end of file
diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml
new file mode 100644
index 0000000..bf2d339
--- /dev/null
+++ b/checkstyle/checkstyle.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE module PUBLIC
+    "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+     "http://www.puppycrawl.com/dtds/configuration_1_3.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<module name="Checker">
+  <property name="localeLanguage" value="en"/>
+
+  <module name="FileTabCharacter"/>
+
+  <!-- header -->
+  <module name="Header">
+    <property name="headerFile" value="${headerFile}" />
+  </module>
+
+  <module name="TreeWalker">
+
+    <!-- code cleanup -->
+    <module name="UnusedImports">
+      <property name="processJavadoc" value="true" />
+    </module>
+    <module name="RedundantImport"/>
+    <module name="IllegalImport" />
+    <module name="EqualsHashCode"/>
+    <module name="SimplifyBooleanExpression"/>
+    <module name="OneStatementPerLine"/>
+    <module name="UnnecessaryParentheses" />
+    <module name="SimplifyBooleanReturn"/>
+
+    <!-- style -->
+    <module name="DefaultComesLast"/>
+    <module name="EmptyStatement"/>
+    <module name="ArrayTypeStyle"/>
+    <module name="UpperEll"/>
+    <module name="LeftCurly"/>
+    <module name="RightCurly"/>
+    <module name="EmptyStatement"/>
+    <module name="ConstantName">
+      <property name="format" value="(^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$)|(^log$)"/>
+    </module>
+    <module name="LocalVariableName"/>
+    <module name="LocalFinalVariableName"/>
+    <module name="MemberName"/>
+    <module name="ClassTypeParameterName">
+      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
+    </module>
+    <module name="MethodTypeParameterName">
+      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
+    </module>
+    <module name="InterfaceTypeParameterName">
+      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
+    </module>
+    <module name="PackageName"/>
+    <module name="ParameterName"/>
+    <module name="StaticVariableName"/>
+    <module name="TypeName"/>
+    <module name="AvoidStarImport"/>
+
+    <!-- variables that can be final should be final (suppressed except for Streams) -->
+    <module name="FinalLocalVariable">
+      <property name="tokens" value="VARIABLE_DEF,PARAMETER_DEF"/>
+      <property name="validateEnhancedForLoopVariable" value="true"/>
+    </module>
+
+    <!-- dependencies -->
+    <module name="ImportControl">
+      <property name="file" value="${importControlFile}"/>
+    </module>
+
+    <!-- whitespace -->
+    <module name="GenericWhitespace"/>
+    <module name="NoWhitespaceBefore"/>
+    <module name="WhitespaceAfter" />
+    <module name="NoWhitespaceAfter"/>
+    <module name="WhitespaceAround">
+      <property name="allowEmptyConstructors" value="true"/>
+      <property name="allowEmptyMethods" value="true"/>
+    </module>
+    <module name="Indentation"/>
+    <module name="MethodParamPad"/>
+    <module name="ParenPad"/>
+    <module name="TypecastParenPad"/>
+
+    <!-- locale-sensitive methods should specify locale -->
+    <module name="Regexp">
+      <property name="format" value="\.to(Lower|Upper)Case\(\)"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="ignoreComments" value="true"/>
+    </module>
+
+    <module name="Regexp">
+      <property name="id" value="dontUseSystemExit"/>
+      <property name="format" value="System\.exit"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="ignoreComments" value="true"/>
+      <property name="message" value="'System.exit': Should not directly call System.exit, but Exit.exit instead."/>
+    </module>
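[Reviewer note: the net effect of these two Regexp modules is that code must use the locale-explicit case conversions (e.g. toLowerCase(Locale.ROOT)) and the project's Exit.exit(...) wrapper rather than System.exit.]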
+
+    <!-- code quality -->
+    <module name="MethodLength"/>
+    <module name="ParameterNumber">
+      <!-- default is 8 -->
+      <property name="max" value="13"/>
+    </module>
+    <module name="ClassDataAbstractionCoupling">
+      <!-- default is 7 -->
+      <property name="max" value="25"/>
+      <property name="excludeClassesRegexps" value="AtomicInteger"/>
+    </module>
+    <module name="BooleanExpressionComplexity">
+      <!-- default is 3 -->
+      <property name="max" value="5"/>
+    </module>
+
+    <module name="ClassFanOutComplexity">
+      <!-- default is 20 -->
+      <property name="max" value="50"/>
+    </module>
+    <module name="CyclomaticComplexity">
+      <!-- default is 10-->
+      <property name="max" value="16"/>
+    </module>
+    <module name="JavaNCSS">
+      <!-- default is 50 -->
+      <property name="methodMaximum" value="100"/>
+    </module>
+    <module name="NPathComplexity">
+      <!-- default is 200 -->
+      <property name="max" value="500"/>
+    </module>
+
+    <!-- Allows the use of the @SuppressWarnings annotation in the code -->
+    <module name="SuppressWarningsHolder"/>
+  </module>
+
+  <module name="SuppressionFilter">
+    <property name="file" value="${suppressionsFile}"/>
+  </module>
+
+  <!-- Allows the use of the @SuppressWarnings annotation in the code -->
+  <module name="SuppressWarningsFilter"/>
+</module>
diff --git a/checkstyle/import-control-core.xml b/checkstyle/import-control-core.xml
new file mode 100644
index 0000000..6136ff5
--- /dev/null
+++ b/checkstyle/import-control-core.xml
@@ -0,0 +1,116 @@
+<!DOCTYPE import-control PUBLIC
+"-//Puppy Crawl//DTD Import Control 1.1//EN"
+"http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="kafka">
+
+  <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
+
+  <!-- common library dependencies -->
+  <allow pkg="java" />
+  <allow pkg="scala" />
+  <allow pkg="javax.management" />
+  <allow pkg="org.slf4j" />
+  <allow pkg="org.junit" />
+  <allow pkg="java.security" />
+  <allow pkg="javax.net.ssl" />
+  <allow pkg="javax.security" />
+
+  <allow pkg="kafka.common" />
+  <allow pkg="kafka.utils" />
+  <allow pkg="kafka.serializer" />
+  <allow pkg="org.apache.kafka.common" />
+
+  <!-- see KIP-544 for why KafkaYammerMetrics should be used instead of the global default yammer metrics registry
+       https://cwiki.apache.org/confluence/display/KAFKA/KIP-544%3A+Make+metrics+exposed+via+JMX+configurable -->
+  <disallow class="com.yammer.metrics.Metrics" />
+  <allow pkg="com.yammer.metrics"/>
+
+  <subpackage name="testkit">
+    <allow pkg="kafka.metrics"/>
+    <allow pkg="kafka.raft"/>
+    <allow pkg="kafka.server"/>
+    <allow pkg="kafka.tools"/>
+    <allow pkg="org.apache.kafka.clients"/>
+    <allow pkg="org.apache.kafka.controller"/>
+    <allow pkg="org.apache.kafka.raft"/>
+    <allow pkg="org.apache.kafka.test"/>
+    <allow pkg="org.apache.kafka.metadata" />
+    <allow pkg="org.apache.kafka.metalog" />
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.server.fault" />
+  </subpackage>
+
+  <subpackage name="tools">
+    <allow pkg="org.apache.kafka.clients.admin" />
+    <allow pkg="kafka.admin" />
+    <allow pkg="org.apache.kafka.clients.consumer" />
+    <allow pkg="org.apache.kafka.server.util" />
+    <allow pkg="joptsimple" />
+  </subpackage>
+
+  <subpackage name="coordinator">
+    <allow class="kafka.server.MetadataCache" />
+  </subpackage>
+
+  <subpackage name="examples">
+    <allow pkg="org.apache.kafka.clients" />
+  </subpackage>
+
+  <subpackage name="log.remote">
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.server.log.remote" />
+    <allow pkg="org.apache.kafka.server.metrics" />
+    <allow pkg="org.apache.kafka.storage.internals" />
+    <allow pkg="kafka.log" />
+    <allow pkg="kafka.cluster" />
+    <allow pkg="kafka.server" />
+    <allow pkg="org.mockito" />
+    <allow pkg="org.apache.kafka.test" />
+  </subpackage>
+
+  <subpackage name="server">
+    <allow pkg="kafka" />
+    <allow pkg="org.apache.kafka" />
+  </subpackage>
+
+  <subpackage name="test">
+    <allow pkg="org.apache.kafka.controller"/>
+    <allow pkg="org.apache.kafka.metadata"/>
+    <allow pkg="org.apache.kafka.server.authorizer"/>
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="kafka.test.annotation"/>
+    <allow pkg="kafka.test.junit"/>
+    <allow pkg="kafka.network"/>
+    <allow pkg="kafka.api"/>
+    <allow pkg="kafka.server"/>
+    <allow pkg="kafka.zk" />
+    <allow pkg="org.apache.kafka.clients.admin"/>
+    <allow pkg="integration.kafka.server" class="IntegrationTestHelper"/>
+    <subpackage name="annotation">
+      <allow pkg="kafka.test"/>
+    </subpackage>
+    <subpackage name="junit">
+      <allow pkg="kafka.test"/>
+      <allow pkg="kafka.testkit"/>
+      <allow pkg="org.apache.kafka.clients"/>
+      <allow pkg="org.apache.kafka.metadata" />
+    </subpackage>
+  </subpackage>
+</import-control>
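[Reviewer note: these import-control files feed the Checkstyle ImportControl module configured in checkstyle.xml: each allow/disallow entry whitelists or bans imports for a package and, via subpackage elements, its children. This is how the layering warned about in the THINK HARD comment is actually enforced at check time.]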
diff --git a/checkstyle/import-control-jmh-benchmarks.xml b/checkstyle/import-control-jmh-benchmarks.xml
new file mode 100644
index 0000000..1160e3f
--- /dev/null
+++ b/checkstyle/import-control-jmh-benchmarks.xml
@@ -0,0 +1,61 @@
+<!DOCTYPE import-control PUBLIC
+        "-//Puppy Crawl//DTD Import Control 1.1//EN"
+        "http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="org.apache.kafka.jmh">
+    <allow pkg="java"/>
+    <allow pkg="scala"/>
+    <allow pkg="javax.management"/>
+    <allow pkg="org.slf4j"/>
+    <allow pkg="org.openjdk.jmh.annotations"/>
+    <allow pkg="org.openjdk.jmh.runner"/>
+    <allow pkg="org.openjdk.jmh.infra"/>
+    <allow pkg="java.security"/>
+    <allow pkg="javax.net.ssl"/>
+    <allow pkg="javax.security"/>
+    <allow pkg="org.apache.kafka.common"/>
+    <allow pkg="org.apache.kafka.clients.producer"/>
+    <allow pkg="kafka.cluster"/>
+    <allow pkg="kafka.log"/>
+    <allow pkg="kafka.server"/>
+    <allow pkg="kafka.api"/>
+    <allow pkg="kafka.controller"/>
+    <allow pkg="kafka.coordinator"/>
+    <allow pkg="kafka.network"/>
+    <allow pkg="kafka.utils"/>
+    <allow pkg="kafka.zk"/>
+    <allow class="kafka.utils.Pool"/>
+    <allow class="kafka.utils.KafkaScheduler"/>
+    <allow class="org.apache.kafka.clients.FetchSessionHandler"/>
+    <allow pkg="kafka.common"/>
+    <allow pkg="kafka.message"/>
+    <allow pkg="org.mockito"/>
+    <allow pkg="kafka.security.authorizer"/>
+    <allow pkg="org.apache.kafka.server"/>
+    <allow pkg="org.apache.kafka.storage"/>
+    <allow pkg="org.apache.kafka.clients"/>
+    <allow pkg="org.apache.kafka.coordinator.group"/>
+    <allow pkg="org.apache.kafka.image"/>
+    <allow pkg="org.apache.kafka.metadata"/>
+    <allow pkg="org.apache.kafka.timeline" />
+    <allow pkg="org.apache.kafka.connect" />
+
+    <subpackage name="cache">
+    </subpackage>
+</import-control>
diff --git a/checkstyle/import-control-metadata.xml b/checkstyle/import-control-metadata.xml
new file mode 100644
index 0000000..3fe650c
--- /dev/null
+++ b/checkstyle/import-control-metadata.xml
@@ -0,0 +1,201 @@
+<!DOCTYPE import-control PUBLIC
+        "-//Puppy Crawl//DTD Import Control 1.1//EN"
+        "http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="org.apache.kafka">
+
+    <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
+
+    <!-- common library dependencies -->
+    <allow pkg="java" />
+    <allow pkg="javax.management" />
+    <allow pkg="org.slf4j" />
+    <allow pkg="org.junit" />
+    <allow pkg="org.opentest4j" />
+    <allow pkg="org.hamcrest" />
+    <allow pkg="org.mockito" />
+    <allow pkg="org.easymock" />
+    <allow pkg="org.powermock" />
+    <allow pkg="java.security" />
+    <allow pkg="javax.net.ssl" />
+    <allow pkg="javax.security" />
+    <allow pkg="javax.crypto" />
+    <allow pkg="org.ietf.jgss" />
+    <allow pkg="net.jqwik.api" />
+
+    <!-- no one depends on the server -->
+    <disallow pkg="kafka" />
+
+    <!-- anyone can use public classes -->
+    <allow pkg="org.apache.kafka.common" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.security" />
+    <allow pkg="org.apache.kafka.common.serialization" />
+    <allow pkg="org.apache.kafka.common.utils" />
+    <allow pkg="org.apache.kafka.common.errors" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.memory" />
+
+    <!-- persistent collection factories/non-library-specific wrappers -->
+    <allow pkg="org.apache.kafka.server.immutable" exact-match="true" />
+
+    <subpackage name="common">
+        <subpackage name="metadata">
+            <allow pkg="com.fasterxml.jackson" />
+            <allow pkg="org.apache.kafka.common.protocol" />
+            <allow pkg="org.apache.kafka.common.protocol.types" />
+            <allow pkg="org.apache.kafka.common.message" />
+            <allow pkg="org.apache.kafka.common.metadata" />
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="controller">
+        <allow pkg="org.apache.kafka.clients" />
+        <allow pkg="org.apache.kafka.clients.admin" />
+        <allow pkg="org.apache.kafka.common.acl" />
+        <allow pkg="org.apache.kafka.common.annotation" />
+        <allow pkg="org.apache.kafka.common.config" />
+        <allow pkg="org.apache.kafka.common.feature" />
+        <allow pkg="org.apache.kafka.common.internals" />
+        <allow pkg="org.apache.kafka.common.message" />
+        <allow pkg="org.apache.kafka.common.metadata" />
+        <allow pkg="org.apache.kafka.common.network" />
+        <allow pkg="org.apache.kafka.common.protocol" />
+        <allow pkg="org.apache.kafka.common.quota" />
+        <allow pkg="org.apache.kafka.common.requests" />
+        <allow pkg="org.apache.kafka.common.resource" />
+        <allow pkg="org.apache.kafka.controller" />
+        <allow pkg="org.apache.kafka.image" />
+        <allow pkg="org.apache.kafka.image.writer" />
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.apache.kafka.metadata.authorizer" />
+        <allow pkg="org.apache.kafka.metadata.migration" />
+        <allow pkg="org.apache.kafka.metalog" />
+        <allow pkg="org.apache.kafka.deferred" />
+        <allow pkg="org.apache.kafka.queue" />
+        <allow pkg="org.apache.kafka.raft" />
+        <allow pkg="org.apache.kafka.server.authorizer" />
+        <allow pkg="org.apache.kafka.server.common" />
+        <allow pkg="org.apache.kafka.server.config" />
+        <allow pkg="org.apache.kafka.server.fault" />
+        <allow pkg="org.apache.kafka.server.mutable" />
+        <allow pkg="org.apache.kafka.server.policy"/>
+        <allow pkg="org.apache.kafka.server.util"/>
+        <allow pkg="org.apache.kafka.snapshot" />
+        <allow pkg="org.apache.kafka.test" />
+        <allow pkg="org.apache.kafka.timeline" />
+        <subpackage name="metrics">
+            <allow pkg="com.yammer.metrics"/>
+            <allow pkg="org.apache.kafka.common.metrics" />
+            <allow pkg="org.apache.kafka.server.metrics" />
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="image">
+        <allow pkg="org.apache.kafka.common.config" />
+        <allow pkg="org.apache.kafka.common.message" />
+        <allow pkg="org.apache.kafka.common.metadata" />
+        <allow pkg="org.apache.kafka.common.protocol" />
+        <allow pkg="org.apache.kafka.common.quota" />
+        <allow pkg="org.apache.kafka.common.record" />
+        <allow pkg="org.apache.kafka.common.requests" />
+        <allow pkg="org.apache.kafka.common.resource" />
+        <allow pkg="org.apache.kafka.image" />
+        <allow pkg="org.apache.kafka.image.writer" />
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.apache.kafka.queue" />
+        <allow pkg="org.apache.kafka.clients.admin" />
+        <allow pkg="org.apache.kafka.raft" />
+        <allow pkg="org.apache.kafka.server.common" />
+        <allow pkg="org.apache.kafka.server.fault" />
+        <allow pkg="org.apache.kafka.server.util" />
+        <allow pkg="org.apache.kafka.snapshot" />
+        <allow pkg="org.apache.kafka.test" />
+        <subpackage name="loader">
+            <subpackage name="metrics">
+                <allow pkg="com.yammer.metrics"/>
+                <allow pkg="org.apache.kafka.common.metrics" />
+                <allow pkg="org.apache.kafka.controller.metrics" />
+                <allow pkg="org.apache.kafka.server.metrics" />
+            </subpackage>
+        </subpackage>
+        <subpackage name="publisher">
+            <subpackage name="metrics">
+                <allow pkg="com.yammer.metrics"/>
+                <allow pkg="org.apache.kafka.common.metrics" />
+                <allow pkg="org.apache.kafka.controller.metrics" />
+                <allow pkg="org.apache.kafka.server.metrics" />
+            </subpackage>
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="metadata">
+        <allow pkg="org.apache.kafka.clients" />
+        <allow pkg="org.apache.kafka.common.acl" />
+        <allow pkg="org.apache.kafka.common.annotation" />
+        <allow pkg="org.apache.kafka.common.config" />
+        <allow pkg="org.apache.kafka.common.message" />
+        <allow pkg="org.apache.kafka.common.metadata" />
+        <allow pkg="org.apache.kafka.common.protocol" />
+        <allow pkg="org.apache.kafka.common.quota" />
+        <allow pkg="org.apache.kafka.common.record" />
+        <allow pkg="org.apache.kafka.common.resource" />
+        <allow pkg="org.apache.kafka.common.requests" />
+        <allow pkg="org.apache.kafka.image" />
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.apache.kafka.metalog" />
+        <allow pkg="org.apache.kafka.queue" />
+        <allow pkg="org.apache.kafka.raft" />
+        <allow pkg="org.apache.kafka.server.authorizer" />
+        <allow pkg="org.apache.kafka.server.common" />
+        <allow pkg="org.apache.kafka.server.fault" />
+        <allow pkg="org.apache.kafka.server.config" />
+        <allow pkg="org.apache.kafka.server.util"/>
+        <allow pkg="org.apache.kafka.test" />
+        <subpackage name="authorizer">
+            <allow pkg="org.apache.kafka.common.acl" />
+            <allow pkg="org.apache.kafka.common.requests" />
+            <allow pkg="org.apache.kafka.common.resource" />
+            <allow pkg="org.apache.kafka.controller" />
+            <allow pkg="org.apache.kafka.metadata" />
+            <allow pkg="org.apache.kafka.common.internals" />
+        </subpackage>
+        <subpackage name="migration">
+            <allow pkg="org.apache.kafka.controller" />
+        </subpackage>
+        <subpackage name="bootstrap">
+            <allow pkg="org.apache.kafka.snapshot" />
+        </subpackage>
+        <subpackage name="fault">
+            <allow pkg="org.apache.kafka.server.fault" />
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="metalog">
+        <allow pkg="org.apache.kafka.common.metadata" />
+        <allow pkg="org.apache.kafka.common.protocol" />
+        <allow pkg="org.apache.kafka.common.record" />
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.apache.kafka.metalog" />
+        <allow pkg="org.apache.kafka.raft" />
+        <allow pkg="org.apache.kafka.snapshot" />
+        <allow pkg="org.apache.kafka.queue" />
+        <allow pkg="org.apache.kafka.server.common" />
+        <allow pkg="org.apache.kafka.test" />
+    </subpackage>
+
+</import-control>
diff --git a/checkstyle/import-control-server-common.xml b/checkstyle/import-control-server-common.xml
new file mode 100644
index 0000000..a8d032c
--- /dev/null
+++ b/checkstyle/import-control-server-common.xml
@@ -0,0 +1,107 @@
+<!DOCTYPE import-control PUBLIC
+        "-//Puppy Crawl//DTD Import Control 1.1//EN"
+        "http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="org.apache.kafka">
+
+    <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
+
+    <!-- common library dependencies -->
+    <allow pkg="java" />
+    <allow pkg="javax.management" />
+    <allow pkg="org.slf4j" />
+    <allow pkg="org.junit" />
+    <allow pkg="org.opentest4j" />
+    <allow pkg="org.hamcrest" />
+    <allow pkg="org.mockito" />
+    <allow pkg="org.easymock" />
+    <allow pkg="org.powermock" />
+    <allow pkg="java.security" />
+    <allow pkg="javax.net.ssl" />
+    <allow pkg="javax.security" />
+    <allow pkg="org.ietf.jgss" />
+    <allow pkg="net.jqwik.api" />
+
+    <!-- no one depends on the server -->
+    <disallow pkg="kafka" />
+
+    <!-- anyone can use public classes -->
+    <allow pkg="org.apache.kafka.common" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.security" />
+    <allow pkg="org.apache.kafka.common.serialization" />
+    <allow pkg="org.apache.kafka.common.utils" />
+    <allow pkg="org.apache.kafka.common.errors" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.memory" />
+
+    <!-- persistent collection factories/non-library-specific wrappers -->
+    <allow pkg="org.apache.kafka.server.immutable" exact-match="true" />
+
+    <subpackage name="queue">
+        <allow pkg="org.apache.kafka.test" />
+    </subpackage>
+
+    <subpackage name="server">
+        <allow pkg="org.apache.kafka.common" />
+        <allow pkg="joptsimple" />
+
+        <subpackage name="common">
+            <allow pkg="org.apache.kafka.server.common" />
+        </subpackage>
+
+        <subpackage name="immutable">
+            <allow pkg="org.apache.kafka.server.util"/>
+            <!-- only the factory package can use persistent collection library-specific wrapper implementations -->
+            <!-- the library-specific wrapper implementation for PCollections -->
+            <allow pkg="org.apache.kafka.server.immutable.pcollections" />
+            <subpackage name="pcollections">
+                <allow pkg="org.pcollections" />
+            </subpackage>
+        </subpackage>
+
+        <subpackage name="metrics">
+            <allow pkg="com.yammer.metrics" />
+        </subpackage>
+
+        <subpackage name="network">
+            <allow pkg="org.apache.kafka.server.authorizer" />
+        </subpackage>
+
+        <subpackage name="util">
+            <!-- InterBrokerSendThread uses some clients classes that are not part of the public -->
+            <!-- API but are still relatively common -->
+            <allow class="org.apache.kafka.clients.ClientRequest" />
+            <allow class="org.apache.kafka.clients.ClientResponse" />
+            <allow class="org.apache.kafka.clients.KafkaClient" />
+            <allow class="org.apache.kafka.clients.RequestCompletionHandler" />
+            <allow pkg="com.fasterxml.jackson" />
+            <allow pkg="org.apache.kafka.server.util.json" />
+
+            <allow class="org.apache.kafka.server.util.TopicFilter.IncludeList" />
+            <subpackage name="timer">
+                <allow class="org.apache.kafka.server.util.MockTime" />
+                <allow class="org.apache.kafka.server.util.ShutdownableThread" />
+                <allow class="org.apache.kafka.test.TestUtils" />
+            </subpackage>
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="admin">
+        <allow pkg="org.apache.kafka.server.common" />
+    </subpackage>
+</import-control>
diff --git a/checkstyle/import-control-storage.xml b/checkstyle/import-control-storage.xml
new file mode 100644
index 0000000..2e0b85d
--- /dev/null
+++ b/checkstyle/import-control-storage.xml
@@ -0,0 +1,133 @@
+<!DOCTYPE import-control PUBLIC
+        "-//Puppy Crawl//DTD Import Control 1.1//EN"
+        "http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="org.apache.kafka">
+
+    <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
+
+    <!-- common library dependencies -->
+    <allow pkg="java" />
+    <allow pkg="javax.management" />
+    <allow pkg="org.slf4j" />
+    <allow pkg="org.junit" />
+    <allow pkg="org.opentest4j" />
+    <allow pkg="org.hamcrest" />
+    <allow pkg="org.mockito" />
+    <allow pkg="org.easymock" />
+    <allow pkg="org.powermock" />
+    <allow pkg="java.security" />
+    <allow pkg="javax.net.ssl" />
+    <allow pkg="javax.security" />
+    <allow pkg="org.ietf.jgss" />
+    <allow pkg="net.jqwik.api" />
+
+    <!-- no one depends on the server -->
+    <disallow pkg="kafka" />
+
+    <!-- anyone can use public classes -->
+    <allow pkg="org.apache.kafka.common" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.security" />
+    <allow pkg="org.apache.kafka.common.serialization" />
+    <allow pkg="org.apache.kafka.common.utils" />
+    <allow pkg="org.apache.kafka.common.errors" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.memory" />
+
+
+    <subpackage name="server">
+        <allow pkg="org.apache.kafka.common" />
+
+        <subpackage name="log">
+            <allow pkg="com.fasterxml.jackson" />
+            <allow pkg="kafka.api" />
+            <allow pkg="kafka.utils" />
+            <allow pkg="org.apache.kafka.clients" />
+            <allow pkg="org.apache.kafka.server.common" />
+            <allow pkg="org.apache.kafka.server.config" />
+            <allow pkg="org.apache.kafka.server.log" />
+            <allow pkg="org.apache.kafka.server.record" />
+            <allow pkg="org.apache.kafka.test" />
+            <allow pkg="org.apache.kafka.storage"/>
+            <subpackage name="remote">
+                <allow pkg="scala.collection" />
+                <subpackage name="storage">
+                    <allow pkg="com.yammer.metrics.core" />
+                    <allow pkg="org.apache.kafka.server.metrics" />
+                </subpackage>
+            </subpackage>
+        </subpackage>
+    </subpackage>
+
+    <subpackage name="storage.internals">
+        <allow pkg="com.yammer.metrics.core" />
+        <allow pkg="org.apache.kafka.server"/>
+        <allow pkg="org.apache.kafka.storage.internals"/>
+        <allow pkg="org.apache.kafka.common" />
+        <allow pkg="com.github.benmanes.caffeine.cache" />
+    </subpackage>
+
+    <!-- START OF TIERED STORAGE INTEGRATION TEST IMPORT DEPENDENCIES -->
+    <subpackage name="tiered.storage">
+        <allow pkg="scala" />
+
+        <allow pkg="org.apache.kafka.tiered.storage" />
+        <allow pkg="org.apache.kafka.tiered.storage.actions" />
+        <allow pkg="org.apache.kafka.tiered.storage.specs" />
+        <allow pkg="org.apache.kafka.tiered.storage.utils" />
+
+        <allow pkg="kafka.api" />
+        <allow pkg="kafka.log" />
+        <allow pkg="kafka.server" />
+        <allow pkg="kafka.utils" />
+
+        <allow pkg="org.apache.kafka.common.config" />
+        <allow pkg="org.apache.kafka.common.header" />
+        <allow pkg="org.apache.kafka.common.record" />
+        <allow pkg="org.apache.kafka.common.replica" />
+        <allow pkg="org.apache.kafka.common.network" />
+
+        <allow pkg="org.apache.kafka.clients" />
+        <allow pkg="org.apache.kafka.clients.admin" />
+        <allow pkg="org.apache.kafka.clients.consumer" />
+        <allow pkg="org.apache.kafka.clients.producer" />
+
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.apache.kafka.storage"/>
+        <allow pkg="org.apache.kafka.storage.internals.log" />
+
+        <allow pkg="org.apache.kafka.server.log" />
+        <allow pkg="org.apache.kafka.server.log.remote" />
+        <allow pkg="org.apache.kafka.server.log.remote.storage" />
+
+        <allow pkg="org.apache.kafka.test" />
+        <subpackage name="actions">
+        </subpackage>
+
+        <subpackage name="specs">
+        </subpackage>
+
+        <subpackage name="utils">
+        </subpackage>
+
+        <subpackage name="integration">
+        </subpackage>
+    </subpackage>
+    <!-- END OF TIERED STORAGE INTEGRATION TEST IMPORT DEPENDENCIES -->
+
+</import-control>
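
The storage rules above are more permissive inside storage.internals: the whole org.apache.kafka.common tree is open (no exact-match), and the Caffeine cache library may be pulled in only there. A small sketch under those rules (class name hypothetical, for illustration only):

    package org.apache.kafka.storage.internals.log;

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;   // permitted only inside storage.internals
    import org.apache.kafka.common.TopicPartition;        // whole org.apache.kafka.common tree is allowed here

    public class ExampleEntryCache {                      // hypothetical name
        private final Cache<TopicPartition, Long> cache =
                Caffeine.newBuilder().maximumSize(1024).build();
    }
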
diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml
new file mode 100644
index 0000000..0843deb
--- /dev/null
+++ b/checkstyle/import-control.xml
@@ -0,0 +1,579 @@
+<!DOCTYPE import-control PUBLIC
+"-//Puppy Crawl//DTD Import Control 1.1//EN"
+"http://www.puppycrawl.com/dtds/import_control_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<import-control pkg="org.apache.kafka">
+
+  <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
+
+  <!-- common library dependencies -->
+  <allow pkg="java" />
+  <allow pkg="javax.management" />
+  <allow pkg="org.slf4j" />
+  <allow pkg="org.junit" />
+  <allow pkg="org.opentest4j" />
+  <allow pkg="org.hamcrest" />
+  <allow pkg="org.mockito" />
+  <allow pkg="org.easymock" />
+  <allow pkg="org.powermock" />
+  <allow pkg="java.security" />
+  <allow pkg="javax.net.ssl" />
+  <allow pkg="javax.security" />
+  <allow pkg="org.ietf.jgss" />
+  <allow pkg="net.jqwik.api" />
+
+  <!-- no one depends on the server -->
+  <disallow pkg="kafka" />
+
+  <!-- anyone can use public classes -->
+  <allow pkg="org.apache.kafka.common" exact-match="true" />
+  <allow pkg="org.apache.kafka.common.security" />
+  <allow pkg="org.apache.kafka.common.serialization" />
+  <allow pkg="org.apache.kafka.common.utils" />
+  <allow pkg="org.apache.kafka.common.errors" exact-match="true" />
+  <allow pkg="org.apache.kafka.common.memory" />
+
+  <subpackage name="common">
+    <allow class="org.apache.kafka.clients.consumer.ConsumerRecord" exact-match="true" />
+    <allow class="org.apache.kafka.common.message.ApiMessageType" exact-match="true" />
+    <disallow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.common" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.annotation" />
+    <allow pkg="org.apache.kafka.common.config" exact-match="true" />
+    <allow pkg="org.apache.kafka.common.internals" exact-match="true" />
+    <allow pkg="org.apache.kafka.test" />
+
+    <subpackage name="acl">
+      <allow pkg="org.apache.kafka.common.annotation" />
+      <allow pkg="org.apache.kafka.common.acl" />
+      <allow pkg="org.apache.kafka.common.resource" />
+    </subpackage>
+
+    <subpackage name="config">
+      <allow pkg="org.apache.kafka.common.config" />
+      <!-- for testing -->
+      <allow pkg="org.apache.kafka.common.metrics" />
+    </subpackage>
+
+    <!-- Third-party compression libraries should only be referenced from this package -->
+    <subpackage name="compress">
+      <allow pkg="com.github.luben.zstd" />
+      <allow pkg="net.jpountz.lz4" />
+      <allow pkg="net.jpountz.xxhash" />
+      <allow pkg="org.apache.kafka.common.compress" />
+      <allow pkg="org.xerial.snappy" />
+    </subpackage>
+
+    <subpackage name="message">
+      <allow pkg="com.fasterxml.jackson" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.protocol.types" />
+      <allow pkg="org.apache.kafka.common.message" />
+      <allow pkg="org.apache.kafka.common.record" />
+    </subpackage>
+
+    <subpackage name="metrics">
+      <allow pkg="org.apache.kafka.common.metrics" />
+    </subpackage>
+
+    <subpackage name="memory">
+      <allow pkg="org.apache.kafka.common.metrics" />
+    </subpackage>
+
+    <subpackage name="network">
+      <allow pkg="org.apache.kafka.common.security.auth" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.config" />
+      <allow pkg="org.apache.kafka.common.metrics" />
+      <allow pkg="org.apache.kafka.common.security" />
+      <allow class="org.apache.kafka.common.requests.ApiVersionsResponse" />
+    </subpackage>
+
+    <subpackage name="resource">
+      <allow pkg="org.apache.kafka.common.annotation" />
+      <allow pkg="org.apache.kafka.common.resource" />
+    </subpackage>
+
+    <subpackage name="security">
+      <allow pkg="org.apache.kafka.common.annotation" />
+      <allow pkg="org.apache.kafka.common.network" />
+      <allow pkg="org.apache.kafka.common.config" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.errors" />
+      <!-- To access DefaultPrincipalData -->
+      <allow pkg="org.apache.kafka.common.message" />
+      <subpackage name="authenticator">
+        <allow pkg="org.apache.kafka.common.message" />
+        <allow pkg="org.apache.kafka.common.protocol.types" />
+        <allow pkg="org.apache.kafka.common.requests" />
+        <allow pkg="org.apache.kafka.clients" />
+      </subpackage>
+      <subpackage name="ssl">
+        <allow pkg="javax.crypto" />
+      </subpackage>
+      <subpackage name="scram">
+        <allow pkg="javax.crypto" />
+      </subpackage>
+      <subpackage name="oauthbearer">
+        <allow pkg="com.fasterxml.jackson.databind" />
+        <allow pkg="org.jose4j" />
+      </subpackage>
+    </subpackage>
+
+    <subpackage name="protocol">
+      <allow pkg="org.apache.kafka.common.errors" />
+      <allow pkg="org.apache.kafka.common.message" />
+      <allow pkg="org.apache.kafka.common.network" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.protocol.types" />
+      <allow pkg="org.apache.kafka.common.record" />
+      <allow pkg="org.apache.kafka.common.requests" />
+      <allow pkg="org.apache.kafka.common.resource" />
+      <allow pkg="com.fasterxml.jackson" />
+    </subpackage>
+
+    <subpackage name="record">
+      <allow pkg="org.apache.kafka.common.compress" />
+      <allow pkg="org.apache.kafka.common.header" />
+      <allow pkg="org.apache.kafka.common.record" />
+      <allow pkg="org.apache.kafka.common.message" />
+      <allow pkg="org.apache.kafka.common.network" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.protocol.types" />
+      <allow pkg="org.apache.kafka.common.errors" />
+    </subpackage>
+
+    <subpackage name="header">
+      <allow pkg="org.apache.kafka.common.header" />
+      <allow pkg="org.apache.kafka.common.record" />
+    </subpackage>
+
+    <subpackage name="requests">
+      <allow pkg="org.apache.kafka.common.acl" />
+      <allow pkg="org.apache.kafka.common.feature" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.message" />
+      <allow pkg="org.apache.kafka.common.network" />
+      <allow pkg="org.apache.kafka.common.quota" />
+      <allow pkg="org.apache.kafka.common.requests" />
+      <allow pkg="org.apache.kafka.common.resource" />
+      <allow pkg="org.apache.kafka.common.record" />
+      <!-- for AuthorizableRequestContext interface -->
+      <allow pkg="org.apache.kafka.server.authorizer" />
+      <!-- for IncrementalAlterConfigsRequest Builder -->
+      <allow pkg="org.apache.kafka.clients.admin" />
+      <!-- for testing -->
+      <allow pkg="org.apache.kafka.common.errors" />
+    </subpackage>
+
+    <subpackage name="serialization">
+      <allow pkg="org.apache.kafka.clients" />
+      <allow class="org.apache.kafka.common.errors.SerializationException" />
+      <allow class="org.apache.kafka.common.header.Headers" />
+    </subpackage>
+
+    <subpackage name="utils">
+      <allow pkg="org.apache.kafka.common" />
+      <allow pkg="org.apache.log4j" />
+    </subpackage>
+
+    <subpackage name="quotas">
+      <allow pkg="org.apache.kafka.common" />
+    </subpackage>
+  </subpackage>
+
+  <subpackage name="clients">
+    <allow pkg="org.apache.kafka.common" />
+    <allow pkg="org.apache.kafka.clients" exact-match="true"/>
+    <allow pkg="org.apache.kafka.test" />
+
+    <subpackage name="consumer">
+      <allow pkg="org.apache.kafka.clients.consumer" />
+    </subpackage>
+
+    <subpackage name="producer">
+      <allow pkg="org.apache.kafka.clients.consumer" />
+      <allow pkg="org.apache.kafka.clients.producer" />
+    </subpackage>
+
+    <subpackage name="admin">
+      <allow pkg="org.apache.kafka.clients.admin" />
+      <allow pkg="org.apache.kafka.clients.consumer.internals" />
+      <allow pkg="org.apache.kafka.clients.consumer" />
+    </subpackage>
+  </subpackage>
+
+  <subpackage name="coordinator">
+    <subpackage name="group">
+      <allow pkg="org.apache.kafka.clients.consumer" />
+      <allow pkg="org.apache.kafka.common.annotation" />
+      <allow pkg="org.apache.kafka.common.config" />
+      <allow pkg="org.apache.kafka.common.internals" />
+      <allow pkg="org.apache.kafka.common.message" />
+      <allow pkg="org.apache.kafka.common.metadata" />
+      <allow pkg="org.apache.kafka.common.network" />
+      <allow pkg="org.apache.kafka.common.protocol" />
+      <allow pkg="org.apache.kafka.common.record" />
+      <allow pkg="org.apache.kafka.common.requests" />
+      <allow pkg="org.apache.kafka.coordinator.group" />
+      <allow pkg="org.apache.kafka.deferred" />
+      <allow pkg="org.apache.kafka.image"/>
+      <allow pkg="org.apache.kafka.server.common"/>
+      <allow pkg="org.apache.kafka.server.record"/>
+      <allow pkg="org.apache.kafka.server.util"/>
+      <allow pkg="org.apache.kafka.test" />
+      <allow pkg="org.apache.kafka.timeline" />
+    </subpackage>
+  </subpackage>
+
+  <subpackage name="server">
+    <allow pkg="org.apache.kafka.common" />
+
+    <!-- This is required to make AlterConfigPolicyTest work. -->
+    <allow pkg="org.apache.kafka.server.policy" />
+  </subpackage>
+
+  <subpackage name="shell">
+    <allow pkg="com.fasterxml.jackson" />
+    <allow pkg="kafka.raft"/>
+    <allow pkg="kafka.server"/>
+    <allow pkg="kafka.tools"/>
+    <allow pkg="net.sourceforge.argparse4j" />
+    <allow pkg="org.apache.kafka.common"/>
+    <allow pkg="org.apache.kafka.metadata"/>
+    <allow pkg="org.apache.kafka.controller.util"/>
+    <allow pkg="org.apache.kafka.queue"/>
+    <allow pkg="org.apache.kafka.raft"/>
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.server.fault" />
+    <allow pkg="org.apache.kafka.shell"/>
+    <allow pkg="org.apache.kafka.image"/>
+    <allow pkg="org.apache.kafka.image.loader"/>
+    <allow pkg="org.apache.kafka.snapshot"/>
+    <allow pkg="org.jline"/>
+    <allow pkg="scala.compat"/>
+  </subpackage>
+
+  <subpackage name="tools">
+    <allow pkg="org.apache.kafka.common"/>
+    <allow pkg="org.apache.kafka.server.util" />
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.clients.admin" />
+    <allow pkg="org.apache.kafka.clients.producer" />
+    <allow pkg="org.apache.kafka.clients.consumer" />
+    <allow pkg="org.apache.kafka.test" />
+    <allow pkg="org.apache.kafka.connect.runtime" />
+    <allow pkg="org.apache.kafka.connect.runtime.isolation" />
+    <allow pkg="com.fasterxml.jackson" />
+    <allow pkg="org.jose4j" />
+    <allow pkg="net.sourceforge.argparse4j" />
+    <allow pkg="org.apache.log4j" />
+    <allow pkg="kafka.test" />
+    <allow pkg="joptsimple" />
+    <allow pkg="javax.rmi.ssl"/>
+    <allow pkg="kafka.utils" />
+    <allow pkg="scala.collection" />
+  </subpackage>
+
+  <subpackage name="trogdor">
+    <allow pkg="com.fasterxml.jackson" />
+    <allow pkg="javax.servlet" />
+    <allow pkg="javax.ws.rs" />
+    <allow pkg="net.sourceforge.argparse4j" />
+    <allow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.clients.admin" />
+    <allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
+    <allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
+    <allow pkg="org.apache.kafka.common" />
+    <allow pkg="org.apache.kafka.test"/>
+    <allow pkg="org.apache.kafka.trogdor" />
+    <allow pkg="org.eclipse.jetty" />
+    <allow pkg="org.glassfish.jersey" />
+  </subpackage>
+
+  <subpackage name="message">
+    <allow pkg="com.fasterxml.jackson" />
+    <allow pkg="com.fasterxml.jackson.annotation" />
+    <allow pkg="net.sourceforge.argparse4j" />
+    <allow pkg="org.apache.message" />
+  </subpackage>
+
+  <subpackage name="streams">
+    <allow pkg="org.apache.kafka.common"/>
+    <allow pkg="org.apache.kafka.test"/>
+    <allow pkg="org.apache.kafka.clients"/>
+    <allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
+    <allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
+    <allow pkg="org.apache.kafka.server.util"/>
+
+    <allow pkg="org.apache.kafka.streams"/>
+
+    <subpackage name="examples">
+      <allow pkg="com.fasterxml.jackson" />
+      <allow pkg="org.apache.kafka.connect.json" />
+    </subpackage>
+
+    <subpackage name="internals">
+      <allow pkg="com.fasterxml.jackson" />
+    </subpackage>
+
+    <subpackage name="perf">
+      <allow pkg="com.fasterxml.jackson.databind" />
+    </subpackage>
+
+    <subpackage name="integration">
+      <allow pkg="kafka.admin" />
+      <allow pkg="kafka.api" />
+      <allow pkg="kafka.cluster" />
+      <allow pkg="kafka.server" />
+      <allow pkg="kafka.tools" />
+      <allow pkg="kafka.utils" />
+      <allow pkg="kafka.log" />
+      <allow pkg="scala" />
+      <allow class="kafka.zk.EmbeddedZookeeper"/>
+      <allow pkg="com.fasterxml.jackson" />
+      <allow pkg="org.apache.kafka.tools" />
+    </subpackage>
+
+    <subpackage name="test">
+      <allow pkg="kafka.admin" />
+    </subpackage>
+
+    <subpackage name="state">
+      <allow pkg="org.rocksdb" />
+    </subpackage>
+
+    <subpackage name="processor">
+      <subpackage name="internals">
+        <allow pkg="com.fasterxml.jackson" />
+        <allow pkg="kafka.utils" />
+        <allow pkg="org.apache.zookeeper" />
+      </subpackage>
+    </subpackage>
+  </subpackage>
+
+  <subpackage name="log4jappender">
+    <allow pkg="org.apache.log4j" />
+    <allow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.common" />
+    <allow pkg="org.apache.kafka.test" />
+  </subpackage>
+
+  <subpackage name="test">
+    <allow pkg="org.apache.kafka" />
+    <allow pkg="org.bouncycastle" />
+    <allow pkg="org.rocksdb" />
+  </subpackage>
+
+  <subpackage name="raft">
+    <allow pkg="org.apache.kafka.raft" />
+    <allow pkg="org.apache.kafka.metadata" />
+    <allow pkg="org.apache.kafka.snapshot" />
+    <allow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.common.config" />
+    <allow pkg="org.apache.kafka.common.message" />
+    <allow pkg="org.apache.kafka.common.metadata" />
+    <allow pkg="org.apache.kafka.common.metrics" />
+    <allow pkg="org.apache.kafka.common.record" />
+    <allow pkg="org.apache.kafka.common.requests" />
+    <allow pkg="org.apache.kafka.common.protocol" />
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.server.common.serialization" />
+    <allow pkg="org.apache.kafka.test"/>
+    <allow pkg="com.fasterxml.jackson" />
+    <allow pkg="net.jqwik"/>
+  </subpackage>
+
+  <subpackage name="snapshot">
+    <allow pkg="org.apache.kafka.common.record" />
+    <allow pkg="org.apache.kafka.common.message" />
+    <allow pkg="org.apache.kafka.raft" />
+    <allow pkg="org.apache.kafka.server.common" />
+    <allow pkg="org.apache.kafka.test"/>
+  </subpackage>
+
+  <subpackage name="connect">
+    <allow pkg="org.apache.kafka.common" />
+    <allow pkg="org.apache.kafka.connect.data" />
+    <allow pkg="org.apache.kafka.connect.errors" />
+    <allow pkg="org.apache.kafka.connect.header" />
+    <allow pkg="org.apache.kafka.connect.components"/>
+    <allow pkg="org.apache.kafka.clients" />
+    <allow pkg="org.apache.kafka.test"/>
+
+    <subpackage name="source">
+      <allow pkg="org.apache.kafka.connect.connector" />
+      <allow pkg="org.apache.kafka.connect.storage" />
+    </subpackage>
+
+    <subpackage name="sink">
+      <allow pkg="org.apache.kafka.clients.consumer" />
+      <allow pkg="org.apache.kafka.connect.connector" />
+      <allow pkg="org.apache.kafka.connect.transforms" />
+      <allow pkg="org.apache.kafka.connect.storage" />
+    </subpackage>
+
+    <subpackage name="converters">
+      <allow pkg="org.apache.kafka.connect.storage" />
+    </subpackage>
+
+    <subpackage name="connector.policy">
+      <allow pkg="org.apache.kafka.connect.health" />
+      <allow pkg="org.apache.kafka.connect.connector" />
+      <!-- for testing -->
+      <allow pkg="org.apache.kafka.connect.runtime" />
+    </subpackage>
+
+    <subpackage name="rest">
+      <allow pkg="org.apache.kafka.connect.health" />
+      <allow pkg="javax.ws.rs" />
+      <allow pkg= "javax.security.auth"/>
+      <subpackage name="basic">
+        <allow pkg="org.apache.kafka.connect.rest"/>
+        <allow pkg="javax.annotation"/>
+      </subpackage>
+    </subpackage>
+
+    <subpackage name="mirror">
+      <allow pkg="org.apache.kafka.clients.consumer" />
+      <allow pkg="org.apache.kafka.connect.source" />
+      <allow pkg="org.apache.kafka.connect.sink" />
+      <allow pkg="org.apache.kafka.connect.storage" />
+      <allow pkg="org.apache.kafka.connect.connector" />
+      <allow pkg="org.apache.kafka.connect.runtime" />
+      <allow pkg="org.apache.kafka.connect.runtime.distributed" />
+      <allow pkg="org.apache.kafka.connect.util" />
+      <allow pkg="org.apache.kafka.connect.converters" />
+      <allow pkg="org.apache.kafka.connect.json" />
+      <allow pkg="net.sourceforge.argparse4j" />
+      <!-- for tests -->
+      <allow pkg="org.apache.kafka.connect.integration" />
+      <allow pkg="org.apache.kafka.connect.mirror" />
+      <allow pkg="kafka.server" />
+      <subpackage name="rest">
+        <allow pkg="javax.ws.rs" />
+      </subpackage>
+    </subpackage>
+
+    <subpackage name="runtime">
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="org.reflections"/>
+      <allow pkg="org.reflections.util"/>
+      <allow pkg="javax.crypto"/>
+      <allow pkg="org.eclipse.jetty.util" />
+
+      <subpackage name="rest">
+        <allow pkg="org.eclipse.jetty" />
+        <allow pkg="javax.ws.rs" />
+        <allow pkg="javax.servlet" />
+        <allow pkg="org.glassfish.jersey" />
+        <allow pkg="com.fasterxml.jackson" />
+        <allow pkg="org.apache.http"/>
+        <allow pkg="io.swagger.v3.oas.annotations"/>
+        <subpackage name="resources">
+          <allow pkg="org.apache.log4j" />
+        </subpackage>
+      </subpackage>
+
+      <subpackage name="isolation">
+        <allow pkg="com.fasterxml.jackson" />
+        <allow pkg="org.apache.maven.artifact.versioning" />
+        <allow pkg="javax.tools" />
+      </subpackage>
+
+      <subpackage name="distributed">
+        <allow pkg="javax.ws.rs.core" />
+      </subpackage>
+    </subpackage>
+
+    <subpackage name="cli">
+      <allow pkg="org.apache.kafka.connect.runtime" />
+      <allow pkg="org.apache.kafka.connect.storage" />
+      <allow pkg="org.apache.kafka.connect.util" />
+      <allow pkg="org.apache.kafka.common" />
+      <allow pkg="org.apache.kafka.connect.connector.policy" />
+      <allow pkg="org.apache.kafka.connect.json" />
+    </subpackage>
+
+    <subpackage name="storage">
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="org.apache.kafka.common.serialization" />
+      <allow pkg="javax.crypto.spec"/>
+    </subpackage>
+
+    <subpackage name="util">
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="org.reflections.vfs" />
+      <!-- for annotations to avoid code duplication -->
+      <allow pkg="com.fasterxml.jackson.annotation" />
+      <allow pkg="com.fasterxml.jackson.databind" />
+      <subpackage name="clusters">
+        <allow pkg="kafka.cluster" />
+        <allow pkg="kafka.server" />
+        <allow pkg="kafka.zk" />
+        <allow pkg="kafka.utils" />
+        <allow class="javax.servlet.http.HttpServletResponse" />
+        <allow class="javax.ws.rs.core.Response" />
+        <allow pkg="com.fasterxml.jackson.core.type" />
+        <allow pkg="org.apache.kafka.metadata" />
+        <allow pkg="org.eclipse.jetty.client"/>
+      </subpackage>
+    </subpackage>
+
+    <subpackage name="integration">
+      <allow pkg="org.apache.kafka.connect.util.clusters" />
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="javax.ws.rs" />
+      <allow pkg="org.apache.http"/>
+      <allow pkg="org.eclipse.jetty.util"/>
+      <!-- for tests -->
+      <allow pkg="org.apache.kafka.server.util" />
+    </subpackage>
+
+    <subpackage name="json">
+      <allow pkg="com.fasterxml.jackson" />
+      <allow pkg="org.apache.kafka.common.serialization" />
+      <allow pkg="org.apache.kafka.common.errors" />
+      <allow pkg="org.apache.kafka.connect.storage" />
+    </subpackage>
+
+    <subpackage name="file">
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="org.apache.kafka.clients.consumer" />
+      <!-- for tests -->
+      <allow pkg="org.easymock" />
+      <allow pkg="org.powermock" />
+    </subpackage>
+
+    <subpackage name="tools">
+      <allow pkg="org.apache.kafka.connect" />
+      <allow pkg="org.apache.kafka.server.util" />
+      <allow pkg="com.fasterxml.jackson" />
+    </subpackage>
+
+    <subpackage name="transforms">
+      <allow class="org.apache.kafka.connect.connector.ConnectRecord" />
+      <allow class="org.apache.kafka.connect.source.SourceRecord" />
+      <allow class="org.apache.kafka.connect.sink.SinkRecord" />
+      <allow pkg="org.apache.kafka.connect.transforms.util" />
+    </subpackage>
+  </subpackage>
+
+</import-control>
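
Besides package-level rules, this file also uses class-level allows, for example in connect.transforms, where only the three listed record classes are visible rather than the whole connector package. A sketch of what passes there (hypothetical class, loosely mirroring the shape of a Connect transformation):

    package org.apache.kafka.connect.transforms;

    import org.apache.kafka.connect.connector.ConnectRecord;  // class-level allow above
    // import org.apache.kafka.connect.connector.Connector;   // would be flagged: not one of the allowed classes

    public abstract class ExampleTransform<R extends ConnectRecord<R>> {  // hypothetical name
        public abstract R apply(R record);  // illustration only
    }
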
diff --git a/checkstyle/java.header b/checkstyle/java.header
new file mode 100644
index 0000000..45fd2d5
--- /dev/null
+++ b/checkstyle/java.header
@@ -0,0 +1,16 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml
new file mode 100644
index 0000000..88c5c20
--- /dev/null
+++ b/checkstyle/suppressions.xml
@@ -0,0 +1,349 @@
+<?xml version="1.0"?>
+
+<!DOCTYPE suppressions PUBLIC
+        "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+        "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd";>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<suppressions>
+
+    <!-- Note that [/\\] must be used as the path separator for cross-platform support -->
+
+    <!-- Generator -->
+    <suppress checks="CyclomaticComplexity|BooleanExpressionComplexity"
+              files="(SchemaGenerator|MessageDataGenerator|FieldSpec|FieldType).java"/>
+    <suppress checks="NPathComplexity"
+              files="(MessageDataGenerator|FieldSpec|WorkerSinkTask).java"/>
+    <suppress checks="JavaNCSS"
+              files="(ApiMessageType|FieldSpec|MessageDataGenerator|KafkaConsumerTest).java"/>
+    <suppress checks="MethodLength"
+              files="(FieldSpec|MessageDataGenerator).java"/>
+    <suppress id="dontUseSystemExit"
+              files="MessageGenerator.java"/>
+
+    <!-- core -->
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+              files="core[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+    <suppress checks="NPathComplexity" files="(ClusterTestExtensions|KafkaApisBuilder).java"/>
+    <suppress checks="NPathComplexity|ClassFanOutComplexity|ClassDataAbstractionCoupling" files="(RemoteLogManager|RemoteLogManagerTest).java"/>
+    <suppress checks="ClassFanOutComplexity" files="RemoteLogManagerTest.java"/>
+    <suppress checks="MethodLength"
+              files="(KafkaClusterTestKit).java"/>
+
+    <!-- Clients -->
+    <suppress id="dontUseSystemExit"
+              files="Exit.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="(AbstractFetch|Sender|SenderTest|ConsumerCoordinator|KafkaConsumer|KafkaProducer|Utils|TransactionManager|TransactionManagerTest|KafkaAdminClient|NetworkClient|Admin|KafkaRaftClient|KafkaRaftClientTest|RaftClientTestContext).java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="(SaslServerAuthenticator|SaslAuthenticatorTest).java"/>
+    <suppress checks="NPath"
+              files="SaslServerAuthenticator.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="Errors.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="Utils.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="AbstractRequest.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="AbstractResponse.java"/>
+
+    <suppress checks="MethodLength"
+              files="(KerberosLogin|RequestResponseTest|ConnectMetricsRegistry|KafkaConsumer|AbstractStickyAssignor).java"/>
+
+    <suppress checks="ParameterNumber"
+              files="(NetworkClient|FieldSpec|KafkaRaftClient).java"/>
+    <suppress checks="ParameterNumber"
+              files="(KafkaConsumer|ConsumerCoordinator).java"/>
+    <suppress checks="ParameterNumber"
+              files="Sender.java"/>
+    <suppress checks="ParameterNumber"
+              files="ConfigDef.java"/>
+    <suppress checks="ParameterNumber"
+              files="DefaultRecordBatch.java"/>
+    <suppress checks="ParameterNumber"
+              files="MemoryRecordsBuilder.java"/>
+
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(KafkaConsumer|ConsumerCoordinator|AbstractFetch|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient|MockAdminClient|KafkaRaftClient|KafkaRaftClientTest).java"/>
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(Errors|SaslAuthenticatorTest|AgentTest|CoordinatorTest).java"/>
+
+    <suppress checks="BooleanExpressionComplexity"
+              files="(Utils|Topic|KafkaLZ4BlockOutputStream|AclData|JoinGroupRequest).java"/>
+
+    <suppress checks="CyclomaticComplexity"
+              files="(AbstractFetch|ConsumerCoordinator|OffsetFetcherUtils|KafkaProducer|Sender|ConfigDef|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager|AbstractStickyAssignor|DefaultSslEngineFactory|Authorizer|RecordAccumulator|MemoryRecords|FetchSessionHandler).java"/>
+
+    <suppress checks="JavaNCSS"
+              files="(AbstractRequest|AbstractResponse|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest|KafkaAdminClientTest|KafkaRaftClientTest).java"/>
+
+    <suppress checks="NPathComplexity"
+              files="(ConsumerCoordinator|BufferPool|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|Values|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer|AbstractStickyAssignor|KafkaRaftClient|Authorizer|FetchSessionHandler|RecordAccumulator).java"/>
+
+    <suppress checks="(JavaNCSS|CyclomaticComplexity|MethodLength)"
+              files="CoordinatorClient.java"/>
+    <suppress checks="(UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
+              files="Murmur3.java"/>
+
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+            files="clients[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+
+    <suppress checks="NPathComplexity"
+            files="MessageTest.java|OffsetFetchRequest.java"/>
+
+    <!-- Clients tests -->
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(Sender|Fetcher|OffsetFetcher|KafkaConsumer|Metrics|RequestResponse|TransactionManager|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
+
+    <suppress checks="ClassFanOutComplexity"
+              files="(ConsumerCoordinator|KafkaConsumer|RequestResponse|Fetcher|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
+
+    <suppress checks="ClassFanOutComplexity"
+              files="MockAdminClient.java"/>
+
+    <suppress checks="CyclomaticComplexity"
+              files="(OffsetFetcher|RequestResponse)Test.java"/>
+
+    <suppress checks="JavaNCSS"
+              files="RequestResponseTest.java|FetcherTest.java|KafkaAdminClientTest.java"/>
+
+    <suppress checks="NPathComplexity"
+              files="MemoryRecordsTest|MetricsTest|RequestResponseTest|TestSslUtils|AclAuthorizerBenchmark"/>
+
+    <suppress checks="(WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
+              files="Murmur3Test.java"/>
+
+    <!-- Connect -->
+    <suppress checks="ClassFanOutComplexity"
+              files="(AbstractHerder|DistributedHerder|Worker).java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="Worker(|Test).java"/>
+    <suppress checks="MethodLength"
+              files="(DistributedHerder|DistributedConfig|KafkaConfigBackingStore|Values|IncrementalCooperativeAssignor).java"/>
+    <suppress checks="ParameterNumber"
+              files="Worker(SinkTask|SourceTask|Coordinator).java"/>
+    <suppress checks="ParameterNumber"
+              files="(ConfigKeyInfo|DistributedHerder).java"/>
+
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(RestServer|AbstractHerder|DistributedHerder|Worker).java"/>
+
+    <suppress checks="BooleanExpressionComplexity"
+              files="JsonConverter.java"/>
+
+    <suppress checks="CyclomaticComplexity"
+              files="(FileStreamSourceTask|DistributedHerder|KafkaConfigBackingStore).java"/>
+    <suppress checks="CyclomaticComplexity"
+              files="(JsonConverter|Values|ConnectHeaders).java"/>
+
+    <suppress checks="JavaNCSS"
+              files="(KafkaConfigBackingStore|Values|ConnectMetricsRegistry).java"/>
+
+    <suppress checks="NPathComplexity"
+              files="(DistributedHerder|RestClient|RestServer|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask|WorkerSourceTask|TopicAdmin).java"/>
+
+    <!-- Connect tests -->
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(DistributedHerder|KafkaBasedLog|WorkerSourceTaskWithTopicCreation|WorkerSourceTask)Test.java"/>
+
+    <suppress checks="ClassFanOutComplexity"
+              files="(WorkerSink|WorkerSource|ErrorHandling)Task(|WithTopicCreation)Test.java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="DistributedHerderTest.java"/>
+
+    <suppress checks="MethodLength"
+              files="(RequestResponse|WorkerSinkTask)Test.java"/>
+
+    <suppress checks="JavaNCSS"
+              files="(DistributedHerder|Worker)Test.java"/>
+
+    <!-- Raft -->
+    <suppress checks="NPathComplexity"
+              files="RecordsIterator.java"/>
+
+    <!-- Streams -->
+    <suppress checks="ClassFanOutComplexity"
+              files="(KafkaStreams|KStreamImpl|KTableImpl|InternalTopologyBuilder|StreamsPartitionAssignor|StreamThread|IQv2StoreIntegrationTest|KStreamImplTest).java"/>
+
+    <suppress checks="MethodLength"
+              files="KTableImpl.java"/>
+
+    <suppress checks="ParameterNumber"
+              files="StreamThread.java"/>
+
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(KafkaStreams|KStreamImpl|KTableImpl).java"/>
+
+    <suppress checks="CyclomaticComplexity"
+              files="(KafkaStreams|StreamsPartitionAssignor|StreamThread|TaskManager|PartitionGroup|SubscriptionWrapperSerde|AssignorConfiguration).java"/>
+
+    <suppress checks="StaticVariableName"
+              files="StreamsMetricsImpl.java"/>
+
+    <suppress checks="NPathComplexity"
+              files="(KafkaStreams|StreamsPartitionAssignor|StreamThread|TaskManager|GlobalStateManagerImpl|KStreamImplJoin|TopologyConfig|KTableKTableOuterJoin).java"/>
+
+    <suppress checks="(FinalLocalVariable|UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
+              files="Murmur3.java"/>
+
+    <suppress checks="(NPathComplexity|CyclomaticComplexity)"
+              files="(KStreamSlidingWindowAggregate|RackAwareTaskAssignor).java"/>
+
+    <!-- suppress FinalLocalVariable outside of the streams package. -->
+    <suppress checks="FinalLocalVariable"
+              files="^(?!.*[\\/]org[\\/]apache[\\/]kafka[\\/]streams[\\/].*$)"/>
+
+    <!-- Generated code -->
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+              files="streams[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+              files="raft[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+              files="storage[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+              files="group-coordinator[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+
+    <suppress checks="ImportControl" files="FetchResponseData.java"/>
+    <suppress checks="ImportControl" files="RecordsSerdeTest.java"/>
+
+    <!-- Streams tests -->
+    <suppress checks="ClassFanOutComplexity"
+              files="(RecordCollectorTest|StreamsPartitionAssignorTest|StreamThreadTest|StreamTaskTest|TaskManagerTest|TopologyTestDriverTest).java"/>
+
+    <suppress checks="MethodLength"
+              files="(EosIntegrationTest|EosV2UpgradeIntegrationTest|KStreamKStreamJoinTest|RocksDBWindowStoreTest|StreamStreamJoinIntegrationTest).java"/>
+
+    <suppress checks="ClassDataAbstractionCoupling"
+              files=".*[/\\]streams[/\\].*test[/\\].*.java"/>
+
+    <suppress checks="CyclomaticComplexity"
+              files="(EosV2UpgradeIntegrationTest|KStreamKStreamJoinTest|KTableKTableForeignKeyJoinIntegrationTest|KTableKTableForeignKeyVersionedJoinIntegrationTest|RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest|RelationalSmokeTest|MockProcessorContextStateStoreTest).java"/>
+
+    <suppress checks="JavaNCSS"
+              files="(EosV2UpgradeIntegrationTest|KStreamKStreamJoinTest|StreamThreadTest|TaskManagerTest).java"/>
+
+    <suppress checks="NPathComplexity"
+              files="(EosV2UpgradeIntegrationTest|EosTestDriver|KStreamKStreamJoinTest|KTableKTableForeignKeyJoinIntegrationTest|KTableKTableForeignKeyVersionedJoinIntegrationTest|RelationalSmokeTest|MockProcessorContextStateStoreTest|TopologyTestDriverTest).java"/>
+
+    <suppress checks="(FinalLocalVariable|WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
+              files="Murmur3Test.java"/>
+
+    <suppress checks="MethodLength"
+              files="(KStreamSlidingWindowAggregateTest|KStreamKStreamLeftJoinTest|KStreamKStreamOuterJoinTest|KTableKTableForeignKeyVersionedJoinIntegrationTest).java"/>
+
+    <suppress checks="ClassFanOutComplexity"
+              files="StreamTaskTest.java"/>
+
+    <!-- Streams test-utils -->
+    <suppress checks="ClassFanOutComplexity"
+              files="TopologyTestDriver.java"/>
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="TopologyTestDriver.java"/>
+
+    <!-- Streams examples -->
+    <suppress id="dontUseSystemExit"
+              files="PageViewTypedDemo.java|PipeDemo.java|TemperatureDemo.java|WordCountDemo.java|WordCountProcessorDemo.java|WordCountTransformerDemo.java"/>
+
+    <!-- Tools -->
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="VerifiableConsumer.java"/>
+    <suppress checks="CyclomaticComplexity"
+              files="(StreamsResetter|ProducerPerformance|Agent).java"/>
+    <suppress checks="BooleanExpressionComplexity"
+              files="StreamsResetter.java"/>
+    <suppress checks="NPathComplexity"
+              files="(ProducerPerformance|StreamsResetter|Agent|TransactionalMessageCopier|ReplicaVerificationTool).java"/>
+    <suppress checks="ImportControl"
+              files="SignalLogger.java"/>
+    <suppress checks="IllegalImport"
+              files="SignalLogger.java"/>
+    <suppress checks="ParameterNumber"
+              files="ProduceBenchSpec.java"/>
+    <suppress checks="ParameterNumber"
+              files="ConsumeBenchSpec.java"/>
+    <suppress checks="ParameterNumber"
+              files="SustainedConnectionSpec.java"/>
+    <suppress id="dontUseSystemExit"
+              files="VerifiableConsumer.java"/>
+    <suppress id="dontUseSystemExit"
+              files="VerifiableProducer.java"/>
+
+    <!-- Shell -->
+    <suppress checks="CyclomaticComplexity"
+              files="(GlobComponent|MetadataNodeManager).java"/>
+    <suppress checks="MethodLength"
+              files="(MetadataNodeManager).java"/>
+    <suppress checks="JavaNCSS"
+              files="(MetadataNodeManager).java"/>
+
+    <!-- Log4J-Appender -->
+    <suppress checks="CyclomaticComplexity"
+              files="KafkaLog4jAppender.java"/>
+
+    <suppress checks="NPathComplexity"
+              files="KafkaLog4jAppender.java"/>
+    <suppress checks="JavaNCSS"
+              files="RequestResponseTest.java"/>
+
+    <!-- metadata -->
+    <suppress checks="ClassDataAbstractionCoupling"
+              files="(QuorumController|QuorumControllerTest|ReplicationControlManager|ReplicationControlManagerTest|ClusterControlManagerTest|KRaftMigrationDriverTest).java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="(QuorumController|QuorumControllerTest|ReplicationControlManager|ReplicationControlManagerTest).java"/>
+    <suppress checks="(ParameterNumber|ClassDataAbstractionCoupling)"
+              files="(QuorumController).java"/>
+    <suppress checks="CyclomaticComplexity"
+              files="(ClientQuotasImage|KafkaEventQueue|MetadataDelta|QuorumController|ReplicationControlManager|KRaftMigrationDriver|ClusterControlManager).java"/>
+    <suppress checks="NPathComplexity"
+              files="(ClientQuotasImage|KafkaEventQueue|ReplicationControlManager|FeatureControlManager|KRaftMigrationDriver|ScramControlManager|ClusterControlManager|MetadataDelta).java"/>
+    <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS|AvoidStarImport)"
+            files="metadata[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
+    <suppress checks="BooleanExpressionComplexity"
+              files="(MetadataImage).java"/>
+    <suppress checks="ImportControl"
+              files="ApiVersionsResponse.java"/>
+    <suppress checks="AvoidStarImport"
+              files="MetadataVersionTest.java"/>
+
+    <!-- group coordinator -->
+    <suppress checks="CyclomaticComplexity"
+              files="(ConsumerGroupMember|GroupMetadataManager).java"/>
+    <suppress checks="(NPathComplexity|MethodLength)"
+              files="(GroupMetadataManager|ConsumerGroupTest|GroupMetadataManagerTest).java"/>
+    <suppress checks="ClassFanOutComplexity"
+              files="(GroupMetadataManager|GroupMetadataManagerTest).java"/>
+    <suppress checks="ParameterNumber"
+              files="(ConsumerGroupMember|GroupMetadataManager).java"/>
+    <suppress checks="ClassDataAbstractionCouplingCheck"
+              files="(RecordHelpersTest|GroupMetadataManagerTest|GroupCoordinatorServiceTest).java"/>
+    <suppress checks="JavaNCSS"
+              files="GroupMetadataManagerTest.java"/>
+
+    <!-- storage -->
+    <suppress checks="CyclomaticComplexity"
+              files="(LogValidator|RemoteLogManagerConfig|RemoteLogManager).java"/>
+    <suppress checks="NPathComplexity"
+              files="(LogValidator|RemoteLogManager|RemoteIndexCache).java"/>
+    <suppress checks="ParameterNumber"
+              files="(LogAppendInfo|RemoteLogManagerConfig).java"/>
+
+    <!-- benchmarks -->
+    <suppress checks="(ClassDataAbstractionCoupling|ClassFanOutComplexity)"
+              files="(ReplicaFetcherThreadBenchmark).java"/>
+
+</suppressions>
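
The note at the top of this file about using [/\\] as the path separator can be checked with plain java.util.regex, since checkstyle treats the files attribute as a regular expression over the reported file path. A throwaway demo (class name hypothetical; each backslash from the XML is doubled again inside the Java string literal):

    import java.util.regex.Pattern;

    public class SuppressionPathDemo {
        public static void main(String[] args) {
            // The XML pattern core[\\/]src[\\/](generated|generated-test)[\\/].+.java$
            Pattern p = Pattern.compile("core[\\\\/]src[\\\\/](generated|generated-test)[\\\\/].+.java$");
            System.out.println(p.matcher("core/src/generated/Foo.java").find());          // true
            System.out.println(p.matcher("core\\src\\generated-test\\Bar.java").find());  // true
            System.out.println(p.matcher("core/src/main/Foo.java").find());               // false
        }
    }
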
diff --git a/clients/.gitignore b/clients/.gitignore
new file mode 100644
index 0000000..ae3c172
--- /dev/null
+++ b/clients/.gitignore
@@ -0,0 +1 @@
+/bin/
diff --git a/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java
new file mode 100644
index 0000000..a09d581
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.protocol.ApiKeys;
+import org.apache.kafka.common.record.RecordBatch;
+import org.apache.kafka.common.requests.ProduceRequest;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Maintains node API versions for access outside of NetworkClient (which is where the information is derived).
+ * The pattern is akin to the use of {@link Metadata} for topic metadata.
+ *
+ * NOTE: This class is intended for INTERNAL usage only within Kafka.
+ */
+public class ApiVersions {
+
+    private final Map<String, NodeApiVersions> nodeApiVersions = new HashMap<>();
+    private byte maxUsableProduceMagic = RecordBatch.CURRENT_MAGIC_VALUE;
+
+    public synchronized void update(String nodeId, NodeApiVersions nodeApiVersions) {
+        this.nodeApiVersions.put(nodeId, nodeApiVersions);
+        this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
+    }
+
+    public synchronized void remove(String nodeId) {
+        this.nodeApiVersions.remove(nodeId);
+        this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
+    }
+
+    public synchronized NodeApiVersions get(String nodeId) {
+        return this.nodeApiVersions.get(nodeId);
+    }
+
+    private byte computeMaxUsableProduceMagic() {
+        // use a magic version which is supported by all brokers to reduce the chance that
+        // we will need to convert the messages when they are ready to be sent.
+        Optional<Byte> knownBrokerNodesMinRequiredMagicForProduce = this.nodeApiVersions.values().stream()
+            .filter(versions -> versions.apiVersion(ApiKeys.PRODUCE) != null) // filter out Raft controller nodes
+            .map(versions -> ProduceRequest.requiredMagicForVersion(versions.latestUsableVersion(ApiKeys.PRODUCE)))
+            .min(Byte::compare);
+        return (byte) Math.min(RecordBatch.CURRENT_MAGIC_VALUE,
+            knownBrokerNodesMinRequiredMagicForProduce.orElse(RecordBatch.CURRENT_MAGIC_VALUE));
+    }
+
+    public synchronized byte maxUsableProduceMagic() {
+        return maxUsableProduceMagic;
+    }
+
+}
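
A short usage sketch for the class above. NodeApiVersions.create() is assumed to be available from the same package as a factory for the latest supported versions, and the demo class name is hypothetical:

    package org.apache.kafka.clients;

    public class ApiVersionsDemo {
        public static void main(String[] args) {
            ApiVersions apiVersions = new ApiVersions();
            // maxUsableProduceMagic is recomputed on every update/remove
            apiVersions.update("0", NodeApiVersions.create());  // assumed factory method
            System.out.println(apiVersions.maxUsableProduceMagic());  // RecordBatch.CURRENT_MAGIC_VALUE for an up-to-date broker
            apiVersions.remove("0");
        }
    }
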
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientDnsLookup.java b/clients/src/main/java/org/apache/kafka/clients/ClientDnsLookup.java
new file mode 100644
index 0000000..e097c7e
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ClientDnsLookup.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import java.util.Locale;
+
+public enum ClientDnsLookup {
+    USE_ALL_DNS_IPS("use_all_dns_ips"),
+    RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY("resolve_canonical_bootstrap_servers_only");
+
+    private final String clientDnsLookup;
+
+    ClientDnsLookup(String clientDnsLookup) {
+        this.clientDnsLookup = clientDnsLookup;
+    }
+
+    @Override
+    public String toString() {
+        return clientDnsLookup;
+    }
+
+    public static ClientDnsLookup forConfig(String config) {
+        return ClientDnsLookup.valueOf(config.toUpperCase(Locale.ROOT));
+    }
+}
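
Usage is straightforward: forConfig upper-cases its argument before the Enum.valueOf lookup, so config values are case-insensitive. A minimal sketch (demo class name hypothetical):

    package org.apache.kafka.clients;

    public class ClientDnsLookupDemo {
        public static void main(String[] args) {
            ClientDnsLookup lookup = ClientDnsLookup.forConfig("use_all_dns_ips");
            System.out.println(lookup);  // prints "use_all_dns_ips" via the overridden toString()
            // Any unrecognized value throws IllegalArgumentException from Enum.valueOf.
        }
    }
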
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java b/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
new file mode 100644
index 0000000..abba795
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.message.RequestHeaderData;
+import org.apache.kafka.common.protocol.ApiKeys;
+import org.apache.kafka.common.requests.AbstractRequest;
+import org.apache.kafka.common.requests.RequestHeader;
+
+/**
+ * A request being sent to the server. This holds both the network send and the client-level metadata.
+ */
+public final class ClientRequest {
+
+    private final String destination;
+    private final AbstractRequest.Builder<?> requestBuilder;
+    private final int correlationId;
+    private final String clientId;
+    private final long createdTimeMs;
+    private final boolean expectResponse;
+    private final int requestTimeoutMs;
+    private final RequestCompletionHandler callback;
+
+    /**
+     * @param destination The brokerId to send the request to
+     * @param requestBuilder The builder for the request to make
+     * @param correlationId The correlation id for this client request
+     * @param clientId The client ID to use for the header
+     * @param createdTimeMs The unix timestamp in milliseconds for the time at which this request was created.
+     * @param expectResponse Should we expect a response message or is this request complete once it is sent?
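+     * @param requestTimeoutMs The maximum time in milliseconds to await a response before the request is considered timed out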
+     * @param callback A callback to execute when the response has been received (or null if no callback is necessary)
+     */
+    public ClientRequest(String destination,
+                         AbstractRequest.Builder<?> requestBuilder,
+                         int correlationId,
+                         String clientId,
+                         long createdTimeMs,
+                         boolean expectResponse,
+                         int requestTimeoutMs,
+                         RequestCompletionHandler callback) {
+        this.destination = destination;
+        this.requestBuilder = requestBuilder;
+        this.correlationId = correlationId;
+        this.clientId = clientId;
+        this.createdTimeMs = createdTimeMs;
+        this.expectResponse = expectResponse;
+        this.requestTimeoutMs = requestTimeoutMs;
+        this.callback = callback;
+    }
+
+    @Override
+    public String toString() {
+        return "ClientRequest(expectResponse=" + expectResponse +
+            ", callback=" + callback +
+            ", destination=" + destination +
+            ", correlationId=" + correlationId +
+            ", clientId=" + clientId +
+            ", createdTimeMs=" + createdTimeMs +
+            ", requestBuilder=" + requestBuilder +
+            ")";
+    }
+
+    public boolean expectResponse() {
+        return expectResponse;
+    }
+
+    public ApiKeys apiKey() {
+        return requestBuilder.apiKey();
+    }
+
+    public RequestHeader makeHeader(short version) {
+        ApiKeys requestApiKey = apiKey();
+        return new RequestHeader(
+            new RequestHeaderData()
+                .setRequestApiKey(requestApiKey.id)
+                .setRequestApiVersion(version)
+                .setClientId(clientId)
+                .setCorrelationId(correlationId),
+            requestApiKey.requestHeaderVersion(version));
+    }
+
+    public AbstractRequest.Builder<?> requestBuilder() {
+        return requestBuilder;
+    }
+
+    public String destination() {
+        return destination;
+    }
+
+    public RequestCompletionHandler callback() {
+        return callback;
+    }
+
+    public long createdTimeMs() {
+        return createdTimeMs;
+    }
+
+    public int correlationId() {
+        return correlationId;
+    }
+
+    public int requestTimeoutMs() {
+        return requestTimeoutMs;
+    }
+}
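
As a review aid (this sketch is not part of the patch): ClientRequest is an
immutable holder whose correlationId pairs an in-flight request with its
eventual response, and whose createdTimeMs later feeds the latency calculation
in ClientResponse. A simplified stand-in showing the intended usage pattern;
only the field set mirrors the class above:

    import java.util.concurrent.atomic.AtomicInteger;

    public class ClientRequestDemo {
        // Simplified stand-in for ClientRequest: same core fields, no Kafka types.
        static final class Request {
            final String destination;     // broker id the request is routed to
            final int correlationId;      // pairs this request with its response
            final long createdTimeMs;     // later used as receivedTimeMs - createdTimeMs
            final boolean expectResponse; // false for fire-and-forget sends

            Request(String destination, int correlationId, long createdTimeMs, boolean expectResponse) {
                this.destination = destination;
                this.correlationId = correlationId;
                this.createdTimeMs = createdTimeMs;
                this.expectResponse = expectResponse;
            }
        }

        // A monotonically increasing correlation counter, as the network client keeps.
        private static final AtomicInteger CORRELATION = new AtomicInteger();

        public static void main(String[] args) {
            Request r = new Request("0", CORRELATION.getAndIncrement(),
                    System.currentTimeMillis(), true);
            System.out.printf("request to node %s, correlationId=%d%n",
                    r.destination, r.correlationId);
        }
    }
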
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientResponse.java b/clients/src/main/java/org/apache/kafka/clients/ClientResponse.java
new file mode 100644
index 0000000..2135dfa
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ClientResponse.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.errors.AuthenticationException;
+import org.apache.kafka.common.errors.UnsupportedVersionException;
+import org.apache.kafka.common.requests.AbstractResponse;
+import org.apache.kafka.common.requests.RequestHeader;
+
+/**
+ * A response from the server. Contains both the body of the response and the correlated request
+ * metadata that was originally sent.
+ */
+public class ClientResponse {
+
+    private final RequestHeader requestHeader;
+    private final RequestCompletionHandler callback;
+    private final String destination;
+    private final long receivedTimeMs;
+    private final long latencyMs;
+    private final boolean disconnected;
+    private final boolean timedOut;
+    private final UnsupportedVersionException versionMismatch;
+    private final AuthenticationException authenticationException;
+    private final AbstractResponse responseBody;
+
+    /**
+     * @param requestHeader The header of the corresponding request
+     * @param callback The callback to be invoked
+     * @param destination The node the corresponding request was sent to
+     * @param createdTimeMs The unix timestamp when the corresponding request was created
+     * @param receivedTimeMs The unix timestamp when this response was received
+     * @param disconnected Whether the client disconnected before fully reading a response
+     * @param versionMismatch The exception raised if a version mismatch prevented the request from being sent (or null)
+     * @param authenticationException The exception raised if authentication failed (or null)
+     * @param responseBody The response contents, or null if we disconnected, no response was expected,
+     *                     or there was a version mismatch
+     */
+    public ClientResponse(RequestHeader requestHeader,
+                          RequestCompletionHandler callback,
+                          String destination,
+                          long createdTimeMs,
+                          long receivedTimeMs,
+                          boolean disconnected,
+                          UnsupportedVersionException versionMismatch,
+                          AuthenticationException authenticationException,
+                          AbstractResponse responseBody) {
+        this(requestHeader,
+             callback,
+             destination,
+             createdTimeMs,
+             receivedTimeMs,
+             disconnected,
+             false,
+             versionMismatch,
+             authenticationException,
+             responseBody);
+    }
+
+    /**
+     * @param requestHeader The header of the corresponding request
+     * @param callback The callback to be invoked
+     * @param destination The node the corresponding request was sent to
+     * @param createdTimeMs The unix timestamp when the corresponding request was created
+     * @param receivedTimeMs The unix timestamp when this response was received
+     * @param disconnected Whether the client disconnected before fully reading a response
+     * @param timedOut Whether the client was disconnected because of a timeout; when setting this
+     *                 to <code>true</code>, <code>disconnected</code> must be <code>true</code>
+     *                 or an {@link IllegalStateException} will be thrown
+     * @param versionMismatch The exception raised if a version mismatch prevented the request from being sent (or null)
+     * @param authenticationException The exception raised if authentication failed (or null)
+     * @param responseBody The response contents, or null if we disconnected, no response was expected,
+     *                     or there was a version mismatch
+     */
+    public ClientResponse(RequestHeader requestHeader,
+                          RequestCompletionHandler callback,
+                          String destination,
+                          long createdTimeMs,
+                          long receivedTimeMs,
+                          boolean disconnected,
+                          boolean timedOut,
+                          UnsupportedVersionException versionMismatch,
+                          AuthenticationException authenticationException,
+                          AbstractResponse responseBody) {
+        if (!disconnected && timedOut)
+            throw new IllegalStateException("A client response cannot be marked as timed out unless it is also marked as disconnected");
+
+        this.requestHeader = requestHeader;
+        this.callback = callback;
+        this.destination = destination;
+        this.receivedTimeMs = receivedTimeMs;
+        this.latencyMs = receivedTimeMs - createdTimeMs;
+        this.disconnected = disconnected;
+        this.timedOut = timedOut;
+        this.versionMismatch = versionMismatch;
+        this.authenticationException = authenticationException;
+        this.responseBody = responseBody;
+    }
+
+    public long receivedTimeMs() {
+        return receivedTimeMs;
+    }
+
+    public boolean wasDisconnected() {
+        return disconnected;
+    }
+
+    public boolean wasTimedOut() {
+        return timedOut;
+    }
+
+    public UnsupportedVersionException versionMismatch() {
+        return versionMismatch;
+    }
+
+    public AuthenticationException authenticationException() {
+        return authenticationException;
+    }
+
+    public RequestHeader requestHeader() {
+        return requestHeader;
+    }
+
+    public String destination() {
+        return destination;
+    }
+
+    public AbstractResponse responseBody() {
+        return responseBody;
+    }
+
+    public boolean hasResponse() {
+        return responseBody != null;
+    }
+
+    public long requestLatencyMs() {
+        return latencyMs;
+    }
+
+    public void onComplete() {
+        if (callback != null)
+            callback.onComplete(this);
+    }
+
+    @Override
+    public String toString() {
+        return "ClientResponse(receivedTimeMs=" + receivedTimeMs +
+               ", latencyMs=" +
+               latencyMs +
+               ", disconnected=" +
+               disconnected +
+               ", timedOut=" +
+               timedOut +
+               ", requestHeader=" +
+               requestHeader +
+               ", responseBody=" +
+               responseBody +
+               ")";
+    }
+
+}
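
As a review aid (this sketch is not part of the patch): in ClientResponse the
latency is fixed at construction time as receivedTimeMs - createdTimeMs, and
onComplete() is a null-tolerant callback dispatch. A minimal stand-in, with
RequestCompletionHandler reduced to a plain functional interface:

    public class ClientResponseDemo {
        @FunctionalInterface
        interface CompletionHandler { void onComplete(Response response); }

        static final class Response {
            final long latencyMs;
            final CompletionHandler callback;

            Response(long createdTimeMs, long receivedTimeMs, CompletionHandler callback) {
                // Latency is computed once, at construction, from the two timestamps.
                this.latencyMs = receivedTimeMs - createdTimeMs;
                this.callback = callback;
            }

            void onComplete() {
                if (callback != null) // a null callback means "no completion action"
                    callback.onComplete(this);
            }
        }

        public static void main(String[] args) {
            Response r = new Response(1_000L, 1_042L,
                    resp -> System.out.println("request took " + resp.latencyMs + " ms"));
            r.onComplete(); // prints: request took 42 ms
        }
    }
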
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
new file mode 100644
index 0000000..83230c0
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.config.SaslConfigs;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.metrics.Sensor;
+import org.apache.kafka.common.network.ChannelBuilder;
+import org.apache.kafka.common.network.ChannelBuilders;
+import org.apache.kafka.common.network.Selector;
+import org.apache.kafka.common.security.JaasContext;
+import org.apache.kafka.common.security.auth.SecurityProtocol;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.common.utils.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.apache.kafka.common.utils.Utils.closeQuietly;
+import static org.apache.kafka.common.utils.Utils.getHost;
+import static org.apache.kafka.common.utils.Utils.getPort;
+
+public final class ClientUtils {
+    private static final Logger log = LoggerFactory.getLogger(ClientUtils.class);
+
+    private ClientUtils() {
+    }
+
+    public static List<InetSocketAddress> parseAndValidateAddresses(AbstractConfig config) {
+        List<String> urls = config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
+        String clientDnsLookupConfig = config.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG);
+        return parseAndValidateAddresses(urls, clientDnsLookupConfig);
+    }
+
+    public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, String clientDnsLookupConfig) {
+        return parseAndValidateAddresses(urls, ClientDnsLookup.forConfig(clientDnsLookupConfig));
+    }
+
+    public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, ClientDnsLookup clientDnsLookup) {
+        List<InetSocketAddress> addresses = new ArrayList<>();
+        for (String url : urls) {
+            if (url != null && !url.isEmpty()) {
+                try {
+                    String host = getHost(url);
+                    Integer port = getPort(url);
+                    if (host == null || port == null)
+                        throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
+
+                    if (clientDnsLookup == ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY) {
+                        InetAddress[] inetAddresses = InetAddress.getAllByName(host);
+                        for (InetAddress inetAddress : inetAddresses) {
+                            String resolvedCanonicalName = inetAddress.getCanonicalHostName();
+                            InetSocketAddress address = new InetSocketAddress(resolvedCanonicalName, port);
+                            if (address.isUnresolved()) {
+                                log.warn("Couldn't resolve server {} from {} as DNS resolution of the canonical hostname {} failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, resolvedCanonicalName, host);
+                            } else {
+                                addresses.add(address);
+                            }
+                        }
+                    } else {
+                        InetSocketAddress address = new InetSocketAddress(host, port);
+                        if (address.isUnresolved()) {
+                            log.warn("Couldn't resolve server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host);
+                        } else {
+                            addresses.add(address);
+                        }
+                    }
+
+                } catch (IllegalArgumentException e) {
+                    throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
+                } catch (UnknownHostException e) {
+                    throw new ConfigException("Unknown host in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
+                }
+            }
+        }
+        if (addresses.isEmpty())
+            throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
+        return addresses;
+    }
+
+    /**
+     * Create a new channel builder from the provided configuration.
+     *
+     * @param config client configs
+     * @param time the time implementation
+     * @param logContext the logging context
+     *
+     * @return configured ChannelBuilder based on the configs.
+     */
+    public static ChannelBuilder createChannelBuilder(AbstractConfig config, Time time, LogContext logContext) {
+        SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
+        String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
+        return ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT, config, null,
+                clientSaslMechanism, time, true, logContext);
+    }
+
+    static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {
+        InetAddress[] addresses = hostResolver.resolve(host);
+        List<InetAddress> result = filterPreferredAddresses(addresses);
+        if (log.isDebugEnabled())
+            log.debug("Resolved host {} as {}", host, result.stream().map(i -> i.getHostAddress()).collect(Collectors.joining(",")));
+        return result;
+    }
+
+    /**
+     * Return a list containing the first address in `allAddresses` and any subsequent addresses
+     * that are of the same subtype as the first address.
+     *
+     * The outcome is that all returned addresses are either IPv4 or IPv6 (InetAddress has two
+     * subclasses: Inet4Address and Inet6Address).
+     */
+    static List<InetAddress> filterPreferredAddresses(InetAddress[] allAddresses) {
+        List<InetAddress> preferredAddresses = new ArrayList<>();
+        Class<? extends InetAddress> clazz = null;
+        for (InetAddress address : allAddresses) {
+            if (clazz == null) {
+                clazz = address.getClass();
+            }
+            if (clazz.isInstance(address)) {
+                preferredAddresses.add(address);
+            }
+        }
+        return preferredAddresses;
+    }
+
+    public static NetworkClient createNetworkClient(AbstractConfig config,
+                                                    Metrics metrics,
+                                                    String metricsGroupPrefix,
+                                                    LogContext logContext,
+                                                    ApiVersions apiVersions,
+                                                    Time time,
+                                                    int maxInFlightRequestsPerConnection,
+                                                    Metadata metadata,
+                                                    Sensor throttleTimeSensor) {
+        return createNetworkClient(config,
+                config.getString(CommonClientConfigs.CLIENT_ID_CONFIG),
+                metrics,
+                metricsGroupPrefix,
+                logContext,
+                apiVersions,
+                time,
+                maxInFlightRequestsPerConnection,
+                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
+                metadata,
+                null,
+                new DefaultHostResolver(),
+                throttleTimeSensor);
+    }
+
+    public static NetworkClient createNetworkClient(AbstractConfig config,
+                                                    String clientId,
+                                                    Metrics metrics,
+                                                    String metricsGroupPrefix,
+                                                    LogContext logContext,
+                                                    ApiVersions apiVersions,
+                                                    Time time,
+                                                    int maxInFlightRequestsPerConnection,
+                                                    int requestTimeoutMs,
+                                                    MetadataUpdater metadataUpdater,
+                                                    HostResolver hostResolver) {
+        return createNetworkClient(config,
+                clientId,
+                metrics,
+                metricsGroupPrefix,
+                logContext,
+                apiVersions,
+                time,
+                maxInFlightRequestsPerConnection,
+                requestTimeoutMs,
+                null,
+                metadataUpdater,
+                hostResolver,
+                null);
+    }
+
+    public static NetworkClient createNetworkClient(AbstractConfig config,
+                                                    String clientId,
+                                                    Metrics metrics,
+                                                    String metricsGroupPrefix,
+                                                    LogContext logContext,
+                                                    ApiVersions apiVersions,
+                                                    Time time,
+                                                    int maxInFlightRequestsPerConnection,
+                                                    int requestTimeoutMs,
+                                                    Metadata metadata,
+                                                    MetadataUpdater metadataUpdater,
+                                                    HostResolver hostResolver,
+                                                    Sensor throttleTimeSensor) {
+        ChannelBuilder channelBuilder = null;
+        Selector selector = null;
+
+        try {
+            channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
+            selector = new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG),
+                    metrics,
+                    time,
+                    metricsGroupPrefix,
+                    channelBuilder,
+                    logContext);
+            return new NetworkClient(metadataUpdater,
+                    metadata,
+                    selector,
+                    clientId,
+                    maxInFlightRequestsPerConnection,
+                    config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
+                    config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
+                    config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
+                    config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
+                    requestTimeoutMs,
+                    config.getLong(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
+                    config.getLong(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
+                    time,
+                    true,
+                    apiVersions,
+                    throttleTimeSensor,
+                    logContext,
+                    hostResolver);
+        } catch (Throwable t) {
+            closeQuietly(selector, "Selector");
+            closeQuietly(channelBuilder, "ChannelBuilder");
+            throw new KafkaException("Failed to create new NetworkClient", t);
+        }
+    }
+
+    public static <T> List<T> createConfiguredInterceptors(AbstractConfig config,
+                                                           String interceptorClassesConfigName,
+                                                           Class<T> clazz) {
+        String clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
+        return config.getConfiguredInstances(
+                interceptorClassesConfigName,
+                clazz,
+                Collections.singletonMap(CommonClientConfigs.CLIENT_ID_CONFIG, clientId));
+    }
+}
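
As a review aid (this sketch is not part of the patch): on the default
use_all_dns_ips path, parseAndValidateAddresses splits each bootstrap url into
host and port, skips unresolvable entries with a warning, and only treats an
empty final list as fatal. The sketch below substitutes a naive
lastIndexOf(':') split for the real getHost/getPort helpers, which also handle
IPv6 literals such as [::1]:9092:

    import java.net.InetSocketAddress;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class BootstrapParseDemo {
        static List<InetSocketAddress> parse(List<String> urls) {
            List<InetSocketAddress> addresses = new ArrayList<>();
            for (String url : urls) {
                int idx = url.lastIndexOf(':');
                String host = url.substring(0, idx);
                int port = Integer.parseInt(url.substring(idx + 1));
                InetSocketAddress address = new InetSocketAddress(host, port);
                // An unresolved address is skipped (the real code logs a warning);
                // only an empty final list is a configuration error.
                if (!address.isUnresolved())
                    addresses.add(address);
            }
            if (addresses.isEmpty())
                throw new IllegalArgumentException("No resolvable bootstrap urls given");
            return addresses;
        }

        public static void main(String[] args) {
            System.out.println(parse(Arrays.asList("localhost:9092")));
        }
    }
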
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java b/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
new file mode 100644
index 0000000..f4d9092
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
@@ -0,0 +1,539 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.errors.AuthenticationException;
+import org.apache.kafka.common.utils.ExponentialBackoff;
+import org.apache.kafka.common.utils.LogContext;
+import org.slf4j.Logger;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * The state of our connection to each node in the cluster.
+ */
+final class ClusterConnectionStates {
+    static final int RECONNECT_BACKOFF_EXP_BASE = 2;
+    static final double RECONNECT_BACKOFF_JITTER = 0.2;
+    static final int CONNECTION_SETUP_TIMEOUT_EXP_BASE = 2;
+    static final double CONNECTION_SETUP_TIMEOUT_JITTER = 0.2;
+    private final Map<String, NodeConnectionState> nodeState;
+    private final Logger log;
+    private final HostResolver hostResolver;
+    private Set<String> connectingNodes;
+    private ExponentialBackoff reconnectBackoff;
+    private ExponentialBackoff connectionSetupTimeout;
+
+    public ClusterConnectionStates(long reconnectBackoffMs, long reconnectBackoffMaxMs,
+                                   long connectionSetupTimeoutMs, long connectionSetupTimeoutMaxMs,
+                                   LogContext logContext, HostResolver hostResolver) {
+        this.log = logContext.logger(ClusterConnectionStates.class);
+        this.reconnectBackoff = new ExponentialBackoff(
+                reconnectBackoffMs,
+                RECONNECT_BACKOFF_EXP_BASE,
+                reconnectBackoffMaxMs,
+                RECONNECT_BACKOFF_JITTER);
+        this.connectionSetupTimeout = new ExponentialBackoff(
+                connectionSetupTimeoutMs,
+                CONNECTION_SETUP_TIMEOUT_EXP_BASE,
+                connectionSetupTimeoutMaxMs,
+                CONNECTION_SETUP_TIMEOUT_JITTER);
+        this.nodeState = new HashMap<>();
+        this.connectingNodes = new HashSet<>();
+        this.hostResolver = hostResolver;
+    }
+
+    /**
+     * Return true iff we can currently initiate a new connection. This will be the case if we are not
+     * connected and haven't been connected for at least the minimum reconnection backoff period.
+     * @param id the connection id to check
+     * @param now the current time in ms
+     * @return true if we can initiate a new connection
+     */
+    public boolean canConnect(String id, long now) {
+        NodeConnectionState state = nodeState.get(id);
+        if (state == null)
+            return true;
+        else
+            return state.state.isDisconnected() &&
+                   now - state.lastConnectAttemptMs >= state.reconnectBackoffMs;
+    }
+
+    /**
+     * Return true if we are disconnected from the given node and can't re-establish a connection yet.
+     * @param id the connection to check
+     * @param now the current time in ms
+     */
+    public boolean isBlackedOut(String id, long now) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null
+                && state.state.isDisconnected()
+                && now - state.lastConnectAttemptMs < state.reconnectBackoffMs;
+    }
+
+    /**
+     * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
+     * disconnected, this respects the reconnect backoff time. When connecting, return a delay based on the connection timeout.
+     * When connected, wait indefinitely (i.e. until a wakeup).
+     * @param id the connection to check
+     * @param now the current time in ms
+     */
+    public long connectionDelay(String id, long now) {
+        NodeConnectionState state = nodeState.get(id);
+        if (state == null) return 0;
+
+        if (state.state == ConnectionState.CONNECTING) {
+            return connectionSetupTimeoutMs(id);
+        } else if (state.state.isDisconnected()) {
+            long timeWaited = now - state.lastConnectAttemptMs;
+            return Math.max(state.reconnectBackoffMs - timeWaited, 0);
+        } else {
+            // When connected, we should be able to delay indefinitely since other events (connection or
+            // data acked) will cause a wakeup once data can be sent.
+            return Long.MAX_VALUE;
+        }
+    }
+
+    /**
+     * Return true if a specific connection establishment is currently underway
+     * @param id The id of the node to check
+     */
+    public boolean isConnecting(String id) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null && state.state == ConnectionState.CONNECTING;
+    }
+
+    /**
+     * Check whether a connection is either being established or awaiting API version information.
+     * @param id The id of the node to check
+     * @return true if the node is either connecting or has connected and is awaiting API versions, false otherwise
+     */
+    public boolean isPreparingConnection(String id) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null &&
+                (state.state == ConnectionState.CONNECTING || state.state == ConnectionState.CHECKING_API_VERSIONS);
+    }
+
+    /**
+     * Enter the connecting state for the given connection, moving to a new resolved address if necessary.
+     * @param id the id of the connection
+     * @param now the current time in ms
+     * @param host the host of the connection, to be resolved internally if needed
+     */
+    public void connecting(String id, long now, String host) {
+        NodeConnectionState connectionState = nodeState.get(id);
+        if (connectionState != null && connectionState.host().equals(host)) {
+            connectionState.lastConnectAttemptMs = now;
+            connectionState.state = ConnectionState.CONNECTING;
+            // Move to next resolved address, or if addresses are exhausted, mark node to be re-resolved
+            connectionState.moveToNextAddress();
+            connectingNodes.add(id);
+            return;
+        } else if (connectionState != null) {
+            log.info("Hostname for node {} changed from {} to {}.", id, connectionState.host(), host);
+        }
+
+        // Create a new NodeConnectionState if nodeState does not already contain one
+        // for the specified id or if the hostname associated with the node id changed.
+        nodeState.put(id, new NodeConnectionState(ConnectionState.CONNECTING, now,
+                reconnectBackoff.backoff(0), connectionSetupTimeout.backoff(0), host, hostResolver));
+        connectingNodes.add(id);
+    }
+
+    /**
+     * Returns a resolved address for the given connection, resolving it if necessary.
+     * @param id the id of the connection
+     * @throws UnknownHostException if the address was not resolvable
+     */
+    public InetAddress currentAddress(String id) throws UnknownHostException {
+        return nodeState(id).currentAddress();
+    }
+
+    /**
+     * Enter the disconnected state for the given node.
+     * @param id the connection we have disconnected
+     * @param now the current time in ms
+     */
+    public void disconnected(String id, long now) {
+        NodeConnectionState nodeState = nodeState(id);
+        nodeState.lastConnectAttemptMs = now;
+        updateReconnectBackoff(nodeState);
+        if (nodeState.state == ConnectionState.CONNECTING) {
+            updateConnectionSetupTimeout(nodeState);
+            connectingNodes.remove(id);
+        } else {
+            resetConnectionSetupTimeout(nodeState);
+            if (nodeState.state.isConnected()) {
+                // If a connection had previously been established, clear the addresses to trigger a new DNS resolution
+                // because the node IPs may have changed
+                nodeState.clearAddresses();
+            }
+        }
+        nodeState.state = ConnectionState.DISCONNECTED;
+    }
+
+    /**
+     * Indicate that the connection is throttled until the specified deadline.
+     * @param id the connection to be throttled
+     * @param throttleUntilTimeMs the throttle deadline in milliseconds
+     */
+    public void throttle(String id, long throttleUntilTimeMs) {
+        NodeConnectionState state = nodeState.get(id);
+        // The throttle deadline should never regress.
+        if (state != null && state.throttleUntilTimeMs < throttleUntilTimeMs) {
+            state.throttleUntilTimeMs = throttleUntilTimeMs;
+        }
+    }
+
+    /**
+     * Return the remaining throttling delay in milliseconds if throttling is in progress, or 0 otherwise.
+     * @param id the connection to check
+     * @param now the current time in ms
+     */
+    public long throttleDelayMs(String id, long now) {
+        NodeConnectionState state = nodeState.get(id);
+        if (state != null && state.throttleUntilTimeMs > now) {
+            return state.throttleUntilTimeMs - now;
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Return the number of milliseconds to wait, based on the connection state and the throttle time, before
+     * attempting to send data. If the connection has been established but is being throttled, return the throttle delay.
+     * Otherwise, return the connection delay.
+     * @param id the connection to check
+     * @param now the current time in ms
+     */
+    public long pollDelayMs(String id, long now) {
+        long throttleDelayMs = throttleDelayMs(id, now);
+        if (isConnected(id) && throttleDelayMs > 0) {
+            return throttleDelayMs;
+        } else {
+            return connectionDelay(id, now);
+        }
+    }
+
+    /**
+     * Enter the checking_api_versions state for the given node.
+     * @param id the connection identifier
+     */
+    public void checkingApiVersions(String id) {
+        NodeConnectionState nodeState = nodeState(id);
+        nodeState.state = ConnectionState.CHECKING_API_VERSIONS;
+        resetConnectionSetupTimeout(nodeState);
+        connectingNodes.remove(id);
+    }
+
+    /**
+     * Enter the ready state for the given node.
+     * @param id the connection identifier
+     */
+    public void ready(String id) {
+        NodeConnectionState nodeState = nodeState(id);
+        nodeState.state = ConnectionState.READY;
+        nodeState.authenticationException = null;
+        resetReconnectBackoff(nodeState);
+        resetConnectionSetupTimeout(nodeState);
+        connectingNodes.remove(id);
+    }
+
+    /**
+     * Enter the authentication failed state for the given node.
+     * @param id the connection identifier
+     * @param now the current time in ms
+     * @param exception the authentication exception
+     */
+    public void authenticationFailed(String id, long now, AuthenticationException exception) {
+        NodeConnectionState nodeState = nodeState(id);
+        nodeState.authenticationException = exception;
+        nodeState.state = ConnectionState.AUTHENTICATION_FAILED;
+        nodeState.lastConnectAttemptMs = now;
+        updateReconnectBackoff(nodeState);
+    }
+
+    /**
+     * Return true if the connection is in the READY state and currently not throttled.
+     *
+     * @param id the connection identifier
+     * @param now the current time in ms
+     */
+    public boolean isReady(String id, long now) {
+        return isReady(nodeState.get(id), now);
+    }
+
+    private boolean isReady(NodeConnectionState state, long now) {
+        return state != null && state.state == ConnectionState.READY && state.throttleUntilTimeMs <= now;
+    }
+
+    /**
+     * Return true if there is at least one node with connection in the READY state and not throttled. Returns false
+     * otherwise.
+     *
+     * @param now the current time in ms
+     */
+    public boolean hasReadyNodes(long now) {
+        for (Map.Entry<String, NodeConnectionState> entry : nodeState.entrySet()) {
+            if (isReady(entry.getValue(), now)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Return true if the connection has been established
+     * @param id The id of the node to check
+     */
+    public boolean isConnected(String id) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null && state.state.isConnected();
+    }
+
+    /**
+     * Return true if the connection has been disconnected
+     * @param id The id of the node to check
+     */
+    public boolean isDisconnected(String id) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null && state.state.isDisconnected();
+    }
+
+    /**
+     * Return authentication exception if an authentication error occurred
+     * @param id The id of the node to check
+     */
+    public AuthenticationException authenticationException(String id) {
+        NodeConnectionState state = nodeState.get(id);
+        return state != null ? state.authenticationException : null;
+    }
+
+    /**
+     * Resets the failure count for a node and sets the reconnect backoff to the base
+     * value configured via reconnect.backoff.ms
+     *
+     * @param nodeState The node state object to update
+     */
+    private void resetReconnectBackoff(NodeConnectionState nodeState) {
+        nodeState.failedAttempts = 0;
+        nodeState.reconnectBackoffMs = reconnectBackoff.backoff(0);
+    }
+
+    /**
+     * Resets the failure count for a node and sets the connection setup timeout to the base
+     * value configured via socket.connection.setup.timeout.ms
+     *
+     * @param nodeState The node state object to update
+     */
+    private void resetConnectionSetupTimeout(NodeConnectionState nodeState) {
+        nodeState.failedConnectAttempts = 0;
+        nodeState.connectionSetupTimeoutMs = connectionSetupTimeout.backoff(0);
+    }
+
+    /**
+     * Increment the failure counter, update the node reconnect backoff exponentially,
+     * and record the current timestamp.
+     * The delay is reconnect.backoff.ms * 2**(failures - 1), with +/- 20% random jitter,
+     * up to a (pre-jitter) maximum of reconnect.backoff.max.ms.
+     *
+     * @param nodeState The node state object to update
+     */
+    private void updateReconnectBackoff(NodeConnectionState nodeState) {
+        nodeState.reconnectBackoffMs = reconnectBackoff.backoff(nodeState.failedAttempts);
+        nodeState.failedAttempts++;
+    }
+
+    /**
+     * Increment the failure counter and update the node connection setup timeout exponentially.
+     * The delay is socket.connection.setup.timeout.ms * 2**(failures), with +/- 20% random jitter,
+     * up to a (pre-jitter) maximum of socket.connection.setup.timeout.max.ms.
+     *
+     * @param nodeState The node state object to update
+     */
+    private void updateConnectionSetupTimeout(NodeConnectionState nodeState) {
+        nodeState.failedConnectAttempts++;
+        nodeState.connectionSetupTimeoutMs = connectionSetupTimeout.backoff(nodeState.failedConnectAttempts);
+    }
+
+    /**
+     * Remove the given node from the tracked connection states. The main difference between this and `disconnected`
+     * is the impact on `connectionDelay`: it will be 0 after this call whereas `reconnectBackoffMs` will be taken
+     * into account after `disconnected` is called.
+     *
+     * @param id the connection to remove
+     */
+    public void remove(String id) {
+        nodeState.remove(id);
+        connectingNodes.remove(id);
+    }
+
+    /**
+     * Get the state of a given connection.
+     * @param id the id of the connection
+     * @return the state of our connection
+     */
+    public ConnectionState connectionState(String id) {
+        return nodeState(id).state;
+    }
+
+    /**
+     * Get the state of a given node.
+     * @param id the connection to fetch the state for
+     */
+    private NodeConnectionState nodeState(String id) {
+        NodeConnectionState state = this.nodeState.get(id);
+        if (state == null)
+            throw new IllegalStateException("No entry found for connection " + id);
+        return state;
+    }
+
+    /**
+     * Get the id set of nodes which are in CONNECTING state
+     */
+    // package private for testing only
+    Set<String> connectingNodes() {
+        return this.connectingNodes;
+    }
+
+    /**
+     * Get the timestamp of the latest connection attempt of a given node
+     * @param id the connection to fetch the state for
+     */
+    public long lastConnectAttemptMs(String id) {
+        NodeConnectionState nodeState = this.nodeState.get(id);
+        return nodeState == null ? 0 : nodeState.lastConnectAttemptMs;
+    }
+
+    /**
+     * Get the current socket connection setup timeout of the given node.
+     * The base value is defined via socket.connection.setup.timeout.ms.
+     * @param id the connection to fetch the state for
+     */
+    public long connectionSetupTimeoutMs(String id) {
+        NodeConnectionState nodeState = this.nodeState(id);
+        return nodeState.connectionSetupTimeoutMs;
+    }
+
+    /**
+     * Test if the connection to the given node has reached its timeout
+     * @param id the connection to fetch the state for
+     * @param now the current time in ms
+     */
+    public boolean isConnectionSetupTimeout(String id, long now) {
+        NodeConnectionState nodeState = this.nodeState(id);
+        if (nodeState.state != ConnectionState.CONNECTING)
+            throw new IllegalStateException("Node " + id + " is not in connecting state");
+        return now - lastConnectAttemptMs(id) > connectionSetupTimeoutMs(id);
+    }
+
+    /**
+     * Return the List of nodes whose connection setup has timed out.
+     * @param now the current time in ms
+     */
+    public List<String> nodesWithConnectionSetupTimeout(long now) {
+        return connectingNodes.stream()
+            .filter(id -> isConnectionSetupTimeout(id, now))
+            .collect(Collectors.toList());
+    }
+
+    /**
+     * The state of our connection to a node.
+     */
+    private static class NodeConnectionState {
+
+        ConnectionState state;
+        AuthenticationException authenticationException;
+        long lastConnectAttemptMs;
+        long failedAttempts;
+        long failedConnectAttempts;
+        long reconnectBackoffMs;
+        long connectionSetupTimeoutMs;
+        // Connection is being throttled if current time < throttleUntilTimeMs.
+        long throttleUntilTimeMs;
+        private List<InetAddress> addresses;
+        private int addressIndex;
+        private final String host;
+        private final HostResolver hostResolver;
+
+        private NodeConnectionState(ConnectionState state, long lastConnectAttempt, long reconnectBackoffMs,
+                long connectionSetupTimeoutMs, String host, HostResolver hostResolver) {
+            this.state = state;
+            this.addresses = Collections.emptyList();
+            this.addressIndex = -1;
+            this.authenticationException = null;
+            this.lastConnectAttemptMs = lastConnectAttempt;
+            this.failedAttempts = 0;
+            this.reconnectBackoffMs = reconnectBackoffMs;
+            this.connectionSetupTimeoutMs = connectionSetupTimeoutMs;
+            this.throttleUntilTimeMs = 0;
+            this.host = host;
+            this.hostResolver = hostResolver;
+        }
+
+        public String host() {
+            return host;
+        }
+
+        /**
+         * Fetches the current selected IP address for this node, resolving {@link #host()} if necessary.
+         * @return the selected address
+         * @throws UnknownHostException if resolving {@link #host()} fails
+         */
+        private InetAddress currentAddress() throws UnknownHostException {
+            if (addresses.isEmpty()) {
+                // (Re-)initialize list
+                addresses = ClientUtils.resolve(host, hostResolver);
+                addressIndex = 0;
+            }
+
+            return addresses.get(addressIndex);
+        }
+
+        /**
+         * Jumps to the next available resolved address for this node. If no other addresses are available, marks the
+         * list to be refreshed on the next {@link #currentAddress()} call.
+         */
+        private void moveToNextAddress() {
+            if (addresses.isEmpty())
+                return; // Avoid div0. List will initialize on next currentAddress() call
+
+            addressIndex = (addressIndex + 1) % addresses.size();
+            if (addressIndex == 0)
+                addresses = Collections.emptyList(); // Exhausted list. Re-resolve on next currentAddress() call
+        }
+
+        /**
+         * Clears the resolved addresses in order to trigger re-resolving on the next {@link #currentAddress()} call.
+         */
+        private void clearAddresses() {
+            addresses = Collections.emptyList();
+        }
+
+        public String toString() {
+            return "NodeState(" + state + ", " + lastConnectAttemptMs + ", " + failedAttempts + ", " + throttleUntilTimeMs + ")";
+        }
+    }
+}
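
As a review aid (this sketch is not part of the patch): the class above
delegates its timing math to ExponentialBackoff, which per the javadoc scales
the base interval by base**attempts, caps it at the configured maximum, and
then applies +/- 20% random jitter. A self-contained approximation of that
formula, reusing the base (2) and jitter (0.2) constants:

    import java.util.concurrent.ThreadLocalRandom;

    public class BackoffDemo {
        static long backoff(long initialMs, long maxMs, int base, double jitter, long attempts) {
            // Exponential growth, capped before jitter is applied.
            double expBackoff = Math.min(initialMs * Math.pow(base, attempts), maxMs);
            // Uniform jitter in [1 - jitter, 1 + jitter], i.e. +/- 20% for jitter = 0.2.
            double randomFactor = 1 + jitter * (2 * ThreadLocalRandom.current().nextDouble() - 1);
            return (long) (expBackoff * randomFactor);
        }

        public static void main(String[] args) {
            // e.g. reconnect.backoff.ms = 50, reconnect.backoff.max.ms = 1000
            for (int attempts = 0; attempts < 6; attempts++)
                System.out.printf("after %d failures -> backoff ~%d ms%n",
                        attempts, backoff(50, 1000, 2, 0.2, attempts));
        }
    }
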
diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
new file mode 100644
index 0000000..ee190df
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.config.SaslConfigs;
+import org.apache.kafka.common.metrics.JmxReporter;
+import org.apache.kafka.common.metrics.MetricsReporter;
+import org.apache.kafka.common.security.auth.SecurityProtocol;
+import org.apache.kafka.common.utils.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Configurations shared by Kafka client applications: producer, consumer, connect, etc.
+ */
+public class CommonClientConfigs {
+    private static final Logger log = LoggerFactory.getLogger(CommonClientConfigs.class);
+
+    /*
+     * NOTE: DO NOT CHANGE ANY OF THESE CONFIG NAMES, AS THEY ARE PART OF THE PUBLIC API AND CHANGES WILL BREAK USER CODE.
+     */
+
+    public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
+    public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form "
+                                                       + "<code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to "
+                                                       + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of "
+                                                       + "servers (you may want more than one, though, in case a server is down).";
+
+    public static final String CLIENT_DNS_LOOKUP_CONFIG = "client.dns.lookup";
+    public static final String CLIENT_DNS_LOOKUP_DOC = "Controls how the client uses DNS lookups. "
+                                                       + "If set to <code>use_all_dns_ips</code>, connect to each returned IP "
+                                                       + "address in sequence until a successful connection is established. "
+                                                       + "After a disconnection, the next IP is used. Once all IPs have been "
+                                                       + "used once, the client resolves the IP(s) from the hostname again "
+                                                       + "(both the JVM and the OS cache DNS name lookups, however). "
+                                                       + "If set to <code>resolve_canonical_bootstrap_servers_only</code>, "
+                                                       + "resolve each bootstrap address into a list of canonical names. After "
+                                                       + "the bootstrap phase, this behaves the same as <code>use_all_dns_ips</code>.";
+
+    public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms";
+    public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata, even if we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.";
+
+    public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes";
+    public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.";
+    public static final int SEND_BUFFER_LOWER_BOUND = -1;
+
+    public static final String RECEIVE_BUFFER_CONFIG = "receive.buffer.bytes";
+    public static final String RECEIVE_BUFFER_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.";
+    public static final int RECEIVE_BUFFER_LOWER_BOUND = -1;
+
+    public static final String CLIENT_ID_CONFIG = "client.id";
+    public static final String CLIENT_ID_DOC = "An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.";
+
+    public static final String CLIENT_RACK_CONFIG = "client.rack";
+    public static final String CLIENT_RACK_DOC = "A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds to the broker config 'broker.rack'.";
+    public static final String DEFAULT_CLIENT_RACK = "";
+
+    public static final String RECONNECT_BACKOFF_MS_CONFIG = "reconnect.backoff.ms";
+    public static final String RECONNECT_BACKOFF_MS_DOC = "The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.";
+
+    public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = "reconnect.backoff.max.ms";
+    public static final String RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
+
+    public static final String RETRIES_CONFIG = "retries";
+    public static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." +
+        " It is recommended to set the value to either zero or `MAX_VALUE` and use corresponding timeout parameters to control how long a client should retry a request.";
+
+    public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms";
+    public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.";
+
+    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms";
+    public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over.";
+
+    public static final String METRICS_NUM_SAMPLES_CONFIG = "metrics.num.samples";
+    public static final String METRICS_NUM_SAMPLES_DOC = "The number of samples maintained to compute metrics.";
+
+    public static final String METRICS_RECORDING_LEVEL_CONFIG = "metrics.recording.level";
+    public static final String METRICS_RECORDING_LEVEL_DOC = "The highest recording level for metrics.";
+
+    public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters";
+    public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the <code>org.apache.kafka.common.metrics.MetricsReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
+
+    public static final String METRICS_CONTEXT_PREFIX = "metrics.context.";
+
+    @Deprecated
+    public static final String AUTO_INCLUDE_JMX_REPORTER_CONFIG = "auto.include.jmx.reporter";
+    public static final String AUTO_INCLUDE_JMX_REPORTER_DOC = "Deprecated. Whether to automatically include JmxReporter even if it's not listed in <code>metric.reporters</code>. This configuration will be removed in Kafka 4.0, users should instead include <code>org.apache.kafka.common.metrics.JmxReporter</code> in <code>metric.reporters</code> in order to enable the JmxReporter.";
+
+    public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol";
+    public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Valid values are: " +
+        Utils.join(SecurityProtocol.names(), ", ") + ".";
+    public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT";
+
+    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = "socket.connection.setup.timeout.ms";
+    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC = "The amount of time the client will wait for the socket connection to be established. If the connection is not established before the timeout elapses, clients will close the socket channel.";
+    public static final Long DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS = 10 * 1000L;
+
+    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = "socket.connection.setup.timeout.max.ms";
+    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC = "The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.";
+    public static final Long DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS = 30 * 1000L;
+
+    public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = "connections.max.idle.ms";
+    public static final String CONNECTIONS_MAX_IDLE_MS_DOC = "Close idle connections after the number of milliseconds specified by this config.";
+
+    public static final String REQUEST_TIMEOUT_MS_CONFIG = "request.timeout.ms";
+    public static final String REQUEST_TIMEOUT_MS_DOC = "The configuration controls the maximum amount of time the client will wait "
+                                                         + "for the response of a request. If the response is not received before the timeout "
+                                                         + "elapses the client will resend the request if necessary or fail the request if "
+                                                         + "retries are exhausted.";
+
+    public static final String DEFAULT_LIST_KEY_SERDE_INNER_CLASS = "default.list.key.serde.inner";
+    public static final String DEFAULT_LIST_KEY_SERDE_INNER_CLASS_DOC = "Default inner class of list serde for key that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface. "
+            + "This configuration will be read if and only if <code>default.key.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code>";
+
+    public static final String DEFAULT_LIST_VALUE_SERDE_INNER_CLASS = "default.list.value.serde.inner";
+    public static final String DEFAULT_LIST_VALUE_SERDE_INNER_CLASS_DOC = "Default inner class of list serde for value that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface. "
+            + "This configuration will be read if and only if <code>default.value.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code>";
+
+    public static final String DEFAULT_LIST_KEY_SERDE_TYPE_CLASS = "default.list.key.serde.type";
+    public static final String DEFAULT_LIST_KEY_SERDE_TYPE_CLASS_DOC = "Default class for key that implements the <code>java.util.List</code> interface. "
+            + "This configuration will be read if and only if <code>default.key.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code> "
+            + "Note when list serde class is used, one needs to set the inner serde class that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface via '"
+            + DEFAULT_LIST_KEY_SERDE_INNER_CLASS + "'";
+
+    public static final String DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS = "default.list.value.serde.type";
+    public static final String DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS_DOC = "Default class for value that implements the <code>java.util.List</code> interface. "
+            + "This configuration will be read if and only if <code>default.value.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code> "
+            + "Note when list serde class is used, one needs to set the inner serde class that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface via '"
+            + DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + "'";
+
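+    // Hypothetical Streams configuration (sketch) wiring a list serde for values with the
+    // three settings above; the exact class names are illustrative assumptions:
+    //   default.value.serde            = org.apache.kafka.common.serialization.Serdes$ListSerde
+    //   default.list.value.serde.type  = java.util.ArrayList
+    //   default.list.value.serde.inner = org.apache.kafka.common.serialization.Serdes$StringSerde
+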
+    public static final String GROUP_ID_CONFIG = "group.id";
+    public static final String GROUP_ID_DOC = "A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality via <code>subscribe(topic)</code> or the Kafka-based offset management strategy.";
+
+    public static final String GROUP_INSTANCE_ID_CONFIG = "group.instance.id";
+    public static final String GROUP_INSTANCE_ID_DOC = "A unique identifier of the consumer instance provided by the end user. "
+                                                       + "Only non-empty strings are permitted. If set, the consumer is treated as a static member, "
+                                                       + "which means that only one instance with this ID is allowed in the consumer group at any time. "
+                                                       + "This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability "
+                                                       + "(e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditional behavior.";
+
+    public static final String MAX_POLL_INTERVAL_MS_CONFIG = "max.poll.interval.ms";
+    public static final String MAX_POLL_INTERVAL_MS_DOC = "The maximum delay between invocations of poll() when using "
+                                                          + "consumer group management. This places an upper bound on the amount of time that the consumer can be idle "
+                                                          + "before fetching more records. If poll() is not called before expiration of this timeout, then the consumer "
+                                                          + "is considered failed and the group will rebalance in order to reassign the partitions to another member. "
+                                                          + "For consumers using a non-null <code>group.instance.id</code> which reach this timeout, partitions will not be immediately reassigned. "
+                                                          + "Instead, the consumer will stop sending heartbeats and partitions will be reassigned "
+                                                          + "after expiration of <code>session.timeout.ms</code>. This mirrors the behavior of a static consumer which has shut down.";
+
+    public static final String REBALANCE_TIMEOUT_MS_CONFIG = "rebalance.timeout.ms";
+    public static final String REBALANCE_TIMEOUT_MS_DOC = "The maximum allowed time for each worker to join the group "
+                                                          + "once a rebalance has begun. This is effectively a limit on the amount of time needed for all tasks to "
+                                                          + "flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed "
+                                                          + "from the group, which will cause offset commit failures.";
+
+    public static final String SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms";
+    public static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect client failures when using "
+                                                        + "Kafka's group management facility. The client sends periodic heartbeats to indicate its liveness "
+                                                        + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, "
+                                                        + "then the broker will remove this client from the group and initiate a rebalance. Note that the value "
+                                                        + "must be in the allowable range as configured in the broker configuration by <code>group.min.session.timeout.ms</code> "
+                                                        + "and <code>group.max.session.timeout.ms</code>.";
+
+    public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms";
+    public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer "
+                                                           + "coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the "
+                                                           + "consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. "
+                                                           + "The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher "
+                                                           + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.";
+
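+    // Hypothetical consumer timing values (sketch) respecting the constraints above:
+    //   heartbeat.interval.ms = 15000   // no higher than 1/3 of session.timeout.ms
+    //   session.timeout.ms    = 45000   // must fall within the broker's allowed range
+    //   max.poll.interval.ms  = 300000  // upper bound on the delay between poll() calls
+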
+    public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = "default.api.timeout.ms";
+    public static final String DEFAULT_API_TIMEOUT_MS_DOC = "Specifies the timeout (in milliseconds) for client APIs. " +
+            "This configuration is used as the default timeout for all client operations that do not specify a <code>timeout</code> parameter.";
+
+    /**
+     * Postprocess the configuration so that exponential backoff is disabled when reconnect backoff
+     * is explicitly configured but the maximum reconnect backoff is not explicitly configured.
+     *
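+     * For example, given the (hypothetical) originals {"reconnect.backoff.ms": "500"} and no
+     * "reconnect.backoff.max.ms", the returned map pins reconnect.backoff.max.ms to 500,
+     * which disables exponential backoff growth.
+     *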
+     * @param config                    The config object.
+     * @param parsedValues              The parsedValues as provided to postProcessParsedConfig.
+     *
+     * @return                          The new values which have been set as described in postProcessParsedConfig.
+     */
+    public static Map<String, Object> postProcessReconnectBackoffConfigs(AbstractConfig config,
+                                                    Map<String, Object> parsedValues) {
+        HashMap<String, Object> rval = new HashMap<>();
+        Map<String, Object> originalConfig = config.originals();
+        if ((!originalConfig.containsKey(RECONNECT_BACKOFF_MAX_MS_CONFIG)) &&
+            originalConfig.containsKey(RECONNECT_BACKOFF_MS_CONFIG)) {
+            log.debug("Disabling exponential reconnect backoff because {} is set, but {} is not.",
+                    RECONNECT_BACKOFF_MS_CONFIG, RECONNECT_BACKOFF_MAX_MS_CONFIG);
+            rval.put(RECONNECT_BACKOFF_MAX_MS_CONFIG, parsedValues.get(RECONNECT_BACKOFF_MS_CONFIG));
+        }
+        return rval;
+    }
+
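+    // Example (sketch): a client configured with security.protocol=SASL_SSL must also set
+    // sasl.mechanism (e.g. to SCRAM-SHA-512); with PLAINTEXT or SSL no mechanism is required.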
+    public static void postValidateSaslMechanismConfig(AbstractConfig config) {
+        SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
+        String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
+        if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
+            if (clientSaslMechanism == null || clientSaslMechanism.isEmpty()) {
+                throw new ConfigException(SaslConfigs.SASL_MECHANISM, null, "When the " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG +
+                        " configuration enables SASL, the SASL mechanism must be a non-null, non-empty string.");
+            }
+        }
+    }
+
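+    /**
+     * Instantiate the reporters listed in <code>metric.reporters</code>, appending a JmxReporter
+     * when <code>auto.include.jmx.reporter</code> is enabled and none was configured explicitly.
+     */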
+    public static List<MetricsReporter> metricsReporters(AbstractConfig config) {
+        return metricsReporters(Collections.emptyMap(), config);
+    }
+
+    public static List<MetricsReporter> metricsReporters(String clientId, AbstractConfig config) {
+        return metricsReporters(Collections.singletonMap(CommonClientConfigs.CLIENT_ID_CONFIG, clientId), config);
+    }
+
+    public static List<MetricsReporter> metricsReporters(Map<String, Object> clientIdOverride, AbstractConfig config) {
+        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG,
+                MetricsReporter.class, clientIdOverride);
+        if (config.getBoolean(CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG) &&
+                reporters.stream().noneMatch(r -> JmxReporter.class.equals(r.getClass()))) {
+            JmxReporter jmxReporter = new JmxReporter();
+            jmxReporter.configure(config.originals(clientIdOverride));
+            reporters.add(jmxReporter);
+        }
+        return reporters;
+    }
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java b/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
new file mode 100644
index 0000000..f92c7fa
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+/**
+ * The states of a node connection
+ *
+ * DISCONNECTED: connection has not been successfully established yet
+ * CONNECTING: connection is in progress
+ * CHECKING_API_VERSIONS: connection has been established and the API versions check is in progress. Failure of this check will cause the connection to close
+ * READY: connection is ready to send requests
+ * AUTHENTICATION_FAILED: connection failed due to an authentication error
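+ *
+ * Typical lifecycle (sketch): DISCONNECTED -> CONNECTING -> CHECKING_API_VERSIONS -> READY,
+ * with AUTHENTICATION_FAILED entered when authentication with the broker fails.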
+ */
+public enum ConnectionState {
+    DISCONNECTED, CONNECTING, CHECKING_API_VERSIONS, READY, AUTHENTICATION_FAILED;
+
+    public boolean isDisconnected() {
+        return this == AUTHENTICATION_FAILED || this == DISCONNECTED;
+    }
+
+    public boolean isConnected() {
+        return this == CHECKING_API_VERSIONS || this == READY;
+    }
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/DefaultHostResolver.java b/clients/src/main/java/org/apache/kafka/clients/DefaultHostResolver.java
new file mode 100644
index 0000000..786173e
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/DefaultHostResolver.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.clients;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+public class DefaultHostResolver implements HostResolver {
+
+    @Override
+    public InetAddress[] resolve(String host) throws UnknownHostException {
+        return InetAddress.getAllByName(host);
+    }
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
new file mode 100644
index 0000000..e7556d2
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.TopicIdPartition;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.Uuid;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.requests.FetchMetadata;
+import org.apache.kafka.common.requests.FetchRequest.PartitionData;
+import org.apache.kafka.common.requests.FetchResponse;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.common.utils.Utils;
+import org.slf4j.Logger;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
+
+/**
+ * FetchSessionHandler maintains the fetch session state for connecting to a broker.
+ *
+ * Using the protocol outlined by KIP-227, clients can create incremental fetch sessions.
+ * These sessions allow the client to fetch information about a set of partitions over
+ * and over, without explicitly enumerating all the partitions in the request and the
+ * response.
+ *
+ * FetchSessionHandler tracks the partitions which are in the session.  It also
+ * determines which partitions need to be included in each fetch request, and what
+ * the attached fetch session metadata should be for each request.  The corresponding
+ * class on the receiving broker side is FetchManager.
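+ *
+ * A typical request cycle (sketch):
+ * <pre>
+ *   FetchSessionHandler handler = new FetchSessionHandler(logContext, nodeId);
+ *   FetchSessionHandler.Builder builder = handler.newBuilder();
+ *   builder.add(topicPartition, partitionData);          // partitions wanted next
+ *   FetchSessionHandler.FetchRequestData data = builder.build();
+ *   // ... send a fetch request constructed from data, then:
+ *   handler.handleResponse(fetchResponse, requestVersion);
+ * </pre>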
+ */
+public class FetchSessionHandler {
+    private final Logger log;
+
+    private final int node;
+
+    /**
+     * The metadata for the next fetch request.
+     */
+    private FetchMetadata nextMetadata = FetchMetadata.INITIAL;
+
+    public FetchSessionHandler(LogContext logContext, int node) {
+        this.log = logContext.logger(FetchSessionHandler.class);
+        this.node = node;
+    }
+
+    // visible for testing
+    public int sessionId() {
+        return nextMetadata.sessionId();
+    }
+
+    /**
+     * All of the partitions which exist in the fetch request session.
+     */
+    private LinkedHashMap<TopicPartition, PartitionData> sessionPartitions =
+        new LinkedHashMap<>(0);
+
+    /**
+     * All of the topic names mapped to topic ids for topics which exist in the fetch request session.
+     */
+    private Map<Uuid, String> sessionTopicNames = new HashMap<>(0);
+
+    public Map<Uuid, String> sessionTopicNames() {
+        return sessionTopicNames;
+    }
+
+    public static class FetchRequestData {
+        /**
+         * The partitions to send in the fetch request.
+         */
+        private final Map<TopicPartition, PartitionData> toSend;
+
+        /**
+         * The partitions to send in the request's "forget" list.
+         */
+        private final List<TopicIdPartition> toForget;
+
+        /**
+         * The partitions whose topic IDs have changed; on version 13 or newer requests
+         * they are sent in the "forget" list and re-added with the new topic ID.
+         */
+        private final List<TopicIdPartition> toReplace;
+
+        /**
+         * All of the partitions which exist in the fetch request session.
+         */
+        private final Map<TopicPartition, PartitionData> sessionPartitions;
+
+        /**
+         * The metadata to use in this fetch request.
+         */
+        private final FetchMetadata metadata;
+
+        /**
+         * A boolean indicating whether we have a topic ID for every topic in the request so that we can send a request that
+         * uses topic IDs.
+         */
+        private final boolean canUseTopicIds;
+
+        FetchRequestData(Map<TopicPartition, PartitionData> toSend,
+                         List<TopicIdPartition> toForget,
+                         List<TopicIdPartition> toReplace,
+                         Map<TopicPartition, PartitionData> sessionPartitions,
+                         FetchMetadata metadata,
+                         boolean canUseTopicIds) {
+            this.toSend = toSend;
+            this.toForget = toForget;
+            this.toReplace = toReplace;
+            this.sessionPartitions = sessionPartitions;
+            this.metadata = metadata;
+            this.canUseTopicIds = canUseTopicIds;
+        }
+
+        /**
+         * Get the set of partitions to send in this fetch request.
+         */
+        public Map<TopicPartition, PartitionData> toSend() {
+            return toSend;
+        }
+
+        /**
+         * Get a list of partitions to forget in this fetch request.
+         */
+        public List<TopicIdPartition> toForget() {
+            return toForget;
+        }
+
+        /**
+         * Get a list of partitions whose topic IDs changed and must be replaced in this fetch request.
+         */
+        public List<TopicIdPartition> toReplace() {
+            return toReplace;
+        }
+
+        /**
+         * Get the full set of partitions involved in this fetch request.
+         */
+        public Map<TopicPartition, PartitionData> sessionPartitions() {
+            return sessionPartitions;
+        }
+
+        public FetchMetadata metadata() {
+            return metadata;
+        }
+
+        public boolean canUseTopicIds() {
+            return canUseTopicIds;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder bld;
+            if (metadata.isFull()) {
+                bld = new StringBuilder("FullFetchRequest(toSend=(");
+                String prefix = "";
+                for (TopicPartition partition : toSend.keySet()) {
+                    bld.append(prefix);
+                    bld.append(partition);
+                    prefix = ", ";
+                }
+            } else {
+                bld = new StringBuilder("IncrementalFetchRequest(toSend=(");
+                String prefix = "";
+                for (TopicPartition partition : toSend.keySet()) {
+                    bld.append(prefix);
+                    bld.append(partition);
+                    prefix = ", ";
+                }
+                bld.append("), toForget=(");
+                prefix = "";
+                for (TopicIdPartition partition : toForget) {
+                    bld.append(prefix);
+                    bld.append(partition);
+                    prefix = ", ";
+                }
+                bld.append("), toReplace=(");
+                prefix = "";
+                for (TopicIdPartition partition : toReplace) {
+                    bld.append(prefix);
+                    bld.append(partition);
+                    prefix = ", ";
+                }
+                bld.append("), implied=(");
+                prefix = "";
+                for (TopicPartition partition : sessionPartitions.keySet()) {
+                    if (!toSend.containsKey(partition)) {
+                        bld.append(prefix);
+                        bld.append(partition);
+                        prefix = ", ";
+                    }
+                }
+            }
+            if (canUseTopicIds) {
+                bld.append("), canUseTopicIds=True");
+            } else {
+                bld.append("), canUseTopicIds=False");
+            }
+            bld.append(")");
+            return bld.toString();
+        }
+    }
+
+    public class Builder {
+        /**
+         * The next partitions which we want to fetch.
+         *
+         * It is important to maintain the insertion order of these partitions by using a LinkedHashMap rather
+         * than a regular Map.
+         *
+         * One reason is that when dealing with FULL fetch requests, if there is not enough response
+         * space to return data from all partitions, the server will only return data from partitions
+         * early in this list.
+         *
+         * Another reason is because we make use of the list ordering to optimize the preparation of
+         * incremental fetch requests (see below).
+         */
+        private LinkedHashMap<TopicPartition, PartitionData> next;
+        private Map<Uuid, String> topicNames;
+        private final boolean copySessionPartitions;
+        private int partitionsWithoutTopicIds = 0;
+
+        Builder() {
+            this.next = new LinkedHashMap<>();
+            this.topicNames = new HashMap<>();
+            this.copySessionPartitions = true;
+        }
+
+        Builder(int initialSize, boolean copySessionPartitions) {
+            this.next = new LinkedHashMap<>(initialSize);
+            this.topicNames = new HashMap<>();
+            this.copySessionPartitions = copySessionPartitions;
+        }
+
+        /**
+         * Mark that we want data from this partition in the upcoming fetch.
+         */
+        public void add(TopicPartition topicPartition, PartitionData data) {
+            next.put(topicPartition, data);
+            // topicIds should not change between adding partitions and building, so we can use putIfAbsent
+            if (data.topicId.equals(Uuid.ZERO_UUID)) {
+                partitionsWithoutTopicIds++;
+            } else {
+                topicNames.putIfAbsent(data.topicId, topicPartition.topic());
+            }
+        }
+
+        public FetchRequestData build() {
+            boolean canUseTopicIds = partitionsWithoutTopicIds == 0;
+
+            if (nextMetadata.isFull()) {
+                if (log.isDebugEnabled()) {
+                    log.debug("Built full fetch {} for node {} with {}.",
+                            nextMetadata, node, topicPartitionsToLogString(next.keySet()));
+                }
+                sessionPartitions = next;
+                next = null;
+                // Only add topic IDs to the session if we are using topic IDs.
+                if (canUseTopicIds) {
+                    sessionTopicNames = topicNames;
+                } else {
+                    sessionTopicNames = Collections.emptyMap();
+                }
+                Map<TopicPartition, PartitionData> toSend =
+                        Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions));
+                return new FetchRequestData(toSend, Collections.emptyList(), Collections.emptyList(), toSend, nextMetadata, canUseTopicIds);
+            }
+
+            List<TopicIdPartition> added = new ArrayList<>();
+            List<TopicIdPartition> removed = new ArrayList<>();
+            List<TopicIdPartition> altered = new ArrayList<>();
+            List<TopicIdPartition> replaced = new ArrayList<>();
+            for (Iterator<Entry<TopicPartition, PartitionData>> iter =
+                 sessionPartitions.entrySet().iterator(); iter.hasNext(); ) {
+                Entry<TopicPartition, PartitionData> entry = iter.next();
+                TopicPartition topicPartition = entry.getKey();
+                PartitionData prevData = entry.getValue();
+                PartitionData nextData = next.remove(topicPartition);
+                if (nextData != null) {
+                    // We check whether the new partition has the same topic ID. If not,
+                    // we add it to the "replaced" set. If the request is version 13 or higher, the replaced
+                    // partition will be forgotten. In any case, we will send the new partition in the request.
+                    if (!prevData.topicId.equals(nextData.topicId)
+                            && !prevData.topicId.equals(Uuid.ZERO_UUID)
+                            && !nextData.topicId.equals(Uuid.ZERO_UUID)) {
+                        // Re-add the replaced partition to the end of 'next'
+                        next.put(topicPartition, nextData);
+                        entry.setValue(nextData);
+                        replaced.add(new TopicIdPartition(prevData.topicId, topicPartition));
+                    } else if (!prevData.equals(nextData)) {
+                        // Re-add the altered partition to the end of 'next'
+                        next.put(topicPartition, nextData);
+                        entry.setValue(nextData);
+                        altered.add(new TopicIdPartition(nextData.topicId, topicPartition));
+                    }
+                } else {
+                    // Remove this partition from the session.
+                    iter.remove();
+                    // Indicate that we no longer want to listen to this partition.
+                    removed.add(new TopicIdPartition(prevData.topicId, topicPartition));
+                    // If we do not have this topic ID in the builder or the session, we cannot use topic IDs.
+                    if (canUseTopicIds && prevData.topicId.equals(Uuid.ZERO_UUID))
+                        canUseTopicIds = false;
+                }
+            }
+            // Add any new partitions to the session.
+            for (Entry<TopicPartition, PartitionData> entry : next.entrySet()) {
+                TopicPartition topicPartition = entry.getKey();
+                PartitionData nextData = entry.getValue();
+                if (sessionPartitions.containsKey(topicPartition)) {
+                    // In the previous loop, all the partitions which existed in both sessionPartitions
+                    // and next were moved to the end of next, or removed from next.  Therefore,
+                    // once we hit one of them, we know there are no more unseen entries to look
+                    // at in next.
+                    break;
+                }
+                sessionPartitions.put(topicPartition, nextData);
+                added.add(new TopicIdPartition(nextData.topicId, topicPartition));
+            }
+
+            // Add topic IDs to the session if we can use them. If an ID is inconsistent, it will be handled by the receiving broker.
+            // If we switched from using topic IDs to not using them (or vice versa), that error will also be handled by the receiving broker.
+            if (canUseTopicIds) {
+                sessionTopicNames = topicNames;
+            } else {
+                sessionTopicNames = Collections.emptyMap();
+            }
+
+            if (log.isDebugEnabled()) {
+                log.debug("Built incremental fetch {} for node {}. Added {}, altered {}, removed {}, " +
+                          "replaced {} out of {}", nextMetadata, node, topicIdPartitionsToLogString(added),
+                          topicIdPartitionsToLogString(altered), topicIdPartitionsToLogString(removed),
+                          topicIdPartitionsToLogString(replaced), topicPartitionsToLogString(sessionPartitions.keySet()));
+            }
+            Map<TopicPartition, PartitionData> toSend = Collections.unmodifiableMap(next);
+            Map<TopicPartition, PartitionData> curSessionPartitions = copySessionPartitions
+                    ? Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions))
+                    : Collections.unmodifiableMap(sessionPartitions);
+            next = null;
+            return new FetchRequestData(toSend,
+                    Collections.unmodifiableList(removed),
+                    Collections.unmodifiableList(replaced),
+                    curSessionPartitions,
+                    nextMetadata,
+                    canUseTopicIds);
+        }
+    }
+
+    public Builder newBuilder() {
+        return new Builder();
+    }
+
+
+    /** A builder that allows for presizing the PartitionData hashmap and avoiding a
+     *  secondary copy of the sessionPartitions, in cases where this is not necessary.
+     *  This builder is primarily for use by the replica fetcher.
+     * @param size the initial size of the PartitionData hashmap
+     * @param copySessionPartitions boolean denoting whether the builder should make a deep copy of
+     *                              session partitions
+     */
+    public Builder newBuilder(int size, boolean copySessionPartitions) {
+        return new Builder(size, copySessionPartitions);
+    }
+
+    private String topicPartitionsToLogString(Collection<TopicPartition> partitions) {
+        if (!log.isTraceEnabled()) {
+            return String.format("%d partition(s)", partitions.size());
+        }
+        return "(" + Utils.join(partitions, ", ") + ")";
+    }
+
+    private String topicIdPartitionsToLogString(Collection<TopicIdPartition> partitions) {
+        if (!log.isTraceEnabled()) {
+            return String.format("%d partition(s)", partitions.size());
+        }
+        return "(" + Utils.join(partitions, ", ") + ")";
+    }
+
+    /**
+     * Return missing items which are expected to be in a particular set, but which are not.
+     *
+     * @param toFind    The items to look for.
+     * @param toSearch  The set of items to search.
+     * @return          An empty set if all items were found; otherwise, the set of missing items.
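+     *                  For example, {@code findMissing({a, b}, {a})} returns {@code {b}}.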
+     */
+    static <T> Set<T> findMissing(Set<T> toFind, Set<T> toSearch) {
+        Set<T> ret = new LinkedHashSet<>();
+        for (T toFindItem: toFind) {
+            if (!toSearch.contains(toFindItem)) {
+                ret.add(toFindItem);
+            }
+        }
+        return ret;
+    }
+
+    /**
+     * Verify that a full fetch response contains all the partitions in the fetch session.
+     *
+     * @param topicPartitions  The topicPartitions from the FetchResponse.
+     * @param ids              The topic IDs from the FetchResponse.
+     * @param version          The version of the FetchResponse.
+     * @return                 null if the full fetch response partitions are valid; human-readable problem description otherwise.
+     */
+    String verifyFullFetchResponsePartitions(Set<TopicPartition> topicPartitions, Set<Uuid> ids, short version) {
+        StringBuilder bld = new StringBuilder();
+        Set<TopicPartition> extra =
+            findMissing(topicPartitions, sessionPartitions.keySet());
+        Set<TopicPartition> omitted =
+            findMissing(sessionPartitions.keySet(), topicPartitions);
+        Set<Uuid> extraIds = new HashSet<>();
+        if (version >= 13) {
+            extraIds = findMissing(ids, sessionTopicNames.keySet());
+        }
+        if (!omitted.isEmpty()) {
+            bld.append("omittedPartitions=(").append(Utils.join(omitted, ", ")).append("), ");
+        }
+        if (!extra.isEmpty()) {
+            bld.append("extraPartitions=(").append(Utils.join(extra, ", ")).append("), ");
+        }
+        if (!extraIds.isEmpty()) {
+            bld.append("extraIds=(").append(Utils.join(extraIds, ", ")).append("), ");
+        }
+        if ((!omitted.isEmpty()) || (!extra.isEmpty()) || (!extraIds.isEmpty())) {
+            bld.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")");
+            return bld.toString();
+        }
+        return null;
+    }
+
+    /**
+     * Verify that the partitions in an incremental fetch response are contained in the session.
+     *
+     * @param topicPartitions  The topicPartitions from the FetchResponse.
+     * @param ids              The topic IDs from the FetchResponse.
+     * @param version          The version of the FetchResponse.
+     * @return                 null if the incremental fetch response partitions are valid; human-readable problem description otherwise.
+     */
+    String verifyIncrementalFetchResponsePartitions(Set<TopicPartition> topicPartitions, Set<Uuid> ids, short version) {
+        Set<Uuid> extraIds = new HashSet<>();
+        if (version >= 13) {
+            extraIds = findMissing(ids, sessionTopicNames.keySet());
+        }
+        Set<TopicPartition> extra =
+            findMissing(topicPartitions, sessionPartitions.keySet());
+        StringBuilder bld = new StringBuilder();
+        if (!extra.isEmpty())
+            bld.append("extraPartitions=(").append(Utils.join(extra, ", ")).append("), ");
+        if (!extraIds.isEmpty())
+            bld.append("extraIds=(").append(Utils.join(extraIds, ", ")).append("), ");
+        if ((!extra.isEmpty()) || (!extraIds.isEmpty())) {
+            bld.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")");
+            return bld.toString();
+        }
+        return null;
+    }
+
+    /**
+     * Create a string describing the partitions in a FetchResponse.
+     *
+     * @param topicPartitions  The topicPartitions from the FetchResponse.
+     * @return                 The string to log.
+     */
+    private String responseDataToLogString(Set<TopicPartition> topicPartitions) {
+        if (!log.isTraceEnabled()) {
+            int implied = sessionPartitions.size() - topicPartitions.size();
+            if (implied > 0) {
+                return String.format(" with %d response partition(s), %d implied partition(s)",
+                    topicPartitions.size(), implied);
+            } else {
+                return String.format(" with %d response partition(s)",
+                    topicPartitions.size());
+            }
+        }
+        StringBuilder bld = new StringBuilder();
+        bld.append(" with response=(").
+            append(Utils.join(topicPartitions, ", ")).
+            append(")");
+        String prefix = ", implied=(";
+        String suffix = "";
+        for (TopicPartition partition : sessionPartitions.keySet()) {
+            if (!topicPartitions.contains(partition)) {
+                bld.append(prefix);
+                bld.append(partition);
+                prefix = ", ";
+                suffix = ")";
+            }
+        }
+        bld.append(suffix);
+        return bld.toString();
+    }
+
+    /**
+     * Handle the fetch response.
+     *
+     * @param response  The response.
+     * @param version   The version of the request.
+     * @return          True if the response is well-formed; false if it can't be processed
+     *                  because of missing or unexpected partitions.
+     */
+    public boolean handleResponse(FetchResponse response, short version) {
+        if (response.error() != Errors.NONE) {
+            log.info("Node {} was unable to process the fetch request with {}: {}.",
+                node, nextMetadata, response.error());
+            if (response.error() == Errors.FETCH_SESSION_ID_NOT_FOUND) {
+                nextMetadata = FetchMetadata.INITIAL;
+            } else {
+                nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
+            }
+            return false;
+        }
+        Set<TopicPartition> topicPartitions = response.responseData(sessionTopicNames, version).keySet();
+        if (nextMetadata.isFull()) {
+            if (topicPartitions.isEmpty() && response.throttleTimeMs() > 0) {
+                // Normally, an empty full fetch response would be invalid.  However, KIP-219
+                // specifies that if the broker wants to throttle the client, it will respond
+                // to a full fetch request with an empty response and a throttleTimeMs
+                // value set.  We don't want to log this with a warning, since it's not an error.
+                // However, the empty full fetch response can't be processed, so it's still appropriate
+                // to return false here.
+                if (log.isDebugEnabled()) {
+                    log.debug("Node {} sent an empty full fetch response to indicate that this " +
+                        "client should be throttled for {} ms.", node, response.throttleTimeMs());
+                }
+                nextMetadata = FetchMetadata.INITIAL;
+                return false;
+            }
+            String problem = verifyFullFetchResponsePartitions(topicPartitions, response.topicIds(), version);
+            if (problem != null) {
+                log.info("Node {} sent an invalid full fetch response with {}", node, problem);
+                nextMetadata = FetchMetadata.INITIAL;
+                return false;
+            } else if (response.sessionId() == INVALID_SESSION_ID) {
+                if (log.isDebugEnabled())
+                    log.debug("Node {} sent a full fetch response{}", node, responseDataToLogString(topicPartitions));
+                nextMetadata = FetchMetadata.INITIAL;
+                return true;
+            } else {
+                // The server created a new incremental fetch session.
+                if (log.isDebugEnabled())
+                    log.debug("Node {} sent a full fetch response that created a new incremental " +
+                            "fetch session {}{}", node, response.sessionId(), responseDataToLogString(topicPartitions));
+                nextMetadata = FetchMetadata.newIncremental(response.sessionId());
+                return true;
+            }
+        } else {
+            String problem = verifyIncrementalFetchResponsePartitions(topicPartitions, response.topicIds(), version);
+            if (problem != null) {
+                log.info("Node {} sent an invalid incremental fetch response with {}", node, problem);
+                nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
+                return false;
+            } else if (response.sessionId() == INVALID_SESSION_ID) {
+                // The incremental fetch session was closed by the server.
+                if (log.isDebugEnabled())
+                    log.debug("Node {} sent an incremental fetch response closing session {}{}",
+                            node, nextMetadata.sessionId(), responseDataToLogString(topicPartitions));
+                nextMetadata = FetchMetadata.INITIAL;
+                return true;
+            } else {
+                // The incremental fetch session was continued by the server.
+                // We don't have to do anything special here to support KIP-219, since an empty incremental
+                // fetch request is perfectly valid.
+                if (log.isDebugEnabled())
+                    log.debug("Node {} sent an incremental fetch response with throttleTimeMs = {} " +
+                        "for session {}{}", node, response.throttleTimeMs(), response.sessionId(),
+                        responseDataToLogString(topicPartitions));
+                nextMetadata = nextMetadata.nextIncremental();
+                return true;
+            }
+        }
+    }
+
+    /**
+     * The client will initiate the session close on next fetch request.
+     */
+    public void notifyClose() {
+        log.debug("Set the metadata for next fetch request to close the existing session ID={}", nextMetadata.sessionId());
+        nextMetadata = nextMetadata.nextCloseExisting();
+    }
+
+    /**
+     * Handle an error sending the prepared request.
+     *
+     * When a network error occurs, we close any existing fetch session on our next request,
+     * and try to create a new session.
+     *
+     * @param t     The exception.
+     */
+    public void handleError(Throwable t) {
+        log.info("Error sending fetch request {} to node {}:", nextMetadata, node, t);
+        nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
+    }
+
+    /**
+     * Get the fetch request session's partitions.
+     */
+    public Set<TopicPartition> sessionTopicPartitions() {
+        return sessionPartitions.keySet();
+    }
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java
new file mode 100644
index 0000000..006800a
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.requests.JoinGroupRequest;
+
+import java.util.Locale;
+import java.util.Optional;
+
+/**
+ * Class to extract group rebalance related configs.
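+ *
+ * Note that the rebalance timeout comes from <code>max.poll.interval.ms</code> for the
+ * CONSUMER protocol type and from <code>rebalance.timeout.ms</code> for CONNECT
+ * (see the constructor below).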
+ */
+public class GroupRebalanceConfig {
+
+    public enum ProtocolType {
+        CONSUMER,
+        CONNECT;
+
+        @Override
+        public String toString() {
+            return super.toString().toLowerCase(Locale.ROOT);
+        }
+    }
+
+    public final int sessionTimeoutMs;
+    public final int rebalanceTimeoutMs;
+    public final int heartbeatIntervalMs;
+    public final String groupId;
+    public final Optional<String> groupInstanceId;
+    public final long retryBackoffMs;
+    public final boolean leaveGroupOnClose;
+
+    public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) {
+        this.sessionTimeoutMs = config.getInt(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG);
+
+        // Consumer and Connect use different config names for defining rebalance timeout
+        if (protocolType == ProtocolType.CONSUMER) {
+            this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG);
+        } else {
+            this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG);
+        }
+
+        this.heartbeatIntervalMs = config.getInt(CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG);
+        this.groupId = config.getString(CommonClientConfigs.GROUP_ID_CONFIG);
+
+        // Static membership is only introduced in consumer API.
+        if (protocolType == ProtocolType.CONSUMER) {
+            String groupInstanceId = config.getString(CommonClientConfigs.GROUP_INSTANCE_ID_CONFIG);
+            if (groupInstanceId != null) {
+                JoinGroupRequest.validateGroupInstanceId(groupInstanceId);
+                this.groupInstanceId = Optional.of(groupInstanceId);
+            } else {
+                this.groupInstanceId = Optional.empty();
+            }
+        } else {
+            this.groupInstanceId = Optional.empty();
+        }
+
+        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
+
+        // Internal leave group config is only defined in Consumer.
+        if (protocolType == ProtocolType.CONSUMER) {
+            this.leaveGroupOnClose = config.getBoolean("internal.leave.group.on.close");
+        } else {
+            this.leaveGroupOnClose = true;
+        }
+    }
+
+    // For testing purpose.
+    public GroupRebalanceConfig(final int sessionTimeoutMs,
+                                final int rebalanceTimeoutMs,
+                                final int heartbeatIntervalMs,
+                                String groupId,
+                                Optional<String> groupInstanceId,
+                                long retryBackoffMs,
+                                boolean leaveGroupOnClose) {
+        this.sessionTimeoutMs = sessionTimeoutMs;
+        this.rebalanceTimeoutMs = rebalanceTimeoutMs;
+        this.heartbeatIntervalMs = heartbeatIntervalMs;
+        this.groupId = groupId;
+        this.groupInstanceId = groupInstanceId;
+        this.retryBackoffMs = retryBackoffMs;
+        this.leaveGroupOnClose = leaveGroupOnClose;
+    }
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/HostResolver.java b/clients/src/main/java/org/apache/kafka/clients/HostResolver.java
new file mode 100644
index 0000000..80209ca
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/HostResolver.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.clients;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+public interface HostResolver {
+
+    InetAddress[] resolve(String host) throws UnknownHostException;
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
new file mode 100644
index 0000000..6f5477e
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * The set of requests which have been sent or are being sent but haven't yet received a response
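+ *
+ * Requests are added at the head of each per-node deque, so the tail holds the oldest
+ * request: completeNext() polls the tail (FIFO completion order), while completeLastSent()
+ * and lastSent() operate on the head.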
+ */
+final class InFlightRequests {
+
+    private final int maxInFlightRequestsPerConnection;
+    private final Map<String, Deque<NetworkClient.InFlightRequest>> requests = new HashMap<>();
+    /** Thread safe total number of in flight requests. */
+    private final AtomicInteger inFlightRequestCount = new AtomicInteger(0);
+
+    public InFlightRequests(int maxInFlightRequestsPerConnection) {
+        this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
+    }
+
+    /**
+     * Add the given request to the queue for the connection it was directed to
+     */
+    public void add(NetworkClient.InFlightRequest request) {
+        String destination = request.destination;
+        Deque<NetworkClient.InFlightRequest> reqs = this.requests.get(destination);
+        if (reqs == null) {
+            reqs = new ArrayDeque<>();
+            this.requests.put(destination, reqs);
+        }
+        reqs.addFirst(request);
+        inFlightRequestCount.incrementAndGet();
+    }
+
+    /**
+     * Get the request queue for the given node
+     */
+    private Deque<NetworkClient.InFlightRequest> requestQueue(String node) {
+        Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
+        if (reqs == null || reqs.isEmpty())
+            throw new IllegalStateException("There are no in-flight requests for node " + node);
+        return reqs;
+    }
+
+    /**
+     * Get the oldest request (the one that will be completed next) for the given node
+     */
+    public NetworkClient.InFlightRequest completeNext(String node) {
+        NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollLast();
+        inFlightRequestCount.decrementAndGet();
+        return inFlightRequest;
+    }
+
+    /**
+     * Get the last request we sent to the given node (but don't remove it from the queue)
+     * @param node The node id
+     */
+    public NetworkClient.InFlightRequest lastSent(String node) {
+        return requestQueue(node).peekFirst();
+    }
+
+    /**
+     * Complete the last request that was sent to a particular node.
+     * @param node The node the request was sent to
+     * @return The request
+     */
+    public NetworkClient.InFlightRequest completeLastSent(String node) {
+        NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
+        inFlightRequestCount.decrementAndGet();
+        return inFlightRequest;
+    }
+
+    /**
+     * Can we send more requests to this node?
+     *
+     * @param node Node in question
+     * @return true iff no request is still in transit to the given node and the per-connection in-flight limit has not been reached
+     */
+    public boolean canSendMore(String node) {
+        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
+        return queue == null || queue.isEmpty() ||
+               (queue.peekFirst().send.completed() && queue.size() < this.maxInFlightRequestsPerConnection);
+    }
+
+    /**
+     * Return the number of in-flight requests directed at the given node
+     * @param node The node
+     * @return The request count.
+     */
+    public int count(String node) {
+        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
+        return queue == null ? 0 : queue.size();
+    }
+
+    /**
+     * Return true if there is no in-flight request directed at the given node and false otherwise
+     */
+    public boolean isEmpty(String node) {
+        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
+        return queue == null || queue.isEmpty();
+    }
+
+    /**
+     * Count all in-flight requests for all nodes. This method is thread safe, but may lag the actual count.
+     */
+    public int count() {
+        return inFlightRequestCount.get();
+    }
+
+    /**
+     * Return true if there is no in-flight request and false otherwise
+     */
+    public boolean isEmpty() {
+        for (Deque<NetworkClient.InFlightRequest> deque : this.requests.values()) {
+            if (!deque.isEmpty())
+                return false;
+        }
+        return true;
+    }
+
+    /**
+     * Clear out all the in-flight requests for the given node and return them
+     *
+     * @param node The node
+     * @return All the in-flight requests for that node that have been removed
+     */
+    public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
+        Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
+        if (reqs == null) {
+            return Collections.emptyList();
+        } else {
+            final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
+            inFlightRequestCount.getAndAdd(-clearedRequests.size());
+            return () -> clearedRequests.descendingIterator();
+        }
+    }
+
+    private boolean hasExpiredRequest(long now, Deque<NetworkClient.InFlightRequest> deque) {
+        for (NetworkClient.InFlightRequest request : deque) {
+            if (request.timeElapsedSinceSendMs(now) > request.requestTimeoutMs)
+                return true;
+        }
+        return false;
+    }
+
+    /**
+     * Returns a list of nodes with pending in-flight requests that need to be timed out
+     *
+     * @param now current time in milliseconds
+     * @return list of nodes
+     */
+    public List<String> nodesWithTimedOutRequests(long now) {
+        List<String> nodeIds = new ArrayList<>();
+        for (Map.Entry<String, Deque<NetworkClient.InFlightRequest>> requestEntry : requests.entrySet()) {
+            String nodeId = requestEntry.getKey();
+            Deque<NetworkClient.InFlightRequest> deque = requestEntry.getValue();
+            if (hasExpiredRequest(now, deque))
+                nodeIds.add(nodeId);
+        }
+        return nodeIds;
+    }
+
+}
diff --git a/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
new file mode 100644
index 0000000..18a7eef
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.errors.AuthenticationException;
+import org.apache.kafka.common.requests.AbstractRequest;
+
+import java.io.Closeable;
+import java.util.List;
+
+/**
+ * The interface for {@link NetworkClient}
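+ *
+ * A typical send/receive cycle (sketch):
+ * <pre>
+ *   if (client.ready(node, now))
+ *       client.send(clientRequest, now);
+ *   List&lt;ClientResponse&gt; responses = client.poll(timeoutMs, now);
+ * </pre>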
+ */
+public interface KafkaClient extends Closeable {
+
+    /**
+     * Check if we are currently ready to send another request to the given node but don't attempt to connect if we
+     * aren't.
+     *
+     * @param node The node to check
+     * @param now The current timestamp
+     */
+    boolean isReady(Node node, long now);
+
+    /**
+     * Initiate a connection to the given node (if necessary), and return true if already connected. The readiness of a
+     * node will change only when poll is invoked.
+     *
+     * @param node The node to connect to.
+     * @param now The current time
+     * @return true iff we are ready to immediately initiate the sending of another request to the given node.
+     */
+    boolean ready(Node node, long now);
+
+    /**
+     * Return the number of milliseconds to wait, based on the connection state, before attempting to send data. When
+     * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
+     * connections.
+     *
+     * @param node The node to check
+     * @param now The current timestamp
+     * @return The number of milliseconds to wait.
+     */
+    long connectionDelay(Node node, long now);
+
+    /**
+     * Return the number of milliseconds to wait, based on the connection state and the throttle time, before
+     * attempting to send data. If the connection has been established but is being throttled, return the throttle
+     * delay. Otherwise, return the connection delay.
+     *
+     * @param node the connection to check
+     * @param now the current time in ms
+     */
+    long pollDelayMs(Node node, long now);
+
+    /**
+     * Check if the connection to the node has failed, based on the connection state. Such connection failures are
+     * usually transient and the connection can be re-established on the next
+     * {@link #ready(org.apache.kafka.common.Node, long)} call, but there are cases where transient failures need to
+     * be caught and reacted upon.
+     *
+     * @param node the node to check
+     * @return true iff the connection has failed and the node is disconnected
+     */
+    boolean connectionFailed(Node node);
+
+    /**
+     * Check if authentication to this node has failed, based on the connection state. Authentication failures are
+     * propagated without any retries.
+     *
+     * @param node the node to check
+     * @return an AuthenticationException iff authentication has failed, null otherwise
+     */
+    AuthenticationException authenticationException(Node node);
+
+    /**
+     * Queue up the given request for sending. Requests can only be sent on ready connections.
+     * @param request The request
+     * @param now The current timestamp
+     */
+    void send(ClientRequest request, long now);
+
+    /**
+     * Do actual reads and writes from sockets.
+     *
+     * @param timeout The maximum amount of time to wait for responses in ms, must be non-negative. The implementation
+     *                is free to use a lower value if appropriate (common reasons for this are a lower request or
+     *                metadata update timeout)
+     * @param now The current time in ms
+     * @throws IllegalStateException If a request is sent to an unready node
+     */
+    List<ClientResponse> poll(long timeout, long now);
+
+    /**
+     * Disconnects the connection to a particular node, if there is one.
+     * Any pending ClientRequests for this connection will be completed with a disconnection.
+     *
+     * @param nodeId The id of the node
+     */
+    void disconnect(String nodeId);
+
+    /**
+     * Closes the connection to a particular node (if there is one).
+     * All requests on the connection will be cleared.  ClientRequest callbacks will not be invoked
+     * for the cleared requests, nor will they be returned from poll().
+     *
+     * @param nodeId The id of the node
+     */
+    void close(String nodeId);
+
+    /**
+     * Choose the node with the fewest outstanding requests. This method will prefer a node with an existing connection,
+     * but will potentially choose a node for which we don't yet have a connection if all existing connections are in
+     * use.
+     *
+     * @param now The current time in ms
+     * @return The node with the fewest in-flight requests.
+     */
+    Node leastLoadedNode(long now);
+
+    /**
+     * The number of currently in-flight requests for which we have not yet returned a response
+     */
+    int inFlightRequestCount();
+
+    /**
+     * Return true if there is at least one in-flight request and false otherwise.
+     */
+    boolean hasInFlightRequests();
+
+    /**
+     * Get the total number of in-flight requests for a particular node.
+     *
+     * @param nodeId The id of the node
+     */
+    int inFlightRequestCount(String nodeId);
+
+    /**
+     * Return true if there is at least one in-flight request for a particular node and false otherwise.
+     */
+    boolean hasInFlightRequests(String nodeId);
+
+    /**
+     * Return true if there is at least one node with a connection in the READY state that is not throttled, and
+     * false otherwise.
+     *
+     * @param now the current time
+     */
+    boolean hasReadyNodes(long now);
+
+    /**
+     * Wake up the client if it is currently blocked waiting for I/O
+     */
+    void wakeup();
+
+    /**
+     * Create a new ClientRequest.
+     *
+     * @param nodeId the node to send to
+     * @param requestBuilder the request builder to use
+     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
+     * @param expectResponse true iff we expect a response
+     */
+    ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder,
+                                   long createdTimeMs, boolean expectResponse);
+
+    /**
+     * Create a new ClientRequest.
+     *
+     * @param nodeId the node to send to
+     * @param requestBuilder the request builder to use
+     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
+     * @param expectResponse true iff we expect a response
+     * @param requestTimeoutMs Upper bound time in milliseconds to await a response before disconnecting the socket and
+     *                         cancelling the request. The request may get cancelled sooner if the socket disconnects
+     *                         for any reason including if another pending request to the same node timed out first.
+     * @param callback the callback to invoke when we get a response
+     */
+    ClientRequest newClientRequest(String nodeId,
+                                   AbstractRequest.Builder<?> requestBuilder,
+                                   long createdTimeMs,
+                                   boolean expectResponse,
+                                   int requestTimeoutMs,
+                                   RequestCompletionHandler callback);
+
+
+    /**
+     * Initiates shutdown of this client. This method may be invoked from another thread while this
+     * client is being polled. No further requests may be sent using the client. The current poll()
+     * will be terminated using wakeup(). The client should be explicitly shut down using {@link #close()}
+     * after poll returns. Note that {@link #close()} should not be invoked concurrently while polling.
+     */
+    void initiateClose();
+
+    /**
+     * Returns true if the client is still active. Returns false if {@link #initiateClose()} or {@link #close()}
+     * was invoked for this client.
+     */
+    boolean active();
+
+}
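
Taken together, the methods above imply the usual ready/send/poll cycle. A
hedged sketch of that cycle follows; client, node, and builder are assumed
to be in scope, and the snippet is illustrative rather than code from this
diff:

    // Illustrative only: queue a request on a ready connection, then poll.
    long now = System.currentTimeMillis();
    if (client.ready(node, now)) {
        // Requests may only be sent on ready connections.
        ClientRequest request = client.newClientRequest(
                node.idString(), builder, now, true);
        client.send(request, now);
    }
    // poll() performs the actual socket I/O; completed responses are
    // returned and any registered RequestCompletionHandler is invoked.
    List<ClientResponse> responses = client.poll(client.pollDelayMs(node, now), now);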
diff --git a/clients/src/main/java/org/apache/kafka/clients/ManualMetadataUpdater.java b/clients/src/main/java/org/apache/kafka/clients/ManualMetadataUpdater.java
new file mode 100644
index 0000000..3d51549
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/ManualMetadataUpdater.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.errors.AuthenticationException;
+import org.apache.kafka.common.requests.MetadataResponse;
+import org.apache.kafka.common.requests.RequestHeader;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * A simple implementation of `MetadataUpdater` that returns the cluster nodes set via the constructor or via
+ * `setNodes`.
+ *
+ * This is useful in cases where automatic metadata updates are not required. An example is controller/broker
+ * communication.
+ *
+ * This class is not thread-safe!
+ */
+public class ManualMetadataUpdater implements MetadataUpdater {
+    private List<Node> nodes;
+
+    public ManualMetadataUpdater() {
+        this(new ArrayList<>(0));
+    }
+
+    public ManualMetadataUpdater(List<Node> nodes) {
+        this.nodes = nodes;
+    }
+
+    public void setNodes(List<Node> nodes) {
+        this.nodes = nodes;
+    }
+
+    @Override
+    public List<Node> fetchNodes() {
+        return new ArrayList<>(nodes);
+    }
+
+    @Override
+    public boolean isUpdateDue(long now) {
+        return false;
+    }
+
+    @Override
+    public long maybeUpdate(long now) {
+        return Long.MAX_VALUE;
+    }
+
+    @Override
+    public void handleServerDisconnect(long now, String nodeId, Optional<AuthenticationException> maybeAuthException) {
+        // We don't fail the broker on failures. There should be sufficient information from
+        // the NetworkClient logs to indicate the reason for the failure.
+    }
+
+    @Override
+    public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
+        // Do nothing
+    }
+
+    @Override
+    public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
+        // Do nothing
+    }
+
+    @Override
+    public void close() {
+    }
+}
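
Because ManualMetadataUpdater never issues MetadataRequests of its own, it
suits channels whose node set is managed externally. A small illustrative
sketch, with placeholder host and port values:

    // Illustrative only: a fixed node set, refreshed by the caller rather
    // than by automatic metadata updates. Host/port values are placeholders.
    ManualMetadataUpdater updater = new ManualMetadataUpdater(
            Collections.singletonList(new Node(0, "broker-0.internal", 9092)));
    // Later, when the caller learns of an updated broker set
    // (newNodes is an assumed List<Node> computed by the caller):
    updater.setNodes(newNodes);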
diff --git a/clients/src/main/java/org/apache/kafka/clients/Metadata.java b/clients/src/main/java/org/apache/kafka/clients/Metadata.java
new file mode 100644
index 0000000..c42eb47
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/Metadata.java
@@ -0,0 +1,642 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.Uuid;
+import org.apache.kafka.common.errors.InvalidMetadataException;
+import org.apache.kafka.common.errors.InvalidTopicException;
+import org.apache.kafka.common.errors.TopicAuthorizationException;
+import org.apache.kafka.common.internals.ClusterResourceListeners;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.requests.MetadataRequest;
+import org.apache.kafka.common.requests.MetadataResponse;
+import org.apache.kafka.common.utils.LogContext;
+import org.slf4j.Logger;
+
+import java.io.Closeable;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Supplier;
+
+import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH;
+
+/**
+ * A class encapsulating some of the logic around metadata.
+ * <p>
+ * This class is shared by the client thread (for partitioning) and the background sender thread.
+ *
+ * Metadata is maintained for only a subset of topics, which can be added to over time. Requesting metadata for a
+ * topic we don't yet have any metadata for will trigger a metadata update.
+ * <p>
+ * If topic expiry is enabled for the metadata, any topic that has not been used within the expiry interval
+ * is removed from the metadata refresh set after an update. Consumers disab