diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README-en.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README-en.md
index 1bc5eafb..497d25d0 100644
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README-en.md
+++ b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README-en.md
@@ -99,20 +99,9 @@ c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Initialize Start RequestId:
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13load code for handler:index.initialize
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] Servers: alikafka-pre-cn-7mz2sr1xa00c-1-vpc.alikafka.aliyuncs.com:9092
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] TopicName: HelloTopic
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] connect ok
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Initialize End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Invoke Start RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13load code for handler:index.handler
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:432022-08-08 10:34:43 1c233449-024d-4a67-8e7f-83fe3bab6bac [verbose] delivery-report err: null
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:432022-08-08 10:34:43 1c233449-024d-4a67-8e7f-83fe3bab6bac [verbose] delivery-report content: {
- topic: 'HelloTopic',
- partition: 0,
- offset: 16,
- key: null,
- timestamp: 1659926083428,
- value: ,
- size: 39
-}
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:43FC Invoke End RequestId: 1c233449-024d-4a67-8e7f-83fe3bab6bac
 Duration: 10006.53 ms, Billed Duration: 10007 ms, Memory Size: 128 MB, Max Memory Used: 52.28 MB
@@ -148,20 +137,9 @@ End of method: invoke
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13 load code for handler:index.initialize
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] Servers: alikafka-pre-cn-7mz2sr1xa00c-1-vpc.alikafka.aliyuncs.com:9092
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] TopicName: HelloTopic
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] connect ok
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Initialize End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Invoke Start RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13 load code for handler:index.handler
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] delivery-report err: null
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] delivery-report content: {
- topic: 'HelloTopic',
- partition: 8,
- offset: 19,
- key: null,
- timestamp: 1659925933345,
- value: ,
- size: 70
- }
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Invoke End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 ````
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README.md
index 7abdd23e..6bc82733 100644
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README.md
+++ b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/README.md
@@ -98,20 +98,9 @@ c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Initialize Start RequestId:
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13load code for handler:index.initialize
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] Servers: alikafka-pre-cn-7mz2sr1xa00c-1-vpc.alikafka.aliyuncs.com:9092
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] TopicName: HelloTopic
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:132022-08-08 10:34:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] connect ok
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Initialize End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13FC Invoke Start RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:13load code for handler:index.handler
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:432022-08-08 10:34:43 1c233449-024d-4a67-8e7f-83fe3bab6bac [verbose] delivery-report err: null
-c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:432022-08-08 10:34:43 1c233449-024d-4a67-8e7f-83fe3bab6bac [verbose] delivery-report content: {
- topic: 'HelloTopic',
- partition: 0,
- offset: 16,
- key: null,
- timestamp: 1659926083428,
- value: ,
- size: 39
-}
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:34:43FC Invoke End RequestId: 1c233449-024d-4a67-8e7f-83fe3bab6bac
 Duration: 10006.53 ms, Billed Duration: 10007 ms, Memory Size: 128 MB, Max Memory Used: 52.28 MB
@@ -148,20 +137,9 @@ End of method: invoke
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13load code for handler:index.initialize
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] Servers: alikafka-pre-cn-7mz2sr1xa00c-1-vpc.alikafka.aliyuncs.com:9092
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] TopicName: HelloTopic
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] connect ok
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Initialize End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Invoke Start RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13load code for handler:index.handler
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] delivery-report err: null
- c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:132022-08-08 10:32:13 c47410a0-ada0-45d8-863f-a9343feaa47e [verbose] delivery-report content: {
- topic: 'HelloTopic',
- partition: 8,
- offset: 19,
- key: null,
- timestamp: 1659925933345,
- value: ,
- size: 70
- }
 c-62f075a3-58d34b2b8b444083bb972022-08-08 10:32:13FC Invoke End RequestId: c47410a0-ada0-45d8-863f-a9343feaa47e
 ```
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README-en.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README-en.md
index 20aa314f..96376620 100644
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README-en.md
+++ b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README-en.md
@@ -99,8 +99,6 @@ load code for handler: index.initialize
 FC Initialize End RequestId: 9b230404-4e05-49ca-a3d6-3be4629b367f
 FC Invoke Start RequestId: 3f19de00-ab31-48b4-a386-03cded9f97ee
 load code for handler:index.handler
-2022-07-31 14:30:31 3f19de00-ab31-48b4-a386-03cded9f97ee [verbose] connect ok
-2022-07-31 14:30:36 3f19de00-ab31-48b4-a386-03cded9f97ee [verbose] delivery-report: producer ok
 FC Invoke End RequestId: 3f19de00-ab31-48b4-a386-03cded9f97ee
 ````
@@ -149,8 +147,6 @@ load code for handler: index.initialize
 FC Initialize End RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 FC Invoke Start RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 load code for handler:index.handler
-2022-07-31T06:22:37.621Z f9f17f0b-62fb-4ffa-853a-67fb316e0bf1 [verbose] connect ok
-2022-07-31T06:22:37.951Z f9f17f0b-62fb-4ffa-853a-67fb316e0bf1 [verbose] delivery-report: producer ok
 FC Invoke End RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 Duration: 10006.34 ms, Billed Duration: 10007 ms, Memory Size: 128 MB, Max Memory Used: 53.24 MB
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README.md
index 8fc03eac..c4821d67 100644
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README.md
+++ b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/README.md
@@ -99,8 +99,6 @@ load code for handler:index.initialize
 FC Initialize End RequestId: 9b230404-4e05-49ca-a3d6-3be4629b367f
 FC Invoke Start RequestId: 3f19de00-ab31-48b4-a386-03cded9f97ee
 load code for handler:index.handler
-2022-07-31 14:30:31 3f19de00-ab31-48b4-a386-03cded9f97ee [verbose] connect ok
-2022-07-31 14:30:36 3f19de00-ab31-48b4-a386-03cded9f97ee [verbose] delivery-report: producer ok
 FC Invoke End RequestId: 3f19de00-ab31-48b4-a386-03cded9f97ee
 ```
@@ -149,8 +147,6 @@ load code for handler:index.initialize
 FC Initialize End RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 FC Invoke Start RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 load code for handler:index.handler
-2022-07-31T06:22:37.621Z f9f17f0b-62fb-4ffa-853a-67fb316e0bf1 [verbose] connect ok
-2022-07-31T06:22:37.951Z f9f17f0b-62fb-4ffa-853a-67fb316e0bf1 [verbose] delivery-report: producer ok
 FC Invoke End RequestId: f9f17f0b-62fb-4ffa-853a-67fb316e0bf1
 Duration: 10006.34 ms, Billed Duration: 10007 ms, Memory Size: 128 MB, Max Memory Used: 53.24 MB
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/index.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/index.js
index d94fd67c..1455fd16 100644
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/index.js
+++ b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/index.js
@@ -1,80 +1,44 @@
 'use strict';
-
-const Kafka = require('node-rdkafka');
+const Kafka = require('kafkajs');
 var BOOTSTRAP_SERVERS
 var TOPIC_NAME
 var producer
-
-exports.initialize = (context, callback) => {
+exports.initialize = async (context, callback) => {
     BOOTSTRAP_SERVERS = process.env.BOOTSTRAP_SERVERS
     TOPIC_NAME = process.env.TOPIC_NAME
     console.log("Servers: ", BOOTSTRAP_SERVERS);
     console.log("TopicName: ", TOPIC_NAME);
-    producer = new Kafka.Producer({
-        'api.version.request': 'true',
-        'bootstrap.servers': BOOTSTRAP_SERVERS,
-        'dr_cb': true,
-        'dr_msg_cb': true,
-        'message.send.max.retries': 10
-    });
-
-    producer.on('event.log', function(event) {
-        console.log("event.log", event);
-        callback(new Error(event.message), "");
-    });
-
-    producer.on("error", function(err) {
-        console.log("error:" + err);
-        callback(err, "");
-    });
-
-    // Any errors we encounter, including connection errors
-    producer.on('event.error', function(err) {
-        console.error('event.error:' + err);
-        callback(err, "");
+    var servers = BOOTSTRAP_SERVERS.split(",");
+    const kafka = new Kafka.Kafka({
+        clientId: 'testId',
+        brokers: servers
     })
+    producer = kafka.producer()
+    await producer.connect()
-
-    // Poll for events every 10 ms
-    producer.setPollInterval(10);
-
-    producer.connect();
-
-    // Wait for the ready event before proceeding
-    producer.on('ready', function() {
-        console.log("connect ok")
-        callback(null, "");
-    });
-};
-
-exports.handler = async(event, context, callback) => {
-    // Wait for connection
-    producer.produce(
-        TOPIC_NAME,
-        null,
-        Buffer.from(event),
-        null,
-        Date.now()
-    );
-    producer.flush();
-
-    // waiting for sending
-    await producer.on('delivery-report', function(err, report) {
-        console.log("delivery-report err: ", err);
-        console.log("delivery-report content: ", report);
+    callback(null,"initialize");
+}
-        if (err == null) {
-            callback(null, "Finish sending the message:" + event);
-        } else {
-            callback(err, "Send message fail!");
-        }
-    });
+exports.handler = async (event, context, callback) => {
+    try {
+        await producer.send({
+            topic: TOPIC_NAME,
+            messages: [
+                { value: event },
+            ],
+        })
+        callback(null,"Finish sending the message:" + event);
+    } catch (e) {
+        console.log(e)
+        callback(e,"Send message fail!");
+    }
 }
-module.exports.preStop = function(context, callback) {
+exports.preStop = async function(context, callback) {
     console.log('preStop hook start');
     if (producer != null) {
-        producer.disconnect();
+        await producer.disconnect();
     }
     console.log('preStop hook finish');
     callback(null, "");
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/LICENSE.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/LICENSE.md
deleted file mode 100644
index 5a92289f..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/LICENSE.md
+++ /dev/null
@@ -1,22 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2012 Nathan Rajlich <nathan@tootallnate.net>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/README.md deleted file mode 100644 index 5b3e7a81..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/README.md +++ /dev/null @@ -1,98 +0,0 @@ -node-bindings -============= -### Helper module for loading your native module's `.node` file - -This is a helper module for authors of Node.js native addon modules. -It is basically the "swiss army knife" of `require()`ing your native module's -`.node` file. - -Throughout the course of Node's native addon history, addons have ended up being -compiled in a variety of different places, depending on which build tool and which -version of node was used. To make matters worse, now the `gyp` build tool can -produce either a __Release__ or __Debug__ build, each being built into different -locations. - -This module checks _all_ the possible locations that a native addon would be built -at, and returns the first one that loads successfully. - - -Installation ------------- - -Install with `npm`: - -``` bash -$ npm install --save bindings -``` - -Or add it to the `"dependencies"` section of your `package.json` file. - - -Example -------- - -`require()`ing the proper bindings file for the current node version, platform -and architecture is as simple as: - -``` js -var bindings = require('bindings')('binding.node') - -// Use your bindings defined in your C files -bindings.your_c_function() -``` - - -Nice Error Output ------------------ - -When the `.node` file could not be loaded, `node-bindings` throws an Error with -a nice error message telling you exactly what was tried. You can also check the -`err.tries` Array property. - -``` -Error: Could not load the bindings file. Tried: - → /Users/nrajlich/ref/build/binding.node - → /Users/nrajlich/ref/build/Debug/binding.node - → /Users/nrajlich/ref/build/Release/binding.node - → /Users/nrajlich/ref/out/Debug/binding.node - → /Users/nrajlich/ref/Debug/binding.node - → /Users/nrajlich/ref/out/Release/binding.node - → /Users/nrajlich/ref/Release/binding.node - → /Users/nrajlich/ref/build/default/binding.node - → /Users/nrajlich/ref/compiled/0.8.2/darwin/x64/binding.node - at bindings (/Users/nrajlich/ref/node_modules/bindings/bindings.js:84:13) - at Object. (/Users/nrajlich/ref/lib/ref.js:5:47) - at Module._compile (module.js:449:26) - at Object.Module._extensions..js (module.js:467:10) - at Module.load (module.js:356:32) - at Function.Module._load (module.js:312:12) - ... -``` - -The searching for the `.node` file will originate from the first directory in which has a `package.json` file is found. 
- -License -------- - -(The MIT License) - -Copyright (c) 2012 Nathan Rajlich <nathan@tootallnate.net> - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/bindings.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/bindings.js deleted file mode 100644 index 727413a1..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/bindings.js +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Module dependencies. - */ - -var fs = require('fs'), - path = require('path'), - fileURLToPath = require('file-uri-to-path'), - join = path.join, - dirname = path.dirname, - exists = - (fs.accessSync && - function(path) { - try { - fs.accessSync(path); - } catch (e) { - return false; - } - return true; - }) || - fs.existsSync || - path.existsSync, - defaults = { - arrow: process.env.NODE_BINDINGS_ARROW || ' → ', - compiled: process.env.NODE_BINDINGS_COMPILED_DIR || 'compiled', - platform: process.platform, - arch: process.arch, - nodePreGyp: - 'node-v' + - process.versions.modules + - '-' + - process.platform + - '-' + - process.arch, - version: process.versions.node, - bindings: 'bindings.node', - try: [ - // node-gyp's linked version in the "build" dir - ['module_root', 'build', 'bindings'], - // node-waf and gyp_addon (a.k.a node-gyp) - ['module_root', 'build', 'Debug', 'bindings'], - ['module_root', 'build', 'Release', 'bindings'], - // Debug files, for development (legacy behavior, remove for node v0.9) - ['module_root', 'out', 'Debug', 'bindings'], - ['module_root', 'Debug', 'bindings'], - // Release files, but manually compiled (legacy behavior, remove for node v0.9) - ['module_root', 'out', 'Release', 'bindings'], - ['module_root', 'Release', 'bindings'], - // Legacy from node-waf, node <= 0.4.x - ['module_root', 'build', 'default', 'bindings'], - // Production "Release" buildtype binary (meh...) 
- ['module_root', 'compiled', 'version', 'platform', 'arch', 'bindings'], - // node-qbs builds - ['module_root', 'addon-build', 'release', 'install-root', 'bindings'], - ['module_root', 'addon-build', 'debug', 'install-root', 'bindings'], - ['module_root', 'addon-build', 'default', 'install-root', 'bindings'], - // node-pre-gyp path ./lib/binding/{node_abi}-{platform}-{arch} - ['module_root', 'lib', 'binding', 'nodePreGyp', 'bindings'] - ] - }; - -/** - * The main `bindings()` function loads the compiled bindings for a given module. - * It uses V8's Error API to determine the parent filename that this function is - * being invoked from, which is then used to find the root directory. - */ - -function bindings(opts) { - // Argument surgery - if (typeof opts == 'string') { - opts = { bindings: opts }; - } else if (!opts) { - opts = {}; - } - - // maps `defaults` onto `opts` object - Object.keys(defaults).map(function(i) { - if (!(i in opts)) opts[i] = defaults[i]; - }); - - // Get the module root - if (!opts.module_root) { - opts.module_root = exports.getRoot(exports.getFileName()); - } - - // Ensure the given bindings name ends with .node - if (path.extname(opts.bindings) != '.node') { - opts.bindings += '.node'; - } - - // https://github.com/webpack/webpack/issues/4175#issuecomment-342931035 - var requireFunc = - typeof __webpack_require__ === 'function' - ? __non_webpack_require__ - : require; - - var tries = [], - i = 0, - l = opts.try.length, - n, - b, - err; - - for (; i < l; i++) { - n = join.apply( - null, - opts.try[i].map(function(p) { - return opts[p] || p; - }) - ); - tries.push(n); - try { - b = opts.path ? requireFunc.resolve(n) : requireFunc(n); - if (!opts.path) { - b.path = n; - } - return b; - } catch (e) { - if (e.code !== 'MODULE_NOT_FOUND' && - e.code !== 'QUALIFIED_PATH_RESOLUTION_FAILED' && - !/not find/i.test(e.message)) { - throw e; - } - } - } - - err = new Error( - 'Could not locate the bindings file. Tried:\n' + - tries - .map(function(a) { - return opts.arrow + a; - }) - .join('\n') - ); - err.tries = tries; - throw err; -} -module.exports = exports = bindings; - -/** - * Gets the filename of the JavaScript file that invokes this function. - * Used to help find the root directory of a module. - * Optionally accepts an filename argument to skip when searching for the invoking filename - */ - -exports.getFileName = function getFileName(calling_file) { - var origPST = Error.prepareStackTrace, - origSTL = Error.stackTraceLimit, - dummy = {}, - fileName; - - Error.stackTraceLimit = 10; - - Error.prepareStackTrace = function(e, st) { - for (var i = 0, l = st.length; i < l; i++) { - fileName = st[i].getFileName(); - if (fileName !== __filename) { - if (calling_file) { - if (fileName !== calling_file) { - return; - } - } else { - return; - } - } - } - }; - - // run the 'prepareStackTrace' function above - Error.captureStackTrace(dummy); - dummy.stack; - - // cleanup - Error.prepareStackTrace = origPST; - Error.stackTraceLimit = origSTL; - - // handle filename that starts with "file://" - var fileSchema = 'file://'; - if (fileName.indexOf(fileSchema) === 0) { - fileName = fileURLToPath(fileName); - } - - return fileName; -}; - -/** - * Gets the root directory of a module, given an arbitrary filename - * somewhere in the module tree. The "root directory" is the directory - * containing the `package.json` file. 
- * - * In: /home/nate/node-native-module/lib/index.js - * Out: /home/nate/node-native-module - */ - -exports.getRoot = function getRoot(file) { - var dir = dirname(file), - prev; - while (true) { - if (dir === '.') { - // Avoids an infinite loop in rare cases, like the REPL - dir = process.cwd(); - } - if ( - exists(join(dir, 'package.json')) || - exists(join(dir, 'node_modules')) - ) { - // Found the 'package.json' file or 'node_modules' dir; we're done - return dir; - } - if (prev === dir) { - // Got to the top - throw new Error( - 'Could not find module root given file: "' + - file + - '". Do you have a `package.json` file? ' - ); - } - // Try the parent dir next - prev = dir; - dir = join(dir, '..'); - } -}; diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/package.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/package.json deleted file mode 100644 index baae2760..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/bindings/package.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "_from": "bindings@^1.3.1", - "_id": "bindings@1.5.0", - "_inBundle": false, - "_integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "_location": "/bindings", - "_phantomChildren": {}, - "_requested": { - "type": "range", - "registry": true, - "raw": "bindings@^1.3.1", - "name": "bindings", - "escapedName": "bindings", - "rawSpec": "^1.3.1", - "saveSpec": null, - "fetchSpec": "^1.3.1" - }, - "_requiredBy": [ - "/node-rdkafka" - ], - "_resolved": "https://registry.npmmirror.com/bindings/-/bindings-1.5.0.tgz", - "_shasum": "10353c9e945334bc0511a6d90b38fbc7c9c504df", - "_spec": "bindings@^1.3.1", - "_where": "/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka", - "author": { - "name": "Nathan Rajlich", - "email": "nathan@tootallnate.net", - "url": "http://tootallnate.net" - }, - "bugs": { - "url": "https://github.com/TooTallNate/node-bindings/issues" - }, - "bundleDependencies": false, - "dependencies": { - "file-uri-to-path": "1.0.0" - }, - "deprecated": false, - "description": "Helper module for loading your native module's .node file", - "homepage": "https://github.com/TooTallNate/node-bindings", - "keywords": [ - "native", - "addon", - "bindings", - "gyp", - "waf", - "c", - "c++" - ], - "license": "MIT", - "main": "./bindings.js", - "name": "bindings", - "repository": { - "type": "git", - "url": "git://github.com/TooTallNate/node-bindings.git" - }, - "version": "1.5.0" -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.npmignore b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.npmignore deleted file mode 100644 index 07e6e472..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.npmignore +++ /dev/null @@ -1 +0,0 @@ -/node_modules diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.travis.yml b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.travis.yml deleted file mode 100644 index 016bb6e7..00000000 --- 
a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -sudo: false - -language: node_js - -node_js: - - "0.8" - - "0.10" - - "0.12" - - "1" - - "2" - - "3" - - "4" - - "5" - - "6" - - "7" - - "8" - -install: - - PATH="`npm bin`:`npm bin -g`:$PATH" - # Node 0.8 comes with a too obsolete npm - - if [[ "`node --version`" =~ ^v0\.8\. ]]; then npm install -g npm@1.4.28 ; fi - # Install dependencies and build - - npm install - -script: - # Output useful info for debugging - - node --version - - npm --version - # Run tests - - npm test diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/History.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/History.md deleted file mode 100644 index c8682be4..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/History.md +++ /dev/null @@ -1,21 +0,0 @@ - -1.0.0 / 2017-07-06 -================== - - * update "mocha" to v3 - * fixed unicode URI decoding (#6) - * add typings for Typescript - * README: use SVG Travis-CI badge - * add LICENSE file (MIT) - * add .travis.yml file (testing Node.js 0.8 through 8 currently) - * add README.md file - -0.0.2 / 2014-01-27 -================== - - * index: invert the path separators on Windows - -0.0.1 / 2014-01-27 -================== - - * initial commit diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/LICENSE b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/LICENSE deleted file mode 100644 index e1af7838..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Nathan Rajlich - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/README.md deleted file mode 100644 index ab30be8f..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/README.md +++ /dev/null @@ -1,74 +0,0 @@ -file-uri-to-path -================ -### Convert a `file:` URI to a file path -[![Build Status](https://travis-ci.org/TooTallNate/file-uri-to-path.svg?branch=master)](https://travis-ci.org/TooTallNate/file-uri-to-path) - -Accepts a `file:` URI and returns a regular file path suitable for use with the -`fs` module functions. - - -Installation ------------- - -Install with `npm`: - -``` bash -$ npm install file-uri-to-path -``` - - -Example -------- - -``` js -var uri2path = require('file-uri-to-path'); - -uri2path('file://localhost/c|/WINDOWS/clock.avi'); -// "c:\\WINDOWS\\clock.avi" - -uri2path('file:///c|/WINDOWS/clock.avi'); -// "c:\\WINDOWS\\clock.avi" - -uri2path('file://localhost/c:/WINDOWS/clock.avi'); -// "c:\\WINDOWS\\clock.avi" - -uri2path('file://hostname/path/to/the%20file.txt'); -// "\\\\hostname\\path\\to\\the file.txt" - -uri2path('file:///c:/path/to/the%20file.txt'); -// "c:\\path\\to\\the file.txt" -``` - - -API ---- - -### fileUriToPath(String uri) → String - - - -License -------- - -(The MIT License) - -Copyright (c) 2014 Nathan Rajlich <nathan@tootallnate.net> - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.d.ts b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.d.ts deleted file mode 100644 index 99dc3f96..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -declare function fileUriToPath(uri: string): string; -export = fileUriToPath; diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.js deleted file mode 100644 index 48cb280c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/index.js +++ /dev/null @@ -1,66 +0,0 @@ - -/** - * Module dependencies. - */ - -var sep = require('path').sep || '/'; - -/** - * Module exports. - */ - -module.exports = fileUriToPath; - -/** - * File URI to Path function. - * - * @param {String} uri - * @return {String} path - * @api public - */ - -function fileUriToPath (uri) { - if ('string' != typeof uri || - uri.length <= 7 || - 'file://' != uri.substring(0, 7)) { - throw new TypeError('must pass in a file:// URI to convert to a file path'); - } - - var rest = decodeURI(uri.substring(7)); - var firstSlash = rest.indexOf('/'); - var host = rest.substring(0, firstSlash); - var path = rest.substring(firstSlash + 1); - - // 2. Scheme Definition - // As a special case, can be the string "localhost" or the empty - // string; this is interpreted as "the machine from which the URL is - // being interpreted". - if ('localhost' == host) host = ''; - - if (host) { - host = sep + sep + host; - } - - // 3.2 Drives, drive letters, mount points, file system root - // Drive letters are mapped into the top of a file URI in various ways, - // depending on the implementation; some applications substitute - // vertical bar ("|") for the colon after the drive letter, yielding - // "file:///c|/tmp/test.txt". In some cases, the colon is left - // unchanged, as in "file:///c:/tmp/test.txt". In other cases, the - // colon is simply omitted, as in "file:///c/tmp/test.txt". 
- path = path.replace(/^(.+)\|/, '$1:'); - - // for Windows, we need to invert the path separators from what a URI uses - if (sep == '\\') { - path = path.replace(/\//g, '\\'); - } - - if (/^.+\:/.test(path)) { - // has Windows drive at beginning of path - } else { - // unix path… - path = sep + path; - } - - return host + path; -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/package.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/package.json deleted file mode 100644 index c3f61b1f..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "_from": "file-uri-to-path@1.0.0", - "_id": "file-uri-to-path@1.0.0", - "_inBundle": false, - "_integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "_location": "/file-uri-to-path", - "_phantomChildren": {}, - "_requested": { - "type": "version", - "registry": true, - "raw": "file-uri-to-path@1.0.0", - "name": "file-uri-to-path", - "escapedName": "file-uri-to-path", - "rawSpec": "1.0.0", - "saveSpec": null, - "fetchSpec": "1.0.0" - }, - "_requiredBy": [ - "/bindings" - ], - "_resolved": "https://registry.npmmirror.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "_shasum": "553a7b8446ff6f684359c445f1e37a05dacc33dd", - "_spec": "file-uri-to-path@1.0.0", - "_where": "/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/bindings", - "author": { - "name": "Nathan Rajlich", - "email": "nathan@tootallnate.net", - "url": "http://n8.io/" - }, - "bugs": { - "url": "https://github.com/TooTallNate/file-uri-to-path/issues" - }, - "bundleDependencies": false, - "deprecated": false, - "description": "Convert a file: URI to a file path", - "devDependencies": { - "mocha": "3" - }, - "directories": { - "test": "test" - }, - "homepage": "https://github.com/TooTallNate/file-uri-to-path", - "keywords": [ - "file", - "uri", - "convert", - "path" - ], - "license": "MIT", - "main": "index.js", - "name": "file-uri-to-path", - "repository": { - "type": "git", - "url": "git://github.com/TooTallNate/file-uri-to-path.git" - }, - "scripts": { - "test": "mocha --reporter spec" - }, - "types": "index.d.ts", - "version": "1.0.0" -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/test.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/test.js deleted file mode 100644 index 79305dca..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/test.js +++ /dev/null @@ -1,24 +0,0 @@ - -var sep = require('path').sep || '/'; -var assert = require('assert'); -var uri2path = require('../'); -var tests = require('./tests.json'); - -describe('file-uri-to-path', function () { - - Object.keys(tests).forEach(function (uri) { - - // the test cases were generated from Windows' PathCreateFromUrlA() function. - // On Unix, we have to replace the path separator with the Unix one instead of - // the Windows one. 
- var expected = tests[uri].replace(/\\/g, sep); - - it('should convert ' + JSON.stringify(uri) + ' to ' + JSON.stringify(expected), - function () { - var actual = uri2path(uri); - assert.equal(actual, expected); - }); - - }); - -}); diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/tests.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/tests.json deleted file mode 100644 index b935a639..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/file-uri-to-path/test/tests.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "file://host/path": "\\\\host\\path", - "file://localhost/etc/fstab": "\\etc\\fstab", - "file:///etc/fstab": "\\etc\\fstab", - "file:///c:/WINDOWS/clock.avi": "c:\\WINDOWS\\clock.avi", - "file://localhost/c|/WINDOWS/clock.avi": "c:\\WINDOWS\\clock.avi", - "file:///c|/WINDOWS/clock.avi": "c:\\WINDOWS\\clock.avi", - "file://localhost/c:/WINDOWS/clock.avi": "c:\\WINDOWS\\clock.avi", - "file://hostname/path/to/the%20file.txt": "\\\\hostname\\path\\to\\the file.txt", - "file:///c:/path/to/the%20file.txt": "c:\\path\\to\\the file.txt", - "file:///C:/Documents%20and%20Settings/davris/FileSchemeURIs.doc": "C:\\Documents and Settings\\davris\\FileSchemeURIs.doc", - "file:///C:/caf%C3%A9/%C3%A5r/d%C3%BCnn/%E7%89%9B%E9%93%83/Ph%E1%BB%9F/%F0%9F%98%B5.exe": "C:\\café\\år\\dünn\\牛铃\\Phở\\😵.exe" -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/CHANGELOG.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/CHANGELOG.md deleted file mode 100644 index d82f56fa..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/CHANGELOG.md +++ /dev/null @@ -1,545 +0,0 @@ -# NAN ChangeLog - -**Version 2.16.0: current Node 18.2.0, Node 0.12: 0.12.18, Node 0.10: 0.10.48, iojs: 3.3.1** - -### 2.16.0 May 25 2022 - - - Feature: Add support for Node 18 (#937) 16fa32231e2ccd89d2804b3f765319128b20c4ac - -### 2.15.0 Aug 4 2021 - - - Feature: add ScriptOrigin (#918) d09debf9eeedcb7ca4073e84ffe5fbb455ecb709 - -### 2.14.2 Oct 13 2020 - - - Bugfix: fix gcc 8 function cast warning (#899) 35f0fab205574b2cbda04e6347c8b2db755e124f - -### 2.14.1 Apr 21 2020 - - - Bugfix: use GetBackingStore() instead of GetContents() (#888) 2c023bd447661a61071da318b0ff4003c3858d39 - -### 2.14.0 May 16 2019 - - - Feature: Add missing methods to Nan::Maybe (#852) 4e962489fb84a184035b9fa74f245f650249aca6 - -### 2.13.2 Mar 24 2019 - - - Bugfix: remove usage of deprecated `IsNearDeath` (#842) fbaf42252af279c3d867c6b193571f9711c39847 - -### 2.13.1 Mar 14 2019 - - - Bugfix: check V8 version directly instead of inferring from NMV (#840) 12f9df9f393285de8fb4a8cd01478dc4fe3b089d - -### 2.13.0 Mar 13 2019 - - - Feature: add support for node master (#831) 113c0282072e7ff4f9dfc98b432fd894b798c2c - -### 2.12.1 Dec 18 2018 - - - Bugfix: Fix build breakage with Node.js 10.0.0-10.9.0. (#833) 625e90e8fef8d39ffa7247250a76a100b2487474 - -### 2.12.0 Dec 16 2018 - - - Bugfix: Add scope.Escape() to Call() (#817) 2e5ed4fc3a8ac80a6ef1f2a55099ab3ac8800dc6 - - Bugfix: Fix Node.js v10.12.0 deprecation warnings. 509859cc23b1770376b56550a027840a2ce0f73d - - Feature: Allow SetWeak() for non-object persistent handles. 
(#824) e6ef6a48e7e671fe3e4b7dddaa8912a3f8262ecd - -### 2.11.1 Sep 29 2018 - - - Fix: adapt to V8 7.0 24a22c3b25eeeec2016c6ec239bdd6169e985447 - -### 2.11.0 Aug 25 2018 - - - Removal: remove `FunctionCallbackInfo::Callee` for nodejs `>= 10` 1a56c0a6efd4fac944cb46c30912a8e023bda7d4 - - Bugfix: Fix `AsyncProgressWorkerBase::WorkProgress` sends invalid data b0c764d1dab11e9f8b37ffb81e2560a4498aad5e - - Feature: Introduce `GetCurrentEventLoop` b4911b0bb1f6d47d860e10ec014d941c51efac5e - - Feature: Add `NAN_MODULE_WORKER_ENABLED` macro as a replacement for `NAN_MODULE` b058fb047d18a58250e66ae831444441c1f2ac7a - -### 2.10.0 Mar 16 2018 - - - Deprecation: Deprecate `MakeCallback` 5e92b19a59e194241d6a658bd6ff7bfbda372950 - - Feature: add `Nan::Call` overload 4482e1242fe124d166fc1a5b2be3c1cc849fe452 - - Feature: add more `Nan::Call` overloads 8584e63e6d04c7d2eb8c4a664e4ef57d70bf672b - - Feature: Fix deprecation warnings for Node 10 1caf258243b0602ed56922bde74f1c91b0cbcb6a - -### 2.9.2 Feb 22 2018 - - - Bugfix: Bandaid for async hooks 212bd2f849be14ef1b02fc85010b053daa24252b - -### 2.9.1 Feb 22 2018 - - - Bugfix: Avoid deprecation warnings in deprecated `Nan::Callback::operator()` 372b14d91289df4604b0f81780709708c45a9aa4 - - Bugfix: Avoid deprecation warnings in `Nan::JSON` 3bc294bce0b7d0a3ee4559926303e5ed4866fda2 - -### 2.9.0 Feb 22 2018 - - - Deprecation: Deprecate legacy `Callback::Call` 6dd5fa690af61ca3523004b433304c581b3ea309 - - Feature: introduce `AsyncResource` class 90c0a179c0d8cb5fd26f1a7d2b1d6231eb402d48o - - Feature: Add context aware `Nan::Callback::Call` functions 7169e09fb088418b6e388222e88b4c13f07ebaee - - Feature: Make `AsyncWorker` context aware 066ba21a6fb9e2b5230c9ed3a6fc51f1211736a4 - - Feature: add `Callback` overload to `Nan::Call` 5328daf66e202658c1dc0d916c3aaba99b3cc606 - - Bugfix: fix warning: suggest parentheses around `&&` within `||` b2bb63d68b8ae623a526b542764e1ac82319cb2c - - Bugfix: Fix compilation on io.js 3 d06114dba0a522fb436f0c5f47b994210968cd7b - -### 2.8.0 Nov 15 2017 - - - Deprecation: Deprecate `Nan::ForceSet` in favor of `Nan::DefineOwnProperty()` 95cbb976d6fbbba88ba0f86dd188223a8591b4e7 - - Feature: Add `Nan::AsyncProgressQueueWorker` a976636ecc2ef617d1b061ce4a6edf39923691cb - - Feature: Add `Nan::DefineOwnProperty()` 95cbb976d6fbbba88ba0f86dd188223a8591b4e7 - - Bugfix: Fix compiling on io.js 1 & 2 82705a64503ce60c62e98df5bd02972bba090900 - - Bugfix: Use DefineOwnProperty instead of ForceSet 95cbb976d6fbbba88ba0f86dd188223a8591b4e7 - -### 2.7.0 Aug 30 2017 - - - Feature: Add `Nan::To()` overload. b93280670c9f6da42ed4cf6cbf085ffdd87bd65b - - Bugfix: Fix ternary in `Nan::MaybeLocal::FromMaybe()`. 79a26f7d362e756a9524e672a82c3d603b542867 - -### 2.6.2 Apr 12 2017 - - - Bugfix: Fix v8::JSON::Parse() deprecation warning. 
87f6a3c65815fa062296a994cc863e2fa124867d - -### 2.6.1 Apr 6 2017 - - - Bugfix: nan_json.h: fix build breakage in Node 6 ac8d47dc3c10bfbf3f15a6b951633120c0ee6d51 - -### 2.6.0 Apr 6 2017 - - - Feature: nan: add support for JSON::Parse & Stringify b533226c629cce70e1932a873bb6f849044a56c5 - -### 2.5.1 Jan 23 2017 - - - Bugfix: Fix disappearing handle for private value 6a80995694f162ef63dbc9948fbefd45d4485aa0 - - Bugfix: Add missing scopes a93b8bae6bc7d32a170db6e89228b7f60ee57112 - - Bugfix: Use string::data instead of string::front in NewOneByteString d5f920371e67e1f3b268295daee6e83af86b6e50 - -### 2.5.0 Dec 21 2016 - - - Feature: Support Private accessors a86255cb357e8ad8ccbf1f6a4a901c921e39a178 - - Bugfix: Abort in delete operators that shouldn't be called 0fe38215ff8581703967dfd26c12793feb960018 - -### 2.4.0 Jul 10 2016 - - - Feature: Rewrite Callback to add Callback::Reset c4cf44d61f8275cd5f7b0c911d7a806d4004f649 - - Feature: AsyncProgressWorker: add template types for .send 1242c9a11a7ed481c8f08ec06316385cacc513d0 - - Bugfix: Add constness to old Persistent comparison operators bd43cb9982c7639605d60fd073efe8cae165d9b2 - -### 2.3.5 May 31 2016 - - - Bugfix: Replace NAN_INLINE with 'inline' keyword. 71819d8725f822990f439479c9aba3b240804909 - -### 2.3.4 May 31 2016 - - - Bugfix: Remove V8 deprecation warnings 0592fb0a47f3a1c7763087ebea8e1138829f24f9 - - Bugfix: Fix new versions not to use WeakCallbackInfo::IsFirstPass 615c19d9e03d4be2049c10db0151edbc3b229246 - - Bugfix: Make ObjectWrap::handle() const d19af99595587fe7a26bd850af6595c2a7145afc - - Bugfix: Fix compilation errors related to 0592fb0a47f3a1c7763087ebea8e1138829f24f9 e9191c525b94f652718325e28610a1adcf90fed8 - -### 2.3.3 May 4 2016 - - - Bugfix: Refactor SetMethod() to deal with v8::Templates (#566) b9083cf6d5de6ebe6bcb49c7502fbb7c0d9ddda8 - -### 2.3.2 Apr 27 2016 - - - Bugfix: Fix compilation on outdated versions due to Handle removal f8b7c875d04d425a41dfd4f3f8345bc3a11e6c52 - -### 2.3.1 Apr 27 2016 - - - Bugfix: Don't use deprecated v8::Template::Set() in SetMethod a90951e9ea70fa1b3836af4b925322919159100e - -### 2.3.0 Apr 27 2016 - - - Feature: added Signal() for invoking async callbacks without sending data from AsyncProgressWorker d8adba45f20e077d00561b20199133620c990b38 - - Bugfix: Don't use deprecated v8::Template::Set() 00dacf0a4b86027415867fa7f1059acc499dcece - -### 2.2.1 Mar 29 2016 - - - Bugfix: Use NewFromUnsigned in ReturnValue::Set(uint32_t i) for pre_12 3a18f9bdce29826e0e4c217854bc476918241a58 - - Performance: Remove unneeeded nullptr checks b715ef44887931c94f0d1605b3b1a4156eebece9 - -### 2.2.0 Jan 9 2016 - - - Feature: Add Function::Call wrapper 4c157474dacf284d125c324177b45aa5dabc08c6 - - Feature: Rename GC*logueCallback to GCCallback for > 4.0 3603435109f981606d300eb88004ca101283acec - - Bugfix: Fix Global::Pass for old versions 367e82a60fbaa52716232cc89db1cc3f685d77d9 - - Bugfix: Remove weird MaybeLocal wrapping of what already is a MaybeLocal 23b4590db10c2ba66aee2338aebe9751c4cb190b - -### 2.1.0 Oct 8 2015 - - - Deprecation: Deprecate NanErrnoException in favor of ErrnoException 0af1ca4cf8b3f0f65ed31bc63a663ab3319da55c - - Feature: added helper class for accessing contents of typedarrays 17b51294c801e534479d5463697a73462d0ca555 - - Feature: [Maybe types] Add MakeMaybe(...) 
48d7b53d9702b0c7a060e69ea10fea8fb48d814d - - Feature: new: allow utf16 string with length 66ac6e65c8ab9394ef588adfc59131b3b9d8347b - - Feature: Introduce SetCallHandler and SetCallAsFunctionHandler 7764a9a115d60ba10dc24d86feb0fbc9b4f75537 - - Bugfix: Enable creating Locals from Globals under Node 0.10. 9bf9b8b190821af889790fdc18ace57257e4f9ff - - Bugfix: Fix issue #462 where PropertyCallbackInfo data is not stored safely. 55f50adedd543098526c7b9f4fffd607d3f9861f - -### 2.0.9 Sep 8 2015 - - - Bugfix: EscapableHandleScope in Nan::NewBuffer for Node 0.8 and 0.10 b1654d7 - -### 2.0.8 Aug 28 2015 - - - Work around duplicate linking bug in clang 11902da - -### 2.0.7 Aug 26 2015 - - - Build: Repackage - -### 2.0.6 Aug 26 2015 - - - Bugfix: Properly handle null callback in FunctionTemplate factory 6e99cb1 - - Bugfix: Remove unused static std::map instances 525bddc - - Bugfix: Make better use of maybe versions of APIs bfba85b - - Bugfix: Fix shadowing issues with handle in ObjectWrap 0a9072d - -### 2.0.5 Aug 10 2015 - - - Bugfix: Reimplement weak callback in ObjectWrap 98d38c1 - - Bugfix: Make sure callback classes are not assignable, copyable or movable 81f9b1d - -### 2.0.4 Aug 6 2015 - - - Build: Repackage - -### 2.0.3 Aug 6 2015 - - - Bugfix: Don't use clang++ / g++ syntax extension. 231450e - -### 2.0.2 Aug 6 2015 - - - Build: Repackage - -### 2.0.1 Aug 6 2015 - - - Bugfix: Add workaround for missing REPLACE_INVALID_UTF8 60d6687 - - Bugfix: Reimplement ObjectWrap from scratch to prevent memory leaks 6484601 - - Bugfix: Fix Persistent leak in FunctionCallbackInfo and PropertyCallbackInfo 641ef5f - - Bugfix: Add missing overload for Nan::NewInstance that takes argc/argv 29450ed - -### 2.0.0 Jul 31 2015 - - - Change: Renamed identifiers with leading underscores b5932b4 - - Change: Replaced NanObjectWrapHandle with class NanObjectWrap 464f1e1 - - Change: Replace NanScope and NanEscpableScope macros with classes 47751c4 - - Change: Rename NanNewBufferHandle to NanNewBuffer 6745f99 - - Change: Rename NanBufferUse to NanNewBuffer 3e8b0a5 - - Change: Rename NanNewBuffer to NanCopyBuffer d6af78d - - Change: Remove Nan prefix from all names 72d1f67 - - Change: Update Buffer API for new upstream changes d5d3291 - - Change: Rename Scope and EscapableScope to HandleScope and EscapableHandleScope 21a7a6a - - Change: Get rid of Handles e6c0daf - - Feature: Support io.js 3 with V8 4.4 - - Feature: Introduce NanPersistent 7fed696 - - Feature: Introduce NanGlobal 4408da1 - - Feature: Added NanTryCatch 10f1ca4 - - Feature: Update for V8 v4.3 4b6404a - - Feature: Introduce NanNewOneByteString c543d32 - - Feature: Introduce namespace Nan 67ed1b1 - - Removal: Remove NanLocker and NanUnlocker dd6e401 - - Removal: Remove string converters, except NanUtf8String, which now follows the node implementation b5d00a9 - - Removal: Remove NanReturn* macros d90a25c - - Removal: Remove HasInstance e8f84fe - - -### 1.9.0 Jul 31 2015 - - - Feature: Added `NanFatalException` 81d4a2c - - Feature: Added more error types 4265f06 - - Feature: Added dereference and function call operators to NanCallback c4b2ed0 - - Feature: Added indexed GetFromPersistent and SaveToPersistent edd510c - - Feature: Added more overloads of SaveToPersistent and GetFromPersistent 8b1cef6 - - Feature: Added NanErrnoException dd87d9e - - Correctness: Prevent assign, copy, and move for classes that do not support it 1f55c59, 4b808cb, c96d9b2, fba4a29, 3357130 - - Deprecation: Deprecate `NanGetPointerSafe` and `NanSetPointerSafe` 81d4a2c - - Deprecation: 
Deprecate `NanBooleanOptionValue` and `NanUInt32OptionValue` 0ad254b - -### 1.8.4 Apr 26 2015 - - - Build: Repackage - -### 1.8.3 Apr 26 2015 - - - Bugfix: Include missing header 1af8648 - -### 1.8.2 Apr 23 2015 - - - Build: Repackage - -### 1.8.1 Apr 23 2015 - - - Bugfix: NanObjectWrapHandle should take a pointer 155f1d3 - -### 1.8.0 Apr 23 2015 - - - Feature: Allow primitives with NanReturnValue 2e4475e - - Feature: Added comparison operators to NanCallback 55b075e - - Feature: Backport thread local storage 15bb7fa - - Removal: Remove support for signatures with arguments 8a2069d - - Correcteness: Replaced NanObjectWrapHandle macro with function 0bc6d59 - -### 1.7.0 Feb 28 2015 - - - Feature: Made NanCallback::Call accept optional target 8d54da7 - - Feature: Support atom-shell 0.21 0b7f1bb - -### 1.6.2 Feb 6 2015 - - - Bugfix: NanEncode: fix argument type for node::Encode on io.js 2be8639 - -### 1.6.1 Jan 23 2015 - - - Build: version bump - -### 1.5.3 Jan 23 2015 - - - Build: repackage - -### 1.6.0 Jan 23 2015 - - - Deprecated `NanNewContextHandle` in favor of `NanNew` 49259af - - Support utility functions moved in newer v8 versions (Node 0.11.15, io.js 1.0) a0aa179 - - Added `NanEncode`, `NanDecodeBytes` and `NanDecodeWrite` 75e6fb9 - -### 1.5.2 Jan 23 2015 - - - Bugfix: Fix non-inline definition build error with clang++ 21d96a1, 60fadd4 - - Bugfix: Readded missing String constructors 18d828f - - Bugfix: Add overload handling NanNew(..) 5ef813b - - Bugfix: Fix uv_work_cb versioning 997e4ae - - Bugfix: Add function factory and test 4eca89c - - Bugfix: Add object template factory and test cdcb951 - - Correctness: Lifted an io.js related typedef c9490be - - Correctness: Make explicit downcasts of String lengths 00074e6 - - Windows: Limit the scope of disabled warning C4530 83d7deb - -### 1.5.1 Jan 15 2015 - - - Build: version bump - -### 1.4.3 Jan 15 2015 - - - Build: version bump - -### 1.4.2 Jan 15 2015 - - - Feature: Support io.js 0dbc5e8 - -### 1.5.0 Jan 14 2015 - - - Feature: Support io.js b003843 - - Correctness: Improved NanNew internals 9cd4f6a - - Feature: Implement progress to NanAsyncWorker 8d6a160 - -### 1.4.1 Nov 8 2014 - - - Bugfix: Handle DEBUG definition correctly - - Bugfix: Accept int as Boolean - -### 1.4.0 Nov 1 2014 - - - Feature: Added NAN_GC_CALLBACK 6a5c245 - - Performance: Removed unnecessary local handle creation 18a7243, 41fe2f8 - - Correctness: Added constness to references in NanHasInstance 02c61cd - - Warnings: Fixed spurious warnings from -Wundef and -Wshadow, 541b122, 99d8cb6 - - Windoze: Shut Visual Studio up when compiling 8d558c1 - - License: Switch to plain MIT from custom hacked MIT license 11de983 - - Build: Added test target to Makefile e232e46 - - Performance: Removed superfluous scope in NanAsyncWorker f4b7821 - - Sugar/Feature: Added NanReturnThis() and NanReturnHolder() shorthands 237a5ff, d697208 - - Feature: Added suitable overload of NanNew for v8::Integer::NewFromUnsigned b27b450 - -### 1.3.0 Aug 2 2014 - - - Added NanNew(std::string) - - Added NanNew(std::string&) - - Added NanAsciiString helper class - - Added NanUtf8String helper class - - Added NanUcs2String helper class - - Deprecated NanRawString() - - Deprecated NanCString() - - Added NanGetIsolateData(v8::Isolate *isolate) - - Added NanMakeCallback(v8::Handle target, v8::Handle func, int argc, v8::Handle* argv) - - Added NanMakeCallback(v8::Handle target, v8::Handle symbol, int argc, v8::Handle* argv) - - Added NanMakeCallback(v8::Handle target, const char* method, int argc, 
v8::Handle* argv) - - Added NanSetTemplate(v8::Handle templ, v8::Handle name , v8::Handle value, v8::PropertyAttribute attributes) - - Added NanSetPrototypeTemplate(v8::Local templ, v8::Handle name, v8::Handle value, v8::PropertyAttribute attributes) - - Added NanSetInstanceTemplate(v8::Local templ, const char *name, v8::Handle value) - - Added NanSetInstanceTemplate(v8::Local templ, v8::Handle name, v8::Handle value, v8::PropertyAttribute attributes) - -### 1.2.0 Jun 5 2014 - - - Add NanSetPrototypeTemplate - - Changed NAN_WEAK_CALLBACK internals, switched _NanWeakCallbackData to class, - introduced _NanWeakCallbackDispatcher - - Removed -Wno-unused-local-typedefs from test builds - - Made test builds Windows compatible ('Sleep()') - -### 1.1.2 May 28 2014 - - - Release to fix more stuff-ups in 1.1.1 - -### 1.1.1 May 28 2014 - - - Release to fix version mismatch in nan.h and lack of changelog entry for 1.1.0 - -### 1.1.0 May 25 2014 - - - Remove nan_isolate, use v8::Isolate::GetCurrent() internally instead - - Additional explicit overloads for NanNew(): (char*,int), (uint8_t*[,int]), - (uint16_t*[,int), double, int, unsigned int, bool, v8::String::ExternalStringResource*, - v8::String::ExternalAsciiStringResource* - - Deprecate NanSymbol() - - Added SetErrorMessage() and ErrorMessage() to NanAsyncWorker - -### 1.0.0 May 4 2014 - - - Heavy API changes for V8 3.25 / Node 0.11.13 - - Use cpplint.py - - Removed NanInitPersistent - - Removed NanPersistentToLocal - - Removed NanFromV8String - - Removed NanMakeWeak - - Removed NanNewLocal - - Removed NAN_WEAK_CALLBACK_OBJECT - - Removed NAN_WEAK_CALLBACK_DATA - - Introduce NanNew, replaces NanNewLocal, NanPersistentToLocal, adds many overloaded typed versions - - Introduce NanUndefined, NanNull, NanTrue and NanFalse - - Introduce NanEscapableScope and NanEscapeScope - - Introduce NanMakeWeakPersistent (requires a special callback to work on both old and new node) - - Introduce NanMakeCallback for node::MakeCallback - - Introduce NanSetTemplate - - Introduce NanGetCurrentContext - - Introduce NanCompileScript and NanRunScript - - Introduce NanAdjustExternalMemory - - Introduce NanAddGCEpilogueCallback, NanAddGCPrologueCallback, NanRemoveGCEpilogueCallback, NanRemoveGCPrologueCallback - - Introduce NanGetHeapStatistics - - Rename NanAsyncWorker#SavePersistent() to SaveToPersistent() - -### 0.8.0 Jan 9 2014 - - - NanDispose -> NanDisposePersistent, deprecate NanDispose - - Extract _NAN_*_RETURN_TYPE, pull up NAN_*() - -### 0.7.1 Jan 9 2014 - - - Fixes to work against debug builds of Node - - Safer NanPersistentToLocal (avoid reinterpret_cast) - - Speed up common NanRawString case by only extracting flattened string when necessary - -### 0.7.0 Dec 17 2013 - - - New no-arg form of NanCallback() constructor. - - NanCallback#Call takes Handle rather than Local - - Removed deprecated NanCallback#Run method, use NanCallback#Call instead - - Split off _NAN_*_ARGS_TYPE from _NAN_*_ARGS - - Restore (unofficial) Node 0.6 compatibility at NanCallback#Call() - - Introduce NanRawString() for char* (or appropriate void*) from v8::String - (replacement for NanFromV8String) - - Introduce NanCString() for null-terminated char* from v8::String - -### 0.6.0 Nov 21 2013 - - - Introduce NanNewLocal(v8::Handle value) for use in place of - v8::Local::New(...) 
since v8 started requiring isolate in Node 0.11.9 - -### 0.5.2 Nov 16 2013 - - - Convert SavePersistent and GetFromPersistent in NanAsyncWorker from protected and public - -### 0.5.1 Nov 12 2013 - - - Use node::MakeCallback() instead of direct v8::Function::Call() - -### 0.5.0 Nov 11 2013 - - - Added @TooTallNate as collaborator - - New, much simpler, "include_dirs" for binding.gyp - - Added full range of NAN_INDEX_* macros to match NAN_PROPERTY_* macros - -### 0.4.4 Nov 2 2013 - - - Isolate argument from v8::Persistent::MakeWeak removed for 0.11.8+ - -### 0.4.3 Nov 2 2013 - - - Include node_object_wrap.h, removed from node.h for Node 0.11.8. - -### 0.4.2 Nov 2 2013 - - - Handle deprecation of v8::Persistent::Dispose(v8::Isolate* isolate)) for - Node 0.11.8 release. - -### 0.4.1 Sep 16 2013 - - - Added explicit `#include ` as it was removed from node.h for v0.11.8 - -### 0.4.0 Sep 2 2013 - - - Added NAN_INLINE and NAN_DEPRECATED and made use of them - - Added NanError, NanTypeError and NanRangeError - - Cleaned up code - -### 0.3.2 Aug 30 2013 - - - Fix missing scope declaration in GetFromPersistent() and SaveToPersistent - in NanAsyncWorker - -### 0.3.1 Aug 20 2013 - - - fix "not all control paths return a value" compile warning on some platforms - -### 0.3.0 Aug 19 2013 - - - Made NAN work with NPM - - Lots of fixes to NanFromV8String, pulling in features from new Node core - - Changed node::encoding to Nan::Encoding in NanFromV8String to unify the API - - Added optional error number argument for NanThrowError() - - Added NanInitPersistent() - - Added NanReturnNull() and NanReturnEmptyString() - - Added NanLocker and NanUnlocker - - Added missing scopes - - Made sure to clear disposed Persistent handles - - Changed NanAsyncWorker to allocate error messages on the heap - - Changed NanThrowError(Local) to NanThrowError(Handle) - - Fixed leak in NanAsyncWorker when errmsg is used - -### 0.2.2 Aug 5 2013 - - - Fixed usage of undefined variable with node::BASE64 in NanFromV8String() - -### 0.2.1 Aug 5 2013 - - - Fixed 0.8 breakage, node::BUFFER encoding type not available in 0.8 for - NanFromV8String() - -### 0.2.0 Aug 5 2013 - - - Added NAN_PROPERTY_GETTER, NAN_PROPERTY_SETTER, NAN_PROPERTY_ENUMERATOR, - NAN_PROPERTY_DELETER, NAN_PROPERTY_QUERY - - Extracted _NAN_METHOD_ARGS, _NAN_GETTER_ARGS, _NAN_SETTER_ARGS, - _NAN_PROPERTY_GETTER_ARGS, _NAN_PROPERTY_SETTER_ARGS, - _NAN_PROPERTY_ENUMERATOR_ARGS, _NAN_PROPERTY_DELETER_ARGS, - _NAN_PROPERTY_QUERY_ARGS - - Added NanGetInternalFieldPointer, NanSetInternalFieldPointer - - Added NAN_WEAK_CALLBACK, NAN_WEAK_CALLBACK_OBJECT, - NAN_WEAK_CALLBACK_DATA, NanMakeWeak - - Renamed THROW_ERROR to _NAN_THROW_ERROR - - Added NanNewBufferHandle(char*, size_t, node::smalloc::FreeCallback, void*) - - Added NanBufferUse(char*, uint32_t) - - Added NanNewContextHandle(v8::ExtensionConfiguration*, - v8::Handle, v8::Handle) - - Fixed broken NanCallback#GetFunction() - - Added optional encoding and size arguments to NanFromV8String() - - Added NanGetPointerSafe() and NanSetPointerSafe() - - Added initial test suite (to be expanded) - - Allow NanUInt32OptionValue to convert any Number object - -### 0.1.0 Jul 21 2013 - - - Added `NAN_GETTER`, `NAN_SETTER` - - Added `NanThrowError` with single Local argument - - Added `NanNewBufferHandle` with single uint32_t argument - - Added `NanHasInstance(Persistent&, Handle)` - - Added `Local NanCallback#GetFunction()` - - Added `NanCallback#Call(int, Local[])` - - Deprecated `NanCallback#Run(int, Local[])` in favour of Call 
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/LICENSE.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/LICENSE.md deleted file mode 100644 index 2d33043d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/LICENSE.md +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 [NAN contributors]() - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/README.md deleted file mode 100644 index 0913333f..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/README.md +++ /dev/null @@ -1,456 +0,0 @@ -Native Abstractions for Node.js -=============================== - -**A header file filled with macro and utility goodness for making add-on development for Node.js easier across versions 0.8, 0.10, 0.12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 and 18.** - -***Current version: 2.16.0*** - -*(See [CHANGELOG.md](https://github.com/nodejs/nan/blob/master/CHANGELOG.md) for complete ChangeLog)* - -[![NPM](https://nodei.co/npm/nan.png?downloads=true&downloadRank=true)](https://nodei.co/npm/nan/) [![NPM](https://nodei.co/npm-dl/nan.png?months=6&height=3)](https://nodei.co/npm/nan/) - -[![Build Status](https://api.travis-ci.com/nodejs/nan.svg?branch=master)](https://travis-ci.com/nodejs/nan) -[![Build status](https://ci.appveyor.com/api/projects/status/kh73pbm9dsju7fgh)](https://ci.appveyor.com/project/RodVagg/nan) - -Thanks to the crazy changes in V8 (and some in Node core), keeping native addons compiling happily across versions, particularly 0.10 to 0.12 to 4.0, is a minor nightmare. The goal of this project is to store all logic necessary to develop native Node.js addons without having to inspect `NODE_MODULE_VERSION` and get yourself into a macro-tangle. - -This project also contains some helper utilities that make addon development a bit more pleasant. 
- - * **[News & Updates](#news)** - * **[Usage](#usage)** - * **[Example](#example)** - * **[API](#api)** - * **[Tests](#tests)** - * **[Known issues](#issues)** - * **[Governance & Contributing](#governance)** - - - -## News & Updates - - - -## Usage - -Simply add **NAN** as a dependency in the *package.json* of your Node addon: - -``` bash -$ npm install --save nan -``` - -Pull in the path to **NAN** in your *binding.gyp* so that you can use `#include ` in your *.cpp* files: - -``` python -"include_dirs" : [ - "` when compiling your addon. - - - -## Example - -Just getting started with Nan? Take a look at the **[Node Add-on Examples](https://github.com/nodejs/node-addon-examples)**. - -Refer to a [quick-start **Nan** Boilerplate](https://github.com/fcanas/node-native-boilerplate) for a ready-to-go project that utilizes basic Nan functionality. - -For a simpler example, see the **[async pi estimation example](https://github.com/nodejs/nan/tree/master/examples/async_pi_estimate)** in the examples directory for full code and an explanation of what this Monte Carlo Pi estimation example does. Below are just some parts of the full example that illustrate the use of **NAN**. - -Yet another example is **[nan-example-eol](https://github.com/CodeCharmLtd/nan-example-eol)**. It shows newline detection implemented as a native addon. - -Also take a look at our comprehensive **[C++ test suite](https://github.com/nodejs/nan/tree/master/test/cpp)** which has a plethora of code snippets for your pasting pleasure. - - - -## API - -Additional to the NAN documentation below, please consult: - -* [The V8 Getting Started * Guide](https://v8.dev/docs/embed) -* [V8 API Documentation](https://v8docs.nodesource.com/) -* [Node Add-on Documentation](https://nodejs.org/api/addons.html) - - - -### JavaScript-accessible methods - -A _template_ is a blueprint for JavaScript functions and objects in a context. You can use a template to wrap C++ functions and data structures within JavaScript objects so that they can be manipulated from JavaScript. See the V8 Embedders Guide section on [Templates](https://github.com/v8/v8/wiki/Embedder%27s-Guide#templates) for further information. - -In order to expose functionality to JavaScript via a template, you must provide it to V8 in a form that it understands. Across the versions of V8 supported by NAN, JavaScript-accessible method signatures vary widely, NAN fully abstracts method declaration and provides you with an interface that is similar to the most recent V8 API but is backward-compatible with older versions that still use the now-deceased `v8::Argument` type. - -* **Method argument types** - - Nan::FunctionCallbackInfo - - Nan::PropertyCallbackInfo - - Nan::ReturnValue -* **Method declarations** - - Method declaration - - Getter declaration - - Setter declaration - - Property getter declaration - - Property setter declaration - - Property enumerator declaration - - Property deleter declaration - - Property query declaration - - Index getter declaration - - Index setter declaration - - Index enumerator declaration - - Index deleter declaration - - Index query declaration -* Method and template helpers - - Nan::SetMethod() - - Nan::SetPrototypeMethod() - - Nan::SetAccessor() - - Nan::SetNamedPropertyHandler() - - Nan::SetIndexedPropertyHandler() - - Nan::SetTemplate() - - Nan::SetPrototypeTemplate() - - Nan::SetInstanceTemplate() - - Nan::SetCallHandler() - - Nan::SetCallAsFunctionHandler() - -### Scopes - -A _local handle_ is a pointer to an object. 
All V8 objects are accessed using handles, they are necessary because of the way the V8 garbage collector works. - -A handle scope can be thought of as a container for any number of handles. When you've finished with your handles, instead of deleting each one individually you can simply delete their scope. - -The creation of `HandleScope` objects is different across the supported versions of V8. Therefore, NAN provides its own implementations that can be used safely across these. - - - Nan::HandleScope - - Nan::EscapableHandleScope - -Also see the V8 Embedders Guide section on [Handles and Garbage Collection](https://github.com/v8/v8/wiki/Embedder%27s%20Guide#handles-and-garbage-collection). - -### Persistent references - -An object reference that is independent of any `HandleScope` is a _persistent_ reference. Where a `Local` handle only lives as long as the `HandleScope` in which it was allocated, a `Persistent` handle remains valid until it is explicitly disposed. - -Due to the evolution of the V8 API, it is necessary for NAN to provide a wrapper implementation of the `Persistent` classes to supply compatibility across the V8 versions supported. - - - Nan::PersistentBase & v8::PersistentBase - - Nan::NonCopyablePersistentTraits & v8::NonCopyablePersistentTraits - - Nan::CopyablePersistentTraits & v8::CopyablePersistentTraits - - Nan::Persistent - - Nan::Global - - Nan::WeakCallbackInfo - - Nan::WeakCallbackType - -Also see the V8 Embedders Guide section on [Handles and Garbage Collection](https://developers.google.com/v8/embed#handles). - -### New - -NAN provides a `Nan::New()` helper for the creation of new JavaScript objects in a way that's compatible across the supported versions of V8. - - - Nan::New() - - Nan::Undefined() - - Nan::Null() - - Nan::True() - - Nan::False() - - Nan::EmptyString() - - -### Converters - -NAN contains functions that convert `v8::Value`s to other `v8::Value` types and native types. Since type conversion is not guaranteed to succeed, they return `Nan::Maybe` types. These converters can be used in place of `value->ToX()` and `value->XValue()` (where `X` is one of the types, e.g. `Boolean`) in a way that provides a consistent interface across V8 versions. Newer versions of V8 use the new `v8::Maybe` and `v8::MaybeLocal` types for these conversions, older versions don't have this functionality so it is provided by NAN. - - - Nan::To() - -### Maybe Types - -The `Nan::MaybeLocal` and `Nan::Maybe` types are monads that encapsulate `v8::Local` handles that _may be empty_. 
- -* **Maybe Types** - - Nan::MaybeLocal - - Nan::Maybe - - Nan::Nothing - - Nan::Just -* **Maybe Helpers** - - Nan::Call() - - Nan::ToDetailString() - - Nan::ToArrayIndex() - - Nan::Equals() - - Nan::NewInstance() - - Nan::GetFunction() - - Nan::Set() - - Nan::DefineOwnProperty() - - Nan::ForceSet() - - Nan::Get() - - Nan::GetPropertyAttributes() - - Nan::Has() - - Nan::Delete() - - Nan::GetPropertyNames() - - Nan::GetOwnPropertyNames() - - Nan::SetPrototype() - - Nan::ObjectProtoToString() - - Nan::HasOwnProperty() - - Nan::HasRealNamedProperty() - - Nan::HasRealIndexedProperty() - - Nan::HasRealNamedCallbackProperty() - - Nan::GetRealNamedPropertyInPrototypeChain() - - Nan::GetRealNamedProperty() - - Nan::CallAsFunction() - - Nan::CallAsConstructor() - - Nan::GetSourceLine() - - Nan::GetLineNumber() - - Nan::GetStartColumn() - - Nan::GetEndColumn() - - Nan::CloneElementAt() - - Nan::HasPrivate() - - Nan::GetPrivate() - - Nan::SetPrivate() - - Nan::DeletePrivate() - - Nan::MakeMaybe() - -### Script - -NAN provides `v8::Script` helpers as the API has changed over the supported versions of V8. - - - Nan::CompileScript() - - Nan::RunScript() - - Nan::ScriptOrigin - - -### JSON - -The _JSON_ object provides the C++ versions of the methods offered by the `JSON` object in javascript. V8 exposes these methods via the `v8::JSON` object. - - - Nan::JSON.Parse - - Nan::JSON.Stringify - -Refer to the V8 JSON object in the [V8 documentation](https://v8docs.nodesource.com/node-8.16/da/d6f/classv8_1_1_j_s_o_n.html) for more information about these methods and their arguments. - -### Errors - -NAN includes helpers for creating, throwing and catching Errors as much of this functionality varies across the supported versions of V8 and must be abstracted. - -Note that an Error object is simply a specialized form of `v8::Value`. - -Also consult the V8 Embedders Guide section on [Exceptions](https://developers.google.com/v8/embed#exceptions) for more information. - - - Nan::Error() - - Nan::RangeError() - - Nan::ReferenceError() - - Nan::SyntaxError() - - Nan::TypeError() - - Nan::ThrowError() - - Nan::ThrowRangeError() - - Nan::ThrowReferenceError() - - Nan::ThrowSyntaxError() - - Nan::ThrowTypeError() - - Nan::FatalException() - - Nan::ErrnoException() - - Nan::TryCatch - - -### Buffers - -NAN's `node::Buffer` helpers exist as the API has changed across supported Node versions. Use these methods to ensure compatibility. - - - Nan::NewBuffer() - - Nan::CopyBuffer() - - Nan::FreeCallback() - -### Nan::Callback - -`Nan::Callback` makes it easier to use `v8::Function` handles as callbacks. A class that wraps a `v8::Function` handle, protecting it from garbage collection and making it particularly useful for storage and use across asynchronous execution. - - - Nan::Callback - -### Asynchronous work helpers - -`Nan::AsyncWorker`, `Nan::AsyncProgressWorker` and `Nan::AsyncProgressQueueWorker` are helper classes that make working with asynchronous code easier. - - - Nan::AsyncWorker - - Nan::AsyncProgressWorkerBase & Nan::AsyncProgressWorker - - Nan::AsyncProgressQueueWorker - - Nan::AsyncQueueWorker - -### Strings & Bytes - -Miscellaneous string & byte encoding and decoding functionality provided for compatibility across supported versions of V8 and Node. Implemented by NAN to ensure that all encoding types are supported, even for older versions of Node where they are missing. 
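For instance, a minimal sketch of round-tripping raw bytes through these helpers (not from the original docs; assumes it runs inside a NAN method so a V8 context is active):

```c++
// Encode a raw byte buffer as a JavaScript string; Nan::UTF8 is one of
// the Nan::Encoding values.
const char raw[] = "hello";
v8::Local<v8::Value> js = Nan::Encode(raw, sizeof(raw) - 1, Nan::UTF8);

// DecodeBytes reports the byte length a DecodeWrite of this value would need.
ssize_t len = Nan::DecodeBytes(js, Nan::UTF8);
```

The helpers covered in this section: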
- - - Nan::Encoding - - Nan::Encode() - - Nan::DecodeBytes() - - Nan::DecodeWrite() - - -### Object Wrappers - -The `ObjectWrap` class can be used to make wrapped C++ objects and a factory of wrapped objects. - - - Nan::ObjectWrap - - -### V8 internals - -The hooks to access V8 internals—including GC and statistics—are different across the supported versions of V8, therefore NAN provides its own hooks that call the appropriate V8 methods. - - - NAN_GC_CALLBACK() - - Nan::AddGCEpilogueCallback() - - Nan::RemoveGCEpilogueCallback() - - Nan::AddGCPrologueCallback() - - Nan::RemoveGCPrologueCallback() - - Nan::GetHeapStatistics() - - Nan::SetCounterFunction() - - Nan::SetCreateHistogramFunction() - - Nan::SetAddHistogramSampleFunction() - - Nan::IdleNotification() - - Nan::LowMemoryNotification() - - Nan::ContextDisposedNotification() - - Nan::GetInternalFieldPointer() - - Nan::SetInternalFieldPointer() - - Nan::AdjustExternalMemory() - - -### Miscellaneous V8 Helpers - - - Nan::Utf8String - - Nan::GetCurrentContext() - - Nan::SetIsolateData() - - Nan::GetIsolateData() - - Nan::TypedArrayContents - - -### Miscellaneous Node Helpers - - - Nan::AsyncResource - - Nan::MakeCallback() - - NAN_MODULE_INIT() - - Nan::Export() - - - - - - -### Tests - -To run the NAN tests do: - -``` sh -npm install -npm run-script rebuild-tests -npm test -``` - -Or just: - -``` sh -npm install -make test -``` - - - -## Known issues - -### Compiling against Node.js 0.12 on OSX - -With new enough compilers available on OSX, the versions of V8 headers corresponding to Node.js 0.12 -do not compile anymore. The error looks something like: - -``` -❯ CXX(target) Release/obj.target/accessors/cpp/accessors.o -In file included from ../cpp/accessors.cpp:9: -In file included from ../../nan.h:51: -In file included from /Users/ofrobots/.node-gyp/0.12.18/include/node/node.h:61: -/Users/ofrobots/.node-gyp/0.12.18/include/node/v8.h:5800:54: error: 'CreateHandle' is a protected member of 'v8::HandleScope' - return Handle(reinterpret_cast(HandleScope::CreateHandle( - ~~~~~~~~~~~~~^~~~~~~~~~~~ -``` - -This can be worked around by patching your local versions of v8.h corresponding to Node 0.12 to make -`v8::Handle` a friend of `v8::HandleScope`. Since neither Node.js not V8 support this release line anymore -this patch cannot be released by either project in an official release. - -For this reason, we do not test against Node.js 0.12 on OSX in this project's CI. If you need to support -that configuration, you will need to either get an older compiler, or apply a source patch to the version -of V8 headers as a workaround. - - - -## Governance & Contributing - -NAN is governed by the [Node.js Addon API Working Group](https://github.com/nodejs/CTC/blob/master/WORKING_GROUPS.md#addon-api) - -### Addon API Working Group (WG) - -The NAN project is jointly governed by a Working Group which is responsible for high-level guidance of the project. - -Members of the WG are also known as Collaborators, there is no distinction between the two, unlike other Node.js projects. - -The WG has final authority over this project including: - -* Technical direction -* Project governance and process (including this policy) -* Contribution policy -* GitHub repository hosting -* Maintaining the list of additional Collaborators - -For the current list of WG members, see the project [README.md](./README.md#collaborators). - -Individuals making significant and valuable contributions are made members of the WG and given commit-access to the project. 
These individuals are identified by the WG and their addition to the WG is discussed via GitHub and requires unanimous consensus amongst those WG members participating in the discussion with a quorum of 50% of WG members required for acceptance of the vote. - -_Note:_ If you make a significant contribution and are not considered for commit-access log an issue or contact a WG member directly. - -For the current list of WG members / Collaborators, see the project [README.md](./README.md#collaborators). - -### Consensus Seeking Process - -The WG follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision making model. - -Modifications of the contents of the NAN repository are made on a collaborative basis. Anybody with a GitHub account may propose a modification via pull request and it will be considered by the WG. All pull requests must be reviewed and accepted by a WG member with sufficient expertise who is able to take full responsibility for the change. In the case of pull requests proposed by an existing WG member, an additional WG member is required for sign-off. Consensus should be sought if additional WG members participate and there is disagreement around a particular modification. - -If a change proposal cannot reach a consensus, a WG member can call for a vote amongst the members of the WG. Simple majority wins. - - - -## Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -* (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -* (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -* (c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -* (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. - - - -### WG Members / Collaborators - - - - - - - - - - -
| Name | GitHub | Twitter |
|:-----|:-------|:--------|
| Rod Vagg | [GitHub/rvagg](https://github.com/rvagg) | [Twitter/@rvagg](https://twitter.com/rvagg) |
| Benjamin Byholm | [GitHub/kkoopa](https://github.com/kkoopa) | - |
| Trevor Norris | [GitHub/trevnorris](https://github.com/trevnorris) | [Twitter/@trevnorris](https://twitter.com/trevnorris) |
| Nathan Rajlich | [GitHub/TooTallNate](https://github.com/TooTallNate) | [Twitter/@TooTallNate](https://twitter.com/TooTallNate) |
| Brett Lawson | [GitHub/brett19](https://github.com/brett19) | [Twitter/@brett19x](https://twitter.com/brett19x) |
| Ben Noordhuis | [GitHub/bnoordhuis](https://github.com/bnoordhuis) | [Twitter/@bnoordhuis](https://twitter.com/bnoordhuis) |
| David Siegel | [GitHub/agnat](https://github.com/agnat) | [Twitter/@agnat](https://twitter.com/agnat) |
| Michael Ira Krufky | [GitHub/mkrufky](https://github.com/mkrufky) | [Twitter/@mkrufky](https://twitter.com/mkrufky) |
- -## Licence & copyright - -Copyright (c) 2018 NAN WG Members / Collaborators (listed above). - -Native Abstractions for Node.js is licensed under an MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE file for more details. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/asyncworker.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/asyncworker.md deleted file mode 100644 index 04231f83..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/asyncworker.md +++ /dev/null @@ -1,146 +0,0 @@ -## Asynchronous work helpers - -`Nan::AsyncWorker`, `Nan::AsyncProgressWorker` and `Nan::AsyncProgressQueueWorker` are helper classes that make working with asynchronous code easier. - - - Nan::AsyncWorker - - Nan::AsyncProgressWorkerBase & Nan::AsyncProgressWorker - - Nan::AsyncProgressQueueWorker - - Nan::AsyncQueueWorker - - -### Nan::AsyncWorker - -`Nan::AsyncWorker` is an _abstract_ class that you can subclass to have much of the annoying asynchronous queuing and handling taken care of for you. It can even store arbitrary V8 objects for you and have them persist while the asynchronous work is in progress. - -This class internally handles the details of creating an [`AsyncResource`][AsyncResource], and running the callback in the -correct async context. To be able to identify the async resources created by this class in async-hooks, provide a -`resource_name` to the constructor. It is recommended that the module name be used as a prefix to the `resource_name` to avoid -collisions in the names. For more details see [`AsyncResource`][AsyncResource] documentation. The `resource_name` needs to stay valid for the lifetime of the worker instance. - -Definition: - -```c++ -class AsyncWorker { - public: - explicit AsyncWorker(Callback *callback_, const char* resource_name = "nan:AsyncWorker"); - - virtual ~AsyncWorker(); - - virtual void WorkComplete(); - - void SaveToPersistent(const char *key, const v8::Local &value); - - void SaveToPersistent(const v8::Local &key, - const v8::Local &value); - - void SaveToPersistent(uint32_t index, - const v8::Local &value); - - v8::Local GetFromPersistent(const char *key) const; - - v8::Local GetFromPersistent(const v8::Local &key) const; - - v8::Local GetFromPersistent(uint32_t index) const; - - virtual void Execute() = 0; - - uv_work_t request; - - virtual void Destroy(); - - protected: - Persistent persistentHandle; - - Callback *callback; - - virtual void HandleOKCallback(); - - virtual void HandleErrorCallback(); - - void SetErrorMessage(const char *msg); - - const char* ErrorMessage(); -}; -``` - - -### Nan::AsyncProgressWorkerBase & Nan::AsyncProgressWorker - -`Nan::AsyncProgressWorkerBase` is an _abstract_ class template that extends `Nan::AsyncWorker` and adds additional progress reporting callbacks that can be used during the asynchronous work execution to provide progress data back to JavaScript. - -Previously the definition of `Nan::AsyncProgressWorker` only allowed sending `const char` data. Now extending `Nan::AsyncProgressWorker` will yield an instance of the implicit `Nan::AsyncProgressWorkerBase` template with type `` for compatibility. - -`Nan::AsyncProgressWorkerBase` & `Nan::AsyncProgressWorker` is intended for best-effort delivery of nonessential progress messages, e.g. a progress bar. 
The last event sent before the main thread is woken will be delivered. - -Definition: - -```c++ -template -class AsyncProgressWorkerBase : public AsyncWorker { - public: - explicit AsyncProgressWorkerBase(Callback *callback_, const char* resource_name = ...); - - virtual ~AsyncProgressWorkerBase(); - - void WorkProgress(); - - class ExecutionProgress { - public: - void Signal() const; - void Send(const T* data, size_t count) const; - }; - - virtual void Execute(const ExecutionProgress& progress) = 0; - - virtual void HandleProgressCallback(const T *data, size_t count) = 0; - - virtual void Destroy(); -}; - -typedef AsyncProgressWorkerBase AsyncProgressWorker; -``` - - -### Nan::AsyncProgressQueueWorker - -`Nan::AsyncProgressQueueWorker` is an _abstract_ class template that extends `Nan::AsyncWorker` and adds additional progress reporting callbacks that can be used during the asynchronous work execution to provide progress data back to JavaScript. - -`Nan::AsyncProgressQueueWorker` behaves exactly the same as `Nan::AsyncProgressWorker`, except all events are queued and delivered to the main thread. - -Definition: - -```c++ -template -class AsyncProgressQueueWorker : public AsyncWorker { - public: - explicit AsyncProgressQueueWorker(Callback *callback_, const char* resource_name = "nan:AsyncProgressQueueWorker"); - - virtual ~AsyncProgressQueueWorker(); - - void WorkProgress(); - - class ExecutionProgress { - public: - void Send(const T* data, size_t count) const; - }; - - virtual void Execute(const ExecutionProgress& progress) = 0; - - virtual void HandleProgressCallback(const T *data, size_t count) = 0; - - virtual void Destroy(); -}; -``` - - -### Nan::AsyncQueueWorker - -`Nan::AsyncQueueWorker` will run a `Nan::AsyncWorker` asynchronously via libuv. Both the `execute` and `after_work` steps are taken care of for you. Most of the logic for this is embedded in `Nan::AsyncWorker`. - -Definition: - -```c++ -void AsyncQueueWorker(AsyncWorker *); -``` - -[AsyncResource]: node_misc.md#api_nan_asyncresource diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/buffers.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/buffers.md deleted file mode 100644 index 8d8d25cf..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/buffers.md +++ /dev/null @@ -1,54 +0,0 @@ -## Buffers - -NAN's `node::Buffer` helpers exist as the API has changed across supported Node versions. Use these methods to ensure compatibility. - - - Nan::NewBuffer() - - Nan::CopyBuffer() - - Nan::FreeCallback() - - -### Nan::NewBuffer() - -Allocate a new `node::Buffer` object with the specified size and optional data. Calls `node::Buffer::New()`. - -Note that when creating a `Buffer` using `Nan::NewBuffer()` and an existing `char*`, it is assumed that the ownership of the pointer is being transferred to the new `Buffer` for management. -When a `node::Buffer` instance is garbage collected and a `FreeCallback` has not been specified, `data` will be disposed of via a call to `free()`. -You _must not_ free the memory space manually once you have created a `Buffer` in this way. 
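For example, a minimal sketch (not from the original docs; assumes a heap allocation you intend to hand over to V8):

```c++
// Ownership of `data` transfers to the Buffer: it will be released via
// free() on garbage collection, so it must not be freed manually here.
char *data = static_cast<char *>(malloc(128));
memset(data, 0, 128);
v8::Local<v8::Object> buf = Nan::NewBuffer(data, 128).ToLocalChecked();
```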
- -Signature: - -```c++ -Nan::MaybeLocal Nan::NewBuffer(uint32_t size) -Nan::MaybeLocal Nan::NewBuffer(char* data, uint32_t size) -Nan::MaybeLocal Nan::NewBuffer(char *data, - size_t length, - Nan::FreeCallback callback, - void *hint) -``` - - - -### Nan::CopyBuffer() - -Similar to [`Nan::NewBuffer()`](#api_nan_new_buffer) except that an implicit memcpy will occur within Node. Calls `node::Buffer::Copy()`. - -Management of the `char*` is left to the user, you should manually free the memory space if necessary as the new `Buffer` will have its own copy. - -Signature: - -```c++ -Nan::MaybeLocal Nan::CopyBuffer(const char *data, uint32_t size) -``` - - - -### Nan::FreeCallback() - -A free callback that can be provided to [`Nan::NewBuffer()`](#api_nan_new_buffer). -The supplied callback will be invoked when the `Buffer` undergoes garbage collection. - -Signature: - -```c++ -typedef void (*FreeCallback)(char *data, void *hint); -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/callback.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/callback.md deleted file mode 100644 index f7af0bfd..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/callback.md +++ /dev/null @@ -1,76 +0,0 @@ -## Nan::Callback - -`Nan::Callback` makes it easier to use `v8::Function` handles as callbacks. A class that wraps a `v8::Function` handle, protecting it from garbage collection and making it particularly useful for storage and use across asynchronous execution. - - - Nan::Callback - - -### Nan::Callback - -```c++ -class Callback { - public: - Callback(); - - explicit Callback(const v8::Local &fn); - - ~Callback(); - - bool operator==(const Callback &other) const; - - bool operator!=(const Callback &other) const; - - v8::Local operator*() const; - - MaybeLocal operator()(AsyncResource* async_resource, - v8::Local target, - int argc = 0, - v8::Local argv[] = 0) const; - - MaybeLocal operator()(AsyncResource* async_resource, - int argc = 0, - v8::Local argv[] = 0) const; - - void SetFunction(const v8::Local &fn); - - v8::Local GetFunction() const; - - bool IsEmpty() const; - - void Reset(const v8::Local &fn); - - void Reset(); - - MaybeLocal Call(v8::Local target, - int argc, - v8::Local argv[], - AsyncResource* async_resource) const; - MaybeLocal Call(int argc, - v8::Local argv[], - AsyncResource* async_resource) const; - - // Deprecated versions. Use the versions that accept an async_resource instead - // as they run the callback in the correct async context as specified by the - // resource. If you want to call a synchronous JS function (i.e. on a - // non-empty JS stack), you can use Nan::Call instead. 
- v8::Local operator()(v8::Local target, - int argc = 0, - v8::Local argv[] = 0) const; - - v8::Local operator()(int argc = 0, - v8::Local argv[] = 0) const; - v8::Local Call(v8::Local target, - int argc, - v8::Local argv[]) const; - - v8::Local Call(int argc, v8::Local argv[]) const; -}; -``` - -Example usage: - -```c++ -v8::Local function; -Nan::Callback callback(function); -callback.Call(0, 0); -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/converters.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/converters.md deleted file mode 100644 index d20861b5..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/converters.md +++ /dev/null @@ -1,41 +0,0 @@ -## Converters - -NAN contains functions that convert `v8::Value`s to other `v8::Value` types and native types. Since type conversion is not guaranteed to succeed, they return `Nan::Maybe` types. These converters can be used in place of `value->ToX()` and `value->XValue()` (where `X` is one of the types, e.g. `Boolean`) in a way that provides a consistent interface across V8 versions. Newer versions of V8 use the new `v8::Maybe` and `v8::MaybeLocal` types for these conversions, older versions don't have this functionality so it is provided by NAN. - - - Nan::To() - - -### Nan::To() - -Converts a `v8::Local` to a different subtype of `v8::Value` or to a native data type. Returns a `Nan::MaybeLocal<>` or a `Nan::Maybe<>` accordingly. - -See [maybe_types.md](./maybe_types.md) for more information on `Nan::Maybe` types. - -Signatures: - -```c++ -// V8 types -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); -Nan::MaybeLocal Nan::To(v8::Local val); - -// Native types -Nan::Maybe Nan::To(v8::Local val); -Nan::Maybe Nan::To(v8::Local val); -Nan::Maybe Nan::To(v8::Local val); -Nan::Maybe Nan::To(v8::Local val); -Nan::Maybe Nan::To(v8::Local val); -``` - -### Example - -```c++ -v8::Local val; -Nan::MaybeLocal str = Nan::To(val); -Nan::Maybe d = Nan::To(val); -``` - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/errors.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/errors.md deleted file mode 100644 index 843435b2..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/errors.md +++ /dev/null @@ -1,226 +0,0 @@ -## Errors - -NAN includes helpers for creating, throwing and catching Errors as much of this functionality varies across the supported versions of V8 and must be abstracted. - -Note that an Error object is simply a specialized form of `v8::Value`. - -Also consult the V8 Embedders Guide section on [Exceptions](https://developers.google.com/v8/embed#exceptions) for more information. 
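Before the individual helpers listed below, a minimal sketch of the common pattern, creating and throwing an error from inside a JavaScript-accessible method (`Divide` is an invented example name, not part of the original docs):

```c++
NAN_METHOD(Divide) {
  if (info.Length() < 2 || !info[0]->IsNumber() || !info[1]->IsNumber()) {
    // Creates a TypeError and throws it in the current context.
    return Nan::ThrowTypeError("Divide expects two numeric arguments");
  }
  double divisor = Nan::To<double>(info[1]).FromJust();
  if (divisor == 0) {
    return Nan::ThrowRangeError("division by zero");
  }
  info.GetReturnValue().Set(Nan::To<double>(info[0]).FromJust() / divisor);
}
```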
- - - Nan::Error() - - Nan::RangeError() - - Nan::ReferenceError() - - Nan::SyntaxError() - - Nan::TypeError() - - Nan::ThrowError() - - Nan::ThrowRangeError() - - Nan::ThrowReferenceError() - - Nan::ThrowSyntaxError() - - Nan::ThrowTypeError() - - Nan::FatalException() - - Nan::ErrnoException() - - Nan::TryCatch - - - -### Nan::Error() - -Create a new Error object using the [v8::Exception](https://v8docs.nodesource.com/node-8.16/da/d6a/classv8_1_1_exception.html) class in a way that is compatible across the supported versions of V8. - -Note that an Error object is simply a specialized form of `v8::Value`. - -Signature: - -```c++ -v8::Local Nan::Error(const char *msg); -v8::Local Nan::Error(v8::Local msg); -``` - - - -### Nan::RangeError() - -Create a new RangeError object using the [v8::Exception](https://v8docs.nodesource.com/node-8.16/da/d6a/classv8_1_1_exception.html) class in a way that is compatible across the supported versions of V8. - -Note that an RangeError object is simply a specialized form of `v8::Value`. - -Signature: - -```c++ -v8::Local Nan::RangeError(const char *msg); -v8::Local Nan::RangeError(v8::Local msg); -``` - - - -### Nan::ReferenceError() - -Create a new ReferenceError object using the [v8::Exception](https://v8docs.nodesource.com/node-8.16/da/d6a/classv8_1_1_exception.html) class in a way that is compatible across the supported versions of V8. - -Note that an ReferenceError object is simply a specialized form of `v8::Value`. - -Signature: - -```c++ -v8::Local Nan::ReferenceError(const char *msg); -v8::Local Nan::ReferenceError(v8::Local msg); -``` - - - -### Nan::SyntaxError() - -Create a new SyntaxError object using the [v8::Exception](https://v8docs.nodesource.com/node-8.16/da/d6a/classv8_1_1_exception.html) class in a way that is compatible across the supported versions of V8. - -Note that an SyntaxError object is simply a specialized form of `v8::Value`. - -Signature: - -```c++ -v8::Local Nan::SyntaxError(const char *msg); -v8::Local Nan::SyntaxError(v8::Local msg); -``` - - - -### Nan::TypeError() - -Create a new TypeError object using the [v8::Exception](https://v8docs.nodesource.com/node-8.16/da/d6a/classv8_1_1_exception.html) class in a way that is compatible across the supported versions of V8. - -Note that an TypeError object is simply a specialized form of `v8::Value`. - -Signature: - -```c++ -v8::Local Nan::TypeError(const char *msg); -v8::Local Nan::TypeError(v8::Local msg); -``` - - - -### Nan::ThrowError() - -Throw an Error object (a specialized `v8::Value` as above) in the current context. If a `msg` is provided, a new Error object will be created. - -Signature: - -```c++ -void Nan::ThrowError(const char *msg); -void Nan::ThrowError(v8::Local msg); -void Nan::ThrowError(v8::Local error); -``` - - - -### Nan::ThrowRangeError() - -Throw an RangeError object (a specialized `v8::Value` as above) in the current context. If a `msg` is provided, a new RangeError object will be created. - -Signature: - -```c++ -void Nan::ThrowRangeError(const char *msg); -void Nan::ThrowRangeError(v8::Local msg); -void Nan::ThrowRangeError(v8::Local error); -``` - - - -### Nan::ThrowReferenceError() - -Throw an ReferenceError object (a specialized `v8::Value` as above) in the current context. If a `msg` is provided, a new ReferenceError object will be created. 
- -Signature: - -```c++ -void Nan::ThrowReferenceError(const char *msg); -void Nan::ThrowReferenceError(v8::Local msg); -void Nan::ThrowReferenceError(v8::Local error); -``` - - - -### Nan::ThrowSyntaxError() - -Throw an SyntaxError object (a specialized `v8::Value` as above) in the current context. If a `msg` is provided, a new SyntaxError object will be created. - -Signature: - -```c++ -void Nan::ThrowSyntaxError(const char *msg); -void Nan::ThrowSyntaxError(v8::Local msg); -void Nan::ThrowSyntaxError(v8::Local error); -``` - - - -### Nan::ThrowTypeError() - -Throw an TypeError object (a specialized `v8::Value` as above) in the current context. If a `msg` is provided, a new TypeError object will be created. - -Signature: - -```c++ -void Nan::ThrowTypeError(const char *msg); -void Nan::ThrowTypeError(v8::Local msg); -void Nan::ThrowTypeError(v8::Local error); -``` - - -### Nan::FatalException() - -Replaces `node::FatalException()` which has a different API across supported versions of Node. For use with [`Nan::TryCatch`](#api_nan_try_catch). - -Signature: - -```c++ -void Nan::FatalException(const Nan::TryCatch& try_catch); -``` - - -### Nan::ErrnoException() - -Replaces `node::ErrnoException()` which has a different API across supported versions of Node. - -Signature: - -```c++ -v8::Local Nan::ErrnoException(int errorno, - const char* syscall = NULL, - const char* message = NULL, - const char* path = NULL); -``` - - - -### Nan::TryCatch - -A simple wrapper around [`v8::TryCatch`](https://v8docs.nodesource.com/node-8.16/d4/dc6/classv8_1_1_try_catch.html) compatible with all supported versions of V8. Can be used as a direct replacement in most cases. See also [`Nan::FatalException()`](#api_nan_fatal_exception) for an internal use compatible with `node::FatalException`. - -Signature: - -```c++ -class Nan::TryCatch { - public: - Nan::TryCatch(); - - bool HasCaught() const; - - bool CanContinue() const; - - v8::Local ReThrow(); - - v8::Local Exception() const; - - // Nan::MaybeLocal for older versions of V8 - v8::MaybeLocal StackTrace() const; - - v8::Local Message() const; - - void Reset(); - - void SetVerbose(bool value); - - void SetCaptureMessage(bool value); -}; -``` - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/json.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/json.md deleted file mode 100644 index 55beb262..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/json.md +++ /dev/null @@ -1,62 +0,0 @@ -## JSON - -The _JSON_ object provides the C++ versions of the methods offered by the `JSON` object in javascript. V8 exposes these methods via the `v8::JSON` object. - - - Nan::JSON.Parse - - Nan::JSON.Stringify - -Refer to the V8 JSON object in the [V8 documentation](https://v8docs.nodesource.com/node-8.16/da/d6f/classv8_1_1_j_s_o_n.html) for more information about these methods and their arguments. - - - -### Nan::JSON.Parse - -A simple wrapper around [`v8::JSON::Parse`](https://v8docs.nodesource.com/node-8.16/da/d6f/classv8_1_1_j_s_o_n.html#a936310d2540fb630ed37d3ee3ffe4504). - -Definition: - -```c++ -Nan::MaybeLocal Nan::JSON::Parse(v8::Local json_string); -``` - -Use `JSON.Parse(json_string)` to parse a string into a `v8::Value`. 
- -Example: - -```c++ -v8::Local json_string = Nan::New("{ \"JSON\": \"object\" }").ToLocalChecked(); - -Nan::JSON NanJSON; -Nan::MaybeLocal result = NanJSON.Parse(json_string); -if (!result.IsEmpty()) { - v8::Local val = result.ToLocalChecked(); -} -``` - - - -### Nan::JSON.Stringify - -A simple wrapper around [`v8::JSON::Stringify`](https://v8docs.nodesource.com/node-8.16/da/d6f/classv8_1_1_j_s_o_n.html#a44b255c3531489ce43f6110209138860). - -Definition: - -```c++ -Nan::MaybeLocal Nan::JSON::Stringify(v8::Local json_object, v8::Local gap = v8::Local()); -``` - -Use `JSON.Stringify(value)` to stringify a `v8::Object`. - -Example: - -```c++ -// using `v8::Local val` from the `JSON::Parse` example -v8::Local obj = Nan::To(val).ToLocalChecked(); - -Nan::JSON NanJSON; -Nan::MaybeLocal result = NanJSON.Stringify(obj); -if (!result.IsEmpty()) { - v8::Local stringified = result.ToLocalChecked(); -} -``` - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/maybe_types.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/maybe_types.md deleted file mode 100644 index 142851a1..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/maybe_types.md +++ /dev/null @@ -1,583 +0,0 @@ -## Maybe Types - -The `Nan::MaybeLocal` and `Nan::Maybe` types are monads that encapsulate `v8::Local` handles that _may be empty_. - -* **Maybe Types** - - Nan::MaybeLocal - - Nan::Maybe - - Nan::Nothing - - Nan::Just -* **Maybe Helpers** - - Nan::Call() - - Nan::ToDetailString() - - Nan::ToArrayIndex() - - Nan::Equals() - - Nan::NewInstance() - - Nan::GetFunction() - - Nan::Set() - - Nan::DefineOwnProperty() - - Nan::ForceSet() - - Nan::Get() - - Nan::GetPropertyAttributes() - - Nan::Has() - - Nan::Delete() - - Nan::GetPropertyNames() - - Nan::GetOwnPropertyNames() - - Nan::SetPrototype() - - Nan::ObjectProtoToString() - - Nan::HasOwnProperty() - - Nan::HasRealNamedProperty() - - Nan::HasRealIndexedProperty() - - Nan::HasRealNamedCallbackProperty() - - Nan::GetRealNamedPropertyInPrototypeChain() - - Nan::GetRealNamedProperty() - - Nan::CallAsFunction() - - Nan::CallAsConstructor() - - Nan::GetSourceLine() - - Nan::GetLineNumber() - - Nan::GetStartColumn() - - Nan::GetEndColumn() - - Nan::CloneElementAt() - - Nan::HasPrivate() - - Nan::GetPrivate() - - Nan::SetPrivate() - - Nan::DeletePrivate() - - Nan::MakeMaybe() - - -### Nan::MaybeLocal - -A `Nan::MaybeLocal` is a wrapper around [`v8::Local`](https://v8docs.nodesource.com/node-8.16/de/deb/classv8_1_1_local.html) that enforces a check that determines whether the `v8::Local` is empty before it can be used. - -If an API method returns a `Nan::MaybeLocal`, the API method can potentially fail either because an exception is thrown, or because an exception is pending, e.g. because a previous API call threw an exception that hasn't been caught yet, or because a `v8::TerminateExecution` exception was thrown. In that case, an empty `Nan::MaybeLocal` is returned. - -Definition: - -```c++ -template class Nan::MaybeLocal { - public: - MaybeLocal(); - - template MaybeLocal(v8::Local that); - - bool IsEmpty() const; - - template bool ToLocal(v8::Local *out); - - // Will crash if the MaybeLocal<> is empty. 
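  // If an empty handle must be handled gracefully rather than crashing,
  // use ToLocal(&out) above or FromMaybe(default_value) below instead.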
- v8::Local ToLocalChecked(); - - template v8::Local FromMaybe(v8::Local default_value) const; -}; -``` - -See the documentation for [`v8::MaybeLocal`](https://v8docs.nodesource.com/node-8.16/d8/d7d/classv8_1_1_maybe_local.html) for further details. - - -### Nan::Maybe - -A simple `Nan::Maybe` type, representing an object which may or may not have a value, see https://hackage.haskell.org/package/base/docs/Data-Maybe.html. - -If an API method returns a `Nan::Maybe<>`, the API method can potentially fail either because an exception is thrown, or because an exception is pending, e.g. because a previous API call threw an exception that hasn't been caught yet, or because a `v8::TerminateExecution` exception was thrown. In that case, a "Nothing" value is returned. - -Definition: - -```c++ -template class Nan::Maybe { - public: - bool IsNothing() const; - bool IsJust() const; - - // Will crash if the Maybe<> is nothing. - T FromJust(); - - T FromMaybe(const T& default_value); - - bool operator==(const Maybe &other); - - bool operator!=(const Maybe &other); -}; -``` - -See the documentation for [`v8::Maybe`](https://v8docs.nodesource.com/node-8.16/d9/d4b/classv8_1_1_maybe.html) for further details. - - -### Nan::Nothing - -Construct an empty `Nan::Maybe` type representing _nothing_. - -```c++ -template Nan::Maybe Nan::Nothing(); -``` - - -### Nan::Just - -Construct a `Nan::Maybe` type representing _just_ a value. - -```c++ -template Nan::Maybe Nan::Just(const T &t); -``` - - -### Nan::Call() - -A helper method for calling a synchronous [`v8::Function#Call()`](https://v8docs.nodesource.com/node-8.16/d5/d54/classv8_1_1_function.html#a9c3d0e4e13ddd7721fce238aa5b94a11) in a way compatible across supported versions of V8. - -For asynchronous callbacks, use Nan::Callback::Call along with an AsyncResource. - -Signature: - -```c++ -Nan::MaybeLocal Nan::Call(v8::Local fun, v8::Local recv, int argc, v8::Local argv[]); -Nan::MaybeLocal Nan::Call(const Nan::Callback& callback, v8::Local recv, - int argc, v8::Local argv[]); -Nan::MaybeLocal Nan::Call(const Nan::Callback& callback, int argc, v8::Local argv[]); -``` - - - -### Nan::ToDetailString() - -A helper method for calling [`v8::Value#ToDetailString()`](https://v8docs.nodesource.com/node-8.16/dc/d0a/classv8_1_1_value.html#a2f9770296dc2c8d274bc8cc0dca243e5) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::ToDetailString(v8::Local val); -``` - - - -### Nan::ToArrayIndex() - -A helper method for calling [`v8::Value#ToArrayIndex()`](https://v8docs.nodesource.com/node-8.16/dc/d0a/classv8_1_1_value.html#acc5bbef3c805ec458470c0fcd6f13493) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::ToArrayIndex(v8::Local val); -``` - - - -### Nan::Equals() - -A helper method for calling [`v8::Value#Equals()`](https://v8docs.nodesource.com/node-8.16/dc/d0a/classv8_1_1_value.html#a08fba1d776a59bbf6864b25f9152c64b) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::Equals(v8::Local a, v8::Local(b)); -``` - - - -### Nan::NewInstance() - -A helper method for calling [`v8::Function#NewInstance()`](https://v8docs.nodesource.com/node-8.16/d5/d54/classv8_1_1_function.html#ae477558b10c14b76ed00e8dbab44ce5b) and [`v8::ObjectTemplate#NewInstance()`](https://v8docs.nodesource.com/node-8.16/db/d5f/classv8_1_1_object_template.html#ad605a7543cfbc5dab54cdb0883d14ae4) in a way compatible across supported versions of V8. 
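For example, a minimal sketch of constructing an instance (not from the original docs; assumes a `v8::Local<v8::Function> cons` is in scope, obtained e.g. via `Nan::GetFunction()`):

```c++
v8::Local<v8::Value> argv[] = { Nan::New(42) };
// Equivalent to `new cons(42)` in JavaScript; the result is empty on
// failure, so handle the Nan::MaybeLocal before using it.
v8::Local<v8::Object> instance =
    Nan::NewInstance(cons, 1, argv).ToLocalChecked();
```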
- -Signature: - -```c++ -Nan::MaybeLocal Nan::NewInstance(v8::Local h); -Nan::MaybeLocal Nan::NewInstance(v8::Local h, int argc, v8::Local argv[]); -Nan::MaybeLocal Nan::NewInstance(v8::Local h); -``` - - - -### Nan::GetFunction() - -A helper method for calling [`v8::FunctionTemplate#GetFunction()`](https://v8docs.nodesource.com/node-8.16/d8/d83/classv8_1_1_function_template.html#a56d904662a86eca78da37d9bb0ed3705) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::GetFunction(v8::Local t); -``` - - - -### Nan::Set() - -A helper method for calling [`v8::Object#Set()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a67604ea3734f170c66026064ea808f20) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::Set(v8::Local obj, - v8::Local key, - v8::Local value) -Nan::Maybe Nan::Set(v8::Local obj, - uint32_t index, - v8::Local value); -``` - - - -### Nan::DefineOwnProperty() - -A helper method for calling [`v8::Object#DefineOwnProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a6f76b2ed605cb8f9185b92de0033a820) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::DefineOwnProperty(v8::Local obj, - v8::Local key, - v8::Local value, - v8::PropertyAttribute attribs = v8::None); -``` - - - -### Nan::ForceSet() - -Deprecated, use Nan::DefineOwnProperty(). - -A helper method for calling [`v8::Object#ForceSet()`](https://v8docs.nodesource.com/node-0.12/db/d85/classv8_1_1_object.html#acfbdfd7427b516ebdb5c47c4df5ed96c) in a way compatible across supported versions of V8. - -Signature: - -```c++ -NAN_DEPRECATED Nan::Maybe Nan::ForceSet(v8::Local obj, - v8::Local key, - v8::Local value, - v8::PropertyAttribute attribs = v8::None); -``` - - - -### Nan::Get() - -A helper method for calling [`v8::Object#Get()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a2565f03e736694f6b1e1cf22a0b4eac2) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::Get(v8::Local obj, - v8::Local key); -Nan::MaybeLocal Nan::Get(v8::Local obj, uint32_t index); -``` - - - -### Nan::GetPropertyAttributes() - -A helper method for calling [`v8::Object#GetPropertyAttributes()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a9b898894da3d1db2714fd9325a54fe57) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::GetPropertyAttributes( - v8::Local obj, - v8::Local key); -``` - - - -### Nan::Has() - -A helper method for calling [`v8::Object#Has()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ab3c3d89ea7c2f9afd08965bd7299a41d) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::Has(v8::Local obj, v8::Local key); -Nan::Maybe Nan::Has(v8::Local obj, uint32_t index); -``` - - - -### Nan::Delete() - -A helper method for calling [`v8::Object#Delete()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a48e4a19b2cedff867eecc73ddb7d377f) in a way compatible across supported versions of V8. 
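Combined with `Nan::Set()` and `Nan::Get()` above, a minimal sketch of a full property round trip (not from the original docs; assumes a `v8::Local<v8::Object> obj` is in scope):

```c++
v8::Local<v8::String> key = Nan::New("answer").ToLocalChecked();
Nan::Set(obj, key, Nan::New(42));                              // obj.answer = 42
v8::Local<v8::Value> v = Nan::Get(obj, key).ToLocalChecked();  // read it back
Nan::Delete(obj, key);                                         // delete obj.answer
```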
- -Signature: - -```c++ -Nan::Maybe Nan::Delete(v8::Local obj, - v8::Local key); -Nan::Maybe Nan::Delete(v8::Local obj, uint32_t index); -``` - - - -### Nan::GetPropertyNames() - -A helper method for calling [`v8::Object#GetPropertyNames()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#aced885270cfd2c956367b5eedc7fbfe8) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::GetPropertyNames(v8::Local obj); -``` - - - -### Nan::GetOwnPropertyNames() - -A helper method for calling [`v8::Object#GetOwnPropertyNames()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a79a6e4d66049b9aa648ed4dfdb23e6eb) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::GetOwnPropertyNames(v8::Local obj); -``` - - - -### Nan::SetPrototype() - -A helper method for calling [`v8::Object#SetPrototype()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a442706b22fceda6e6d1f632122a9a9f4) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::SetPrototype(v8::Local obj, - v8::Local prototype); -``` - - - -### Nan::ObjectProtoToString() - -A helper method for calling [`v8::Object#ObjectProtoToString()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ab7a92b4dcf822bef72f6c0ac6fea1f0b) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::ObjectProtoToString(v8::Local obj); -``` - - - -### Nan::HasOwnProperty() - -A helper method for calling [`v8::Object#HasOwnProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ab7b7245442ca6de1e1c145ea3fd653ff) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::HasOwnProperty(v8::Local obj, - v8::Local key); -``` - - - -### Nan::HasRealNamedProperty() - -A helper method for calling [`v8::Object#HasRealNamedProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ad8b80a59c9eb3c1e6c3cd6c84571f767) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::HasRealNamedProperty(v8::Local obj, - v8::Local key); -``` - - - -### Nan::HasRealIndexedProperty() - -A helper method for calling [`v8::Object#HasRealIndexedProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#af94fc1135a5e74a2193fb72c3a1b9855) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::HasRealIndexedProperty(v8::Local obj, - uint32_t index); -``` - - - -### Nan::HasRealNamedCallbackProperty() - -A helper method for calling [`v8::Object#HasRealNamedCallbackProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#af743b7ea132b89f84d34d164d0668811) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::HasRealNamedCallbackProperty( - v8::Local obj, - v8::Local key); -``` - - - -### Nan::GetRealNamedPropertyInPrototypeChain() - -A helper method for calling [`v8::Object#GetRealNamedPropertyInPrototypeChain()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a8700b1862e6b4783716964ba4d5e6172) in a way compatible across supported versions of V8. 
- -Signature: - -```c++ -Nan::MaybeLocal Nan::GetRealNamedPropertyInPrototypeChain( - v8::Local obj, - v8::Local key); -``` - - - -### Nan::GetRealNamedProperty() - -A helper method for calling [`v8::Object#GetRealNamedProperty()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a84471a824576a5994fdd0ffcbf99ccc0) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::GetRealNamedProperty(v8::Local obj, - v8::Local key); -``` - - - -### Nan::CallAsFunction() - -A helper method for calling [`v8::Object#CallAsFunction()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ad3ffc36f3dfc3592ce2a96bc047ee2cd) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::CallAsFunction(v8::Local obj, - v8::Local recv, - int argc, - v8::Local argv[]); -``` - - - -### Nan::CallAsConstructor() - -A helper method for calling [`v8::Object#CallAsConstructor()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a50d571de50d0b0dfb28795619d07a01b) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::CallAsConstructor(v8::Local obj, - int argc, - v8::Local argv[]); -``` - - - -### Nan::GetSourceLine() - -A helper method for calling [`v8::Message#GetSourceLine()`](https://v8docs.nodesource.com/node-8.16/d9/d28/classv8_1_1_message.html#a849f7a6c41549d83d8159825efccd23a) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::GetSourceLine(v8::Local msg); -``` - - - -### Nan::GetLineNumber() - -A helper method for calling [`v8::Message#GetLineNumber()`](https://v8docs.nodesource.com/node-8.16/d9/d28/classv8_1_1_message.html#adbe46c10a88a6565f2732a2d2adf99b9) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::GetLineNumber(v8::Local msg); -``` - - - -### Nan::GetStartColumn() - -A helper method for calling [`v8::Message#GetStartColumn()`](https://v8docs.nodesource.com/node-8.16/d9/d28/classv8_1_1_message.html#a60ede616ba3822d712e44c7a74487ba6) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::GetStartColumn(v8::Local msg); -``` - - - -### Nan::GetEndColumn() - -A helper method for calling [`v8::Message#GetEndColumn()`](https://v8docs.nodesource.com/node-8.16/d9/d28/classv8_1_1_message.html#aaa004cf19e529da980bc19fcb76d93be) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::GetEndColumn(v8::Local msg); -``` - - - -### Nan::CloneElementAt() - -A helper method for calling [`v8::Array#CloneElementAt()`](https://v8docs.nodesource.com/node-4.8/d3/d32/classv8_1_1_array.html#a1d3a878d4c1c7cae974dd50a1639245e) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::CloneElementAt(v8::Local array, uint32_t index); -``` - - -### Nan::HasPrivate() - -A helper method for calling [`v8::Object#HasPrivate()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#af68a0b98066cfdeb8f943e98a40ba08d) in a way compatible across supported versions of V8. - -Signature: - -```c++ -Nan::Maybe Nan::HasPrivate(v8::Local object, v8::Local key); -``` - - -### Nan::GetPrivate() - -A helper method for calling [`v8::Object#GetPrivate()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a169f2da506acbec34deadd9149a1925a) in a way compatible across supported versions of V8. 
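A minimal sketch of stashing native-only state on an object with the private-property helpers (not from the original docs; assumes a `v8::Local<v8::Object> obj` is in scope; `Nan::SetPrivate()` is documented just below):

```c++
v8::Local<v8::String> key = Nan::New("my_addon:state").ToLocalChecked();
// Unlike an ordinary property, a private property is invisible to
// JavaScript property access and enumeration.
Nan::SetPrivate(obj, key, Nan::New(42));
v8::Local<v8::Value> state = Nan::GetPrivate(obj, key).ToLocalChecked();
```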
-
-### Nan::MakeMaybe()
-
-Wraps a `v8::Local<>` in a `Nan::MaybeLocal<>`. When called with a `Nan::MaybeLocal<>` it just returns its argument. This is useful in generic template code that builds on NAN.
-
-Synopsis:
-
-```c++
-  MaybeLocal<v8::Number> someNumber = MakeMaybe(New<v8::Number>(3.141592654));
-  MaybeLocal<v8::String> someString = MakeMaybe(New<v8::String>("probably"));
-```
-
-Signature:
-
-```c++
-template <typename T, template <typename> class MaybeMaybe>
-Nan::MaybeLocal<T> Nan::MakeMaybe(MaybeMaybe<T> v);
-```
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/methods.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/methods.md
deleted file mode 100644
index 9642d027..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/methods.md
+++ /dev/null
@@ -1,664 +0,0 @@
-## JavaScript-accessible methods
-
-A _template_ is a blueprint for JavaScript functions and objects in a context. You can use a template to wrap C++ functions and data structures within JavaScript objects so that they can be manipulated from JavaScript. See the V8 Embedders Guide section on [Templates](https://github.com/v8/v8/wiki/Embedder%27s-Guide#templates) for further information.
-
-In order to expose functionality to JavaScript via a template, you must provide it to V8 in a form that it understands. Across the versions of V8 supported by NAN, JavaScript-accessible method signatures vary widely; NAN fully abstracts method declaration and provides you with an interface that is similar to the most recent V8 API but is backward-compatible with older versions that still use the now-deceased `v8::Argument` type.
-
-* **Method argument types**
-  - Nan::FunctionCallbackInfo
-  - Nan::PropertyCallbackInfo
-  - Nan::ReturnValue
-* **Method declarations**
-  - Method declaration
-  - Getter declaration
-  - Setter declaration
-  - Property getter declaration
-  - Property setter declaration
-  - Property enumerator declaration
-  - Property deleter declaration
-  - Property query declaration
-  - Index getter declaration
-  - Index setter declaration
-  - Index enumerator declaration
-  - Index deleter declaration
-  - Index query declaration
-* Method and template helpers
-  - Nan::SetMethod()
-  - Nan::SetPrototypeMethod()
-  - Nan::SetAccessor()
-  - Nan::SetNamedPropertyHandler()
-  - Nan::SetIndexedPropertyHandler()
-  - Nan::SetTemplate()
-  - Nan::SetPrototypeTemplate()
-  - Nan::SetInstanceTemplate()
-  - Nan::SetCallHandler()
-  - Nan::SetCallAsFunctionHandler()
-
-
-### Nan::FunctionCallbackInfo
-
-`Nan::FunctionCallbackInfo` should be used in place of [`v8::FunctionCallbackInfo`](https://v8docs.nodesource.com/node-8.16/dd/d0d/classv8_1_1_function_callback_info.html), even with older versions of Node where `v8::FunctionCallbackInfo` does not exist.
-
-Definition:
-
-```c++
-template<typename T> class FunctionCallbackInfo {
-  public:
-    ReturnValue<T> GetReturnValue() const;
-    v8::Local<v8::Function> Callee(); // NOTE: Not available in NodeJS >= 10.0.0
-    v8::Local<v8::Value> Data();
-    v8::Local<v8::Object> Holder();
-    bool IsConstructCall();
-    int Length() const;
-    v8::Local<v8::Value> operator[](int i) const;
-    v8::Local<v8::Object> This() const;
-    v8::Isolate *GetIsolate() const;
-};
-```
-
-See the [`v8::FunctionCallbackInfo`](https://v8docs.nodesource.com/node-8.16/dd/d0d/classv8_1_1_function_callback_info.html) documentation for usage details on these. See [`Nan::ReturnValue`](#api_nan_return_value) for further information on how to set a return value from methods.
-
-**Note:** `FunctionCallbackInfo::Callee` is removed in Node.js after `10.0.0` because it was deprecated in V8. Consider using `info.Data()` to pass any information you need.
-
-### Nan::PropertyCallbackInfo
-
-`Nan::PropertyCallbackInfo` should be used in place of [`v8::PropertyCallbackInfo`](https://v8docs.nodesource.com/node-8.16/d7/dc5/classv8_1_1_property_callback_info.html), even with older versions of Node where `v8::PropertyCallbackInfo` does not exist.
-
-Definition:
-
-```c++
-template<typename T> class PropertyCallbackInfo : public PropertyCallbackInfoBase<T> {
-  public:
-    ReturnValue<T> GetReturnValue() const;
-    v8::Isolate* GetIsolate() const;
-    v8::Local<v8::Value> Data() const;
-    v8::Local<v8::Object> This() const;
-    v8::Local<v8::Object> Holder() const;
-};
-```
-
-See the [`v8::PropertyCallbackInfo`](https://v8docs.nodesource.com/node-8.16/d7/dc5/classv8_1_1_property_callback_info.html) documentation for usage details on these. See [`Nan::ReturnValue`](#api_nan_return_value) for further information on how to set a return value from property accessor methods.
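-
-For illustration, a minimal sketch of typical argument handling through `Nan::FunctionCallbackInfo` (the method name is hypothetical):
-
-```c++
-NAN_METHOD(Add) {
-  // Validate arity and types before touching the arguments.
-  if (info.Length() < 2 || !info[0]->IsNumber() || !info[1]->IsNumber()) {
-    return Nan::ThrowTypeError("expected two numbers");
-  }
-  double a = Nan::To<double>(info[0]).FromJust();
-  double b = Nan::To<double>(info[1]).FromJust();
-  info.GetReturnValue().Set(Nan::New<v8::Number>(a + b));
-}
-```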
-
-
-### Nan::ReturnValue
-
-`Nan::ReturnValue` is used in place of [`v8::ReturnValue`](https://v8docs.nodesource.com/node-8.16/da/da7/classv8_1_1_return_value.html) on both [`Nan::FunctionCallbackInfo`](#api_nan_function_callback_info) and [`Nan::PropertyCallbackInfo`](#api_nan_property_callback_info) as the return type of `GetReturnValue()`.
-
-Example usage:
-
-```c++
-void EmptyArray(const Nan::FunctionCallbackInfo<v8::Value>& info) {
-  info.GetReturnValue().Set(Nan::New<v8::Array>());
-}
-```
-
-Definition:
-
-```c++
-template<typename T> class ReturnValue {
- public:
-  // Handle setters
-  template <typename S> void Set(const v8::Local<S> &handle);
-  template <typename S> void Set(const Nan::Global<S> &handle);
-
-  // Fast primitive setters
-  void Set(bool value);
-  void Set(double i);
-  void Set(int32_t i);
-  void Set(uint32_t i);
-
-  // Fast JS primitive setters
-  void SetNull();
-  void SetUndefined();
-  void SetEmptyString();
-
-  // Convenience getter for isolate
-  v8::Isolate *GetIsolate() const;
-};
-```
-
-See the documentation on [`v8::ReturnValue`](https://v8docs.nodesource.com/node-8.16/da/da7/classv8_1_1_return_value.html) for further information on this.
-
-### Method declaration
-
-JavaScript-accessible methods should be declared with the following signature to form a `Nan::FunctionCallback`:
-
-```c++
-typedef void(*FunctionCallback)(const FunctionCallbackInfo<v8::Value>&);
-```
-
-Example:
-
-```c++
-void MethodName(const Nan::FunctionCallbackInfo<v8::Value>& info) {
-  ...
-}
-```
-
-You do not need to declare a new `HandleScope` within a method as one is implicitly created for you.
-
-**Example usage**
-
-```c++
-// .h:
-class Foo : public Nan::ObjectWrap {
-  ...
-
-  static void Bar(const Nan::FunctionCallbackInfo<v8::Value>& info);
-  static void Baz(const Nan::FunctionCallbackInfo<v8::Value>& info);
-};
-
-
-// .cc:
-void Foo::Bar(const Nan::FunctionCallbackInfo<v8::Value>& info) {
-  ...
-}
-
-void Foo::Baz(const Nan::FunctionCallbackInfo<v8::Value>& info) {
-  ...
-}
-```
-
-A helper macro `NAN_METHOD(methodname)` exists, compatible with NAN v1 method declarations.
-
-**Example usage with `NAN_METHOD(methodname)`**
-
-```c++
-// .h:
-class Foo : public Nan::ObjectWrap {
-  ...
-
-  static NAN_METHOD(Bar);
-  static NAN_METHOD(Baz);
-};
-
-
-// .cc:
-NAN_METHOD(Foo::Bar) {
-  ...
-}
-
-NAN_METHOD(Foo::Baz) {
-  ...
-}
-```
-
-Use [`Nan::SetPrototypeMethod`](#api_nan_set_prototype_method) to attach a method to a JavaScript function prototype or [`Nan::SetMethod`](#api_nan_set_method) to attach a method directly on a JavaScript object.
-
-### Getter declaration
-
-JavaScript-accessible getters should be declared with the following signature to form a `Nan::GetterCallback`:
-
-```c++
-typedef void(*GetterCallback)(v8::Local<v8::String>,
-                              const PropertyCallbackInfo<v8::Value>&);
-```
-
-Example:
-
-```c++
-void GetterName(v8::Local<v8::String> property,
-                const Nan::PropertyCallbackInfo<v8::Value>& info) {
-  ...
-}
-```
-
-You do not need to declare a new `HandleScope` within a getter as one is implicitly created for you.
-
-A helper macro `NAN_GETTER(methodname)` exists, compatible with NAN v1 method declarations.
-
-Also see the V8 Embedders Guide documentation on [Accessors](https://developers.google.com/v8/embed#accesssors).
-
-### Setter declaration
-
-JavaScript-accessible setters should be declared with the following signature to form a `Nan::SetterCallback`:
-
-```c++
-typedef void(*SetterCallback)(v8::Local<v8::String>,
-                              v8::Local<v8::Value>,
-                              const PropertyCallbackInfo<void>&);
-```
-
-Example:
-
-```c++
-void SetterName(v8::Local<v8::String> property,
-                v8::Local<v8::Value> value,
-                const Nan::PropertyCallbackInfo<void>& info) {
-  ...
-}
-```
-
-You do not need to declare a new `HandleScope` within a setter as one is implicitly created for you.
-
-A helper macro `NAN_SETTER(methodname)` exists, compatible with NAN v1 method declarations.
-
-Also see the V8 Embedders Guide documentation on [Accessors](https://developers.google.com/v8/embed#accesssors).
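-
-For illustration, a minimal sketch of a matching getter/setter pair (the names and the static backing variable are illustrative, and `tpl` is assumed to be a `v8::Local<v8::FunctionTemplate>` available inside an init function):
-
-```c++
-static double value_ = 0;
-
-NAN_GETTER(ValueGetter) {
-  info.GetReturnValue().Set(Nan::New<v8::Number>(value_));
-}
-
-NAN_SETTER(ValueSetter) {
-  value_ = Nan::To<double>(value).FromJust();
-}
-
-// Inside an init function, wire the pair up with Nan::SetAccessor()
-// (documented below):
-Nan::SetAccessor(tpl->InstanceTemplate(),
-                 Nan::New("value").ToLocalChecked(),
-                 ValueGetter, ValueSetter);
-```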
- - -### Property getter declaration - -JavaScript-accessible property getters should be declared with the following signature to form a Nan::PropertyGetterCallback: - -```c++ -typedef void(*PropertyGetterCallback)(v8::Local, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void PropertyGetterName(v8::Local property, - const Nan::PropertyCallbackInfo& info) { - ... -} -``` - -You do not need to declare a new `HandleScope` within a property getter as one is implicitly created for you. - -A helper macro `NAN_PROPERTY_GETTER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on named property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Property setter declaration - -JavaScript-accessible property setters should be declared with the following signature to form a Nan::PropertySetterCallback: - -```c++ -typedef void(*PropertySetterCallback)(v8::Local, - v8::Local, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void PropertySetterName(v8::Local property, - v8::Local value, - const Nan::PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a property setter as one is implicitly created for you. - -A helper macro `NAN_PROPERTY_SETTER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on named property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Property enumerator declaration - -JavaScript-accessible property enumerators should be declared with the following signature to form a Nan::PropertyEnumeratorCallback: - -```c++ -typedef void(*PropertyEnumeratorCallback)(const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void PropertyEnumeratorName(const Nan::PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a property enumerator as one is implicitly created for you. - -A helper macro `NAN_PROPERTY_ENUMERATOR(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on named property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Property deleter declaration - -JavaScript-accessible property deleters should be declared with the following signature to form a Nan::PropertyDeleterCallback: - -```c++ -typedef void(*PropertyDeleterCallback)(v8::Local, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void PropertyDeleterName(v8::Local property, - const Nan::PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a property deleter as one is implicitly created for you. - -A helper macro `NAN_PROPERTY_DELETER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on named property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Property query declaration - -JavaScript-accessible property query methods should be declared with the following signature to form a Nan::PropertyQueryCallback: - -```c++ -typedef void(*PropertyQueryCallback)(v8::Local, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void PropertyQueryName(v8::Local property, - const Nan::PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a property query method as one is implicitly created for you. 
- -A helper macro `NAN_PROPERTY_QUERY(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on named property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Index getter declaration - -JavaScript-accessible index getter methods should be declared with the following signature to form a Nan::IndexGetterCallback: - -```c++ -typedef void(*IndexGetterCallback)(uint32_t, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void IndexGetterName(uint32_t index, const PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a index getter as one is implicitly created for you. - -A helper macro `NAN_INDEX_GETTER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on indexed property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Index setter declaration - -JavaScript-accessible index setter methods should be declared with the following signature to form a Nan::IndexSetterCallback: - -```c++ -typedef void(*IndexSetterCallback)(uint32_t, - v8::Local, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void IndexSetterName(uint32_t index, - v8::Local value, - const PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a index setter as one is implicitly created for you. - -A helper macro `NAN_INDEX_SETTER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on indexed property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Index enumerator declaration - -JavaScript-accessible index enumerator methods should be declared with the following signature to form a Nan::IndexEnumeratorCallback: - -```c++ -typedef void(*IndexEnumeratorCallback)(const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void IndexEnumeratorName(const PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a index enumerator as one is implicitly created for you. - -A helper macro `NAN_INDEX_ENUMERATOR(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on indexed property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Index deleter declaration - -JavaScript-accessible index deleter methods should be declared with the following signature to form a Nan::IndexDeleterCallback: - -```c++ -typedef void(*IndexDeleterCallback)(uint32_t, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void IndexDeleterName(uint32_t index, const PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a index deleter as one is implicitly created for you. - -A helper macro `NAN_INDEX_DELETER(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on indexed property [Interceptors](https://developers.google.com/v8/embed#interceptors). 
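-
-For illustration, a minimal sketch of an index getter/setter pair backed by a fixed C array (the backing store and names are illustrative; the pair would be installed with `Nan::SetIndexedPropertyHandler()`, described below):
-
-```c++
-static double backing[16];
-
-NAN_INDEX_GETTER(IndexGetter) {
-  if (index < 16) {
-    info.GetReturnValue().Set(Nan::New<v8::Number>(backing[index]));
-  }
-}
-
-NAN_INDEX_SETTER(IndexSetter) {
-  if (index < 16) {
-    backing[index] = Nan::To<double>(value).FromJust();
-  }
-}
-```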
- - -### Index query declaration - -JavaScript-accessible index query methods should be declared with the following signature to form a Nan::IndexQueryCallback: - -```c++ -typedef void(*IndexQueryCallback)(uint32_t, - const PropertyCallbackInfo&); -``` - -Example: - -```c++ -void IndexQueryName(uint32_t index, const PropertyCallbackInfo& info); -``` - -You do not need to declare a new `HandleScope` within a index query method as one is implicitly created for you. - -A helper macro `NAN_INDEX_QUERY(methodname)` exists, compatible with NAN v1 method declarations. - -Also see the V8 Embedders Guide documentation on indexed property [Interceptors](https://developers.google.com/v8/embed#interceptors). - - -### Nan::SetMethod() - -Sets a method with a given name directly on a JavaScript object where the method has the `Nan::FunctionCallback` signature (see Method declaration). - -Signature: - -```c++ -void Nan::SetMethod(v8::Local recv, - const char *name, - Nan::FunctionCallback callback, - v8::Local data = v8::Local()) -void Nan::SetMethod(v8::Local templ, - const char *name, - Nan::FunctionCallback callback, - v8::Local data = v8::Local()) -``` - - -### Nan::SetPrototypeMethod() - -Sets a method with a given name on a `FunctionTemplate`'s prototype where the method has the `Nan::FunctionCallback` signature (see Method declaration). - -Signature: - -```c++ -void Nan::SetPrototypeMethod(v8::Local recv, - const char* name, - Nan::FunctionCallback callback, - v8::Local data = v8::Local()) -``` - - -### Nan::SetAccessor() - -Sets getters and setters for a property with a given name on an `ObjectTemplate` or a plain `Object`. Accepts getters with the `Nan::GetterCallback` signature (see Getter declaration) and setters with the `Nan::SetterCallback` signature (see Setter declaration). - -Signature: - -```c++ -void SetAccessor(v8::Local tpl, - v8::Local name, - Nan::GetterCallback getter, - Nan::SetterCallback setter = 0, - v8::Local data = v8::Local(), - v8::AccessControl settings = v8::DEFAULT, - v8::PropertyAttribute attribute = v8::None, - imp::Sig signature = imp::Sig()); -bool SetAccessor(v8::Local obj, - v8::Local name, - Nan::GetterCallback getter, - Nan::SetterCallback setter = 0, - v8::Local data = v8::Local(), - v8::AccessControl settings = v8::DEFAULT, - v8::PropertyAttribute attribute = v8::None) -``` - -See the V8 [`ObjectTemplate#SetAccessor()`](https://v8docs.nodesource.com/node-8.16/db/d5f/classv8_1_1_object_template.html#aca0ed196f8a9adb1f68b1aadb6c9cd77) and [`Object#SetAccessor()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ae91b3b56b357f285288c89fbddc46d1b) for further information about how to use `Nan::SetAccessor()`. - - -### Nan::SetNamedPropertyHandler() - -Sets named property getters, setters, query, deleter and enumerator methods on an `ObjectTemplate`. 
Accepts: - -* Property getters with the `Nan::PropertyGetterCallback` signature (see Property getter declaration) -* Property setters with the `Nan::PropertySetterCallback` signature (see Property setter declaration) -* Property query methods with the `Nan::PropertyQueryCallback` signature (see Property query declaration) -* Property deleters with the `Nan::PropertyDeleterCallback` signature (see Property deleter declaration) -* Property enumerators with the `Nan::PropertyEnumeratorCallback` signature (see Property enumerator declaration) - -Signature: - -```c++ -void SetNamedPropertyHandler(v8::Local tpl, - Nan::PropertyGetterCallback getter, - Nan::PropertySetterCallback setter = 0, - Nan::PropertyQueryCallback query = 0, - Nan::PropertyDeleterCallback deleter = 0, - Nan::PropertyEnumeratorCallback enumerator = 0, - v8::Local data = v8::Local()) -``` - -See the V8 [`ObjectTemplate#SetNamedPropertyHandler()`](https://v8docs.nodesource.com/node-8.16/db/d5f/classv8_1_1_object_template.html#a33b3ebd7de641f6cc6414b7de01fc1c7) for further information about how to use `Nan::SetNamedPropertyHandler()`. - - -### Nan::SetIndexedPropertyHandler() - -Sets indexed property getters, setters, query, deleter and enumerator methods on an `ObjectTemplate`. Accepts: - -* Indexed property getters with the `Nan::IndexGetterCallback` signature (see Index getter declaration) -* Indexed property setters with the `Nan::IndexSetterCallback` signature (see Index setter declaration) -* Indexed property query methods with the `Nan::IndexQueryCallback` signature (see Index query declaration) -* Indexed property deleters with the `Nan::IndexDeleterCallback` signature (see Index deleter declaration) -* Indexed property enumerators with the `Nan::IndexEnumeratorCallback` signature (see Index enumerator declaration) - -Signature: - -```c++ -void SetIndexedPropertyHandler(v8::Local tpl, - Nan::IndexGetterCallback getter, - Nan::IndexSetterCallback setter = 0, - Nan::IndexQueryCallback query = 0, - Nan::IndexDeleterCallback deleter = 0, - Nan::IndexEnumeratorCallback enumerator = 0, - v8::Local data = v8::Local()) -``` - -See the V8 [`ObjectTemplate#SetIndexedPropertyHandler()`](https://v8docs.nodesource.com/node-8.16/db/d5f/classv8_1_1_object_template.html#ac89f06d634add0e890452033f7d17ff1) for further information about how to use `Nan::SetIndexedPropertyHandler()`. - - -### Nan::SetTemplate() - -Adds properties on an `Object`'s or `Function`'s template. - -Signature: - -```c++ -void Nan::SetTemplate(v8::Local templ, - const char *name, - v8::Local value); -void Nan::SetTemplate(v8::Local templ, - v8::Local name, - v8::Local value, - v8::PropertyAttribute attributes) -``` - -Calls the `Template`'s [`Set()`](https://v8docs.nodesource.com/node-8.16/db/df7/classv8_1_1_template.html#ae3fbaff137557aa6a0233bc7e52214ac). - - -### Nan::SetPrototypeTemplate() - -Adds properties on an `Object`'s or `Function`'s prototype template. - -Signature: - -```c++ -void Nan::SetPrototypeTemplate(v8::Local templ, - const char *name, - v8::Local value); -void Nan::SetPrototypeTemplate(v8::Local templ, - v8::Local name, - v8::Local value, - v8::PropertyAttribute attributes) -``` - -Calls the `FunctionTemplate`'s _PrototypeTemplate's_ [`Set()`](https://v8docs.nodesource.com/node-8.16/db/df7/classv8_1_1_template.html#a2db6a56597bf23c59659c0659e564ddf). - - -### Nan::SetInstanceTemplate() - -Use to add instance properties on `FunctionTemplate`'s. 
- -Signature: - -```c++ -void Nan::SetInstanceTemplate(v8::Local templ, - const char *name, - v8::Local value); -void Nan::SetInstanceTemplate(v8::Local templ, - v8::Local name, - v8::Local value, - v8::PropertyAttribute attributes) -``` - -Calls the `FunctionTemplate`'s _InstanceTemplate's_ [`Set()`](https://v8docs.nodesource.com/node-8.16/db/df7/classv8_1_1_template.html#a2db6a56597bf23c59659c0659e564ddf). - - -### Nan::SetCallHandler() - -Set the call-handler callback for a `v8::FunctionTemplate`. -This callback is called whenever the function created from this FunctionTemplate is called. - -Signature: - -```c++ -void Nan::SetCallHandler(v8::Local templ, Nan::FunctionCallback callback, v8::Local data = v8::Local()) -``` - -Calls the `FunctionTemplate`'s [`SetCallHandler()`](https://v8docs.nodesource.com/node-8.16/d8/d83/classv8_1_1_function_template.html#ab7574b298db3c27fbc2ed465c08ea2f8). - - -### Nan::SetCallAsFunctionHandler() - -Sets the callback to be used when calling instances created from the `v8::ObjectTemplate` as a function. -If no callback is set, instances behave like normal JavaScript objects that cannot be called as a function. - -Signature: - -```c++ -void Nan::SetCallAsFunctionHandler(v8::Local templ, Nan::FunctionCallback callback, v8::Local data = v8::Local()) -``` - -Calls the `ObjectTemplate`'s [`SetCallAsFunctionHandler()`](https://v8docs.nodesource.com/node-8.16/db/d5f/classv8_1_1_object_template.html#a5e9612fc80bf6db8f2da199b9b0bd04e). - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/new.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/new.md deleted file mode 100644 index 0f28a0e9..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/new.md +++ /dev/null @@ -1,147 +0,0 @@ -## New - -NAN provides a `Nan::New()` helper for the creation of new JavaScript objects in a way that's compatible across the supported versions of V8. - - - Nan::New() - - Nan::Undefined() - - Nan::Null() - - Nan::True() - - Nan::False() - - Nan::EmptyString() - - - -### Nan::New() - -`Nan::New()` should be used to instantiate new JavaScript objects. - -Refer to the specific V8 type in the [V8 documentation](https://v8docs.nodesource.com/node-8.16/d1/d83/classv8_1_1_data.html) for information on the types of arguments required for instantiation. - -Signatures: - -Return types are mostly omitted from the signatures for simplicity. In most cases the type will be contained within a `v8::Local`. The following types will be contained within a `Nan::MaybeLocal`: `v8::String`, `v8::Date`, `v8::RegExp`, `v8::Script`, `v8::UnboundScript`. 
- -Empty objects: - -```c++ -Nan::New(); -``` - -Generic single and multiple-argument: - -```c++ -Nan::New(A0 arg0); -Nan::New(A0 arg0, A1 arg1); -Nan::New(A0 arg0, A1 arg1, A2 arg2); -Nan::New(A0 arg0, A1 arg1, A2 arg2, A3 arg3); -``` - -For creating `v8::FunctionTemplate` and `v8::Function` objects: - -_The definition of `Nan::FunctionCallback` can be found in the [Method declaration](./methods.md#api_nan_method) documentation._ - -```c++ -Nan::New(Nan::FunctionCallback callback, - v8::Local data = v8::Local()); -Nan::New(Nan::FunctionCallback callback, - v8::Local data = v8::Local(), - A2 a2 = A2()); -``` - -Native number types: - -```c++ -v8::Local Nan::New(bool value); -v8::Local Nan::New(int32_t value); -v8::Local Nan::New(uint32_t value); -v8::Local Nan::New(double value); -``` - -String types: - -```c++ -Nan::MaybeLocal Nan::New(std::string const& value); -Nan::MaybeLocal Nan::New(const char * value, int length); -Nan::MaybeLocal Nan::New(const char * value); -Nan::MaybeLocal Nan::New(const uint16_t * value); -Nan::MaybeLocal Nan::New(const uint16_t * value, int length); -``` - -Specialized types: - -```c++ -v8::Local Nan::New(v8::String::ExternalStringResource * value); -v8::Local Nan::New(Nan::ExternalOneByteStringResource * value); -v8::Local Nan::New(v8::Local pattern, v8::RegExp::Flags flags); -``` - -Note that `Nan::ExternalOneByteStringResource` maps to [`v8::String::ExternalOneByteStringResource`](https://v8docs.nodesource.com/node-8.16/d9/db3/classv8_1_1_string_1_1_external_one_byte_string_resource.html), and `v8::String::ExternalAsciiStringResource` in older versions of V8. - - - -### Nan::Undefined() - -A helper method to reference the `v8::Undefined` object in a way that is compatible across all supported versions of V8. - -Signature: - -```c++ -v8::Local Nan::Undefined() -``` - - -### Nan::Null() - -A helper method to reference the `v8::Null` object in a way that is compatible across all supported versions of V8. - -Signature: - -```c++ -v8::Local Nan::Null() -``` - - -### Nan::True() - -A helper method to reference the `v8::Boolean` object representing the `true` value in a way that is compatible across all supported versions of V8. - -Signature: - -```c++ -v8::Local Nan::True() -``` - - -### Nan::False() - -A helper method to reference the `v8::Boolean` object representing the `false` value in a way that is compatible across all supported versions of V8. - -Signature: - -```c++ -v8::Local Nan::False() -``` - - -### Nan::EmptyString() - -Call [`v8::String::Empty`](https://v8docs.nodesource.com/node-8.16/d2/db3/classv8_1_1_string.html#a7c1bc8886115d7ee46f1d571dd6ebc6d) to reference the empty string in a way that is compatible across all supported versions of V8. - -Signature: - -```c++ -v8::Local Nan::EmptyString() -``` - - - -### Nan::NewOneByteString() - -An implementation of [`v8::String::NewFromOneByte()`](https://v8docs.nodesource.com/node-8.16/d2/db3/classv8_1_1_string.html#a5264d50b96d2c896ce525a734dc10f09) provided for consistent availability and API across supported versions of V8. Allocates a new string from Latin-1 data. 
- -Signature: - -```c++ -Nan::MaybeLocal Nan::NewOneByteString(const uint8_t * value, - int length = -1) -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/node_misc.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/node_misc.md deleted file mode 100644 index 17578e34..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/node_misc.md +++ /dev/null @@ -1,123 +0,0 @@ -## Miscellaneous Node Helpers - - - Nan::AsyncResource - - Nan::MakeCallback() - - NAN_MODULE_INIT() - - Nan::Export() - - -### Nan::AsyncResource - -This class is analogous to the `AsyncResource` JavaScript class exposed by Node's [async_hooks][] API. - -When calling back into JavaScript asynchronously, special care must be taken to ensure that the runtime can properly track -async hops. `Nan::AsyncResource` is a class that provides an RAII wrapper around `node::EmitAsyncInit`, `node::EmitAsyncDestroy`, -and `node::MakeCallback`. Using this mechanism to call back into JavaScript, as opposed to `Nan::MakeCallback` or -`v8::Function::Call` ensures that the callback is executed in the correct async context. This ensures that async mechanisms -such as domains and [async_hooks][] function correctly. - -Definition: - -```c++ -class AsyncResource { - public: - AsyncResource(v8::Local name, - v8::Local resource = New()); - AsyncResource(const char* name, - v8::Local resource = New()); - ~AsyncResource(); - - v8::MaybeLocal runInAsyncScope(v8::Local target, - v8::Local func, - int argc, - v8::Local* argv); - v8::MaybeLocal runInAsyncScope(v8::Local target, - v8::Local symbol, - int argc, - v8::Local* argv); - v8::MaybeLocal runInAsyncScope(v8::Local target, - const char* method, - int argc, - v8::Local* argv); -}; -``` - -* `name`: Identifier for the kind of resource that is being provided for diagnostics information exposed by the [async_hooks][] - API. This will be passed to the possible `init` hook as the `type`. To avoid name collisions with other modules we recommend - that the name include the name of the owning module as a prefix. For example `mysql` module could use something like - `mysql:batch-db-query-resource`. -* `resource`: An optional object associated with the async work that will be passed to the possible [async_hooks][] - `init` hook. If this parameter is omitted, or an empty handle is provided, this object will be created automatically. -* When calling JS on behalf of this resource, one can use `runInAsyncScope`. This will ensure that the callback runs in the - correct async execution context. -* `AsyncDestroy` is automatically called when an AsyncResource object is destroyed. - -For more details, see the Node [async_hooks][] documentation. You might also want to take a look at the documentation for the -[N-API counterpart][napi]. For example usage, see the `asyncresource.cpp` example in the `test/cpp` directory. - - -### Nan::MakeCallback() - -Deprecated wrappers around the legacy `node::MakeCallback()` APIs. Node.js 10+ -has deprecated these legacy APIs as they do not provide a mechanism to preserve -async context. - -We recommend that you use the `AsyncResource` class and `AsyncResource::runInAsyncScope` instead of using `Nan::MakeCallback` or -`v8::Function#Call()` directly. 
`AsyncResource` properly takes care of running the callback in the correct async execution -context – something that is essential for functionality like domains, async_hooks and async debugging. - -Signatures: - -```c++ -NAN_DEPRECATED -v8::Local Nan::MakeCallback(v8::Local target, - v8::Local func, - int argc, - v8::Local* argv); -NAN_DEPRECATED -v8::Local Nan::MakeCallback(v8::Local target, - v8::Local symbol, - int argc, - v8::Local* argv); -NAN_DEPRECATED -v8::Local Nan::MakeCallback(v8::Local target, - const char* method, - int argc, - v8::Local* argv); -``` - - - -### NAN_MODULE_INIT() - -Used to define the entry point function to a Node add-on. Creates a function with a given `name` that receives a `target` object representing the equivalent of the JavaScript `exports` object. - -See example below. - - -### Nan::Export() - -A simple helper to register a `v8::FunctionTemplate` from a JavaScript-accessible method (see [Methods](./methods.md)) as a property on an object. Can be used in a way similar to assigning properties to `module.exports` in JavaScript. - -Signature: - -```c++ -void Export(v8::Local target, const char *name, Nan::FunctionCallback f) -``` - -Also available as the shortcut `NAN_EXPORT` macro. - -Example: - -```c++ -NAN_METHOD(Foo) { - ... -} - -NAN_MODULE_INIT(Init) { - NAN_EXPORT(target, Foo); -} -``` - -[async_hooks]: https://nodejs.org/dist/latest-v9.x/docs/api/async_hooks.html -[napi]: https://nodejs.org/dist/latest-v9.x/docs/api/n-api.html#n_api_custom_asynchronous_operations diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/object_wrappers.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/object_wrappers.md deleted file mode 100644 index 07d8c058..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/object_wrappers.md +++ /dev/null @@ -1,263 +0,0 @@ -## Object Wrappers - -The `ObjectWrap` class can be used to make wrapped C++ objects and a factory of wrapped objects. - - - Nan::ObjectWrap - - - -### Nan::ObjectWrap() - -A reimplementation of `node::ObjectWrap` that adds some API not present in older versions of Node. Should be preferred over `node::ObjectWrap` in all cases for consistency. - -Definition: - -```c++ -class ObjectWrap { - public: - ObjectWrap(); - - virtual ~ObjectWrap(); - - template - static inline T* Unwrap(v8::Local handle); - - inline v8::Local handle(); - - inline Nan::Persistent& persistent(); - - protected: - inline void Wrap(v8::Local handle); - - inline void MakeWeak(); - - /* Ref() marks the object as being attached to an event loop. - * Refed objects will not be garbage collected, even if - * all references are lost. - */ - virtual void Ref(); - - /* Unref() marks an object as detached from the event loop. This is its - * default state. When an object with a "weak" reference changes from - * attached to detached state it will be freed. Be careful not to access - * the object after making this call as it might be gone! - * (A "weak reference" means an object that only has a - * persistent handle.) - * - * DO NOT CALL THIS FROM DESTRUCTOR - */ - virtual void Unref(); - - int refs_; // ro -}; -``` - -See the Node documentation on [Wrapping C++ Objects](https://nodejs.org/api/addons.html#addons_wrapping_c_objects) for more details. - -### This vs. 
Holder - -When calling `Unwrap`, it is important that the argument is indeed some JavaScript object which got wrapped by a `Wrap` call for this class or any derived class. -The `Signature` installed by [`Nan::SetPrototypeMethod()`](methods.md#api_nan_set_prototype_method) does ensure that `info.Holder()` is just such an instance. -In Node 0.12 and later, `info.This()` will also be of such a type, since otherwise the invocation will get rejected. -However, in Node 0.10 and before it was possible to invoke a method on a JavaScript object which just had the extension type in its prototype chain. -In such a situation, calling `Unwrap` on `info.This()` will likely lead to a failed assertion causing a crash, but could lead to even more serious corruption. - -On the other hand, calling `Unwrap` in an [accessor](methods.md#api_nan_set_accessor) should not use `Holder()` if the accessor is defined on the prototype. -So either define your accessors on the instance template, -or use `This()` after verifying that it is indeed a valid object. - -### Examples - -#### Basic - -```c++ -class MyObject : public Nan::ObjectWrap { - public: - static NAN_MODULE_INIT(Init) { - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("MyObject").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); - - Nan::SetPrototypeMethod(tpl, "getHandle", GetHandle); - Nan::SetPrototypeMethod(tpl, "getValue", GetValue); - - constructor().Reset(Nan::GetFunction(tpl).ToLocalChecked()); - Nan::Set(target, Nan::New("MyObject").ToLocalChecked(), - Nan::GetFunction(tpl).ToLocalChecked()); - } - - private: - explicit MyObject(double value = 0) : value_(value) {} - ~MyObject() {} - - static NAN_METHOD(New) { - if (info.IsConstructCall()) { - double value = info[0]->IsUndefined() ? 0 : Nan::To(info[0]).FromJust(); - MyObject *obj = new MyObject(value); - obj->Wrap(info.This()); - info.GetReturnValue().Set(info.This()); - } else { - const int argc = 1; - v8::Local argv[argc] = {info[0]}; - v8::Local cons = Nan::New(constructor()); - info.GetReturnValue().Set(Nan::NewInstance(cons, argc, argv).ToLocalChecked()); - } - } - - static NAN_METHOD(GetHandle) { - MyObject* obj = Nan::ObjectWrap::Unwrap(info.Holder()); - info.GetReturnValue().Set(obj->handle()); - } - - static NAN_METHOD(GetValue) { - MyObject* obj = Nan::ObjectWrap::Unwrap(info.Holder()); - info.GetReturnValue().Set(obj->value_); - } - - static inline Nan::Persistent & constructor() { - static Nan::Persistent my_constructor; - return my_constructor; - } - - double value_; -}; - -NODE_MODULE(objectwrapper, MyObject::Init) -``` - -To use in Javascript: - -```Javascript -var objectwrapper = require('bindings')('objectwrapper'); - -var obj = new objectwrapper.MyObject(5); -console.log('Should be 5: ' + obj.getValue()); -``` - -#### Factory of wrapped objects - -```c++ -class MyFactoryObject : public Nan::ObjectWrap { - public: - static NAN_MODULE_INIT(Init) { - v8::Local tpl = Nan::New(New); - tpl->InstanceTemplate()->SetInternalFieldCount(1); - - Nan::SetPrototypeMethod(tpl, "getValue", GetValue); - - constructor().Reset(Nan::GetFunction(tpl).ToLocalChecked()); - } - - static NAN_METHOD(NewInstance) { - v8::Local cons = Nan::New(constructor()); - double value = info[0]->IsNumber() ? 
Nan::To(info[0]).FromJust() : 0; - const int argc = 1; - v8::Local argv[1] = {Nan::New(value)}; - info.GetReturnValue().Set(Nan::NewInstance(cons, argc, argv).ToLocalChecked()); - } - - // Needed for the next example: - inline double value() const { - return value_; - } - - private: - explicit MyFactoryObject(double value = 0) : value_(value) {} - ~MyFactoryObject() {} - - static NAN_METHOD(New) { - if (info.IsConstructCall()) { - double value = info[0]->IsNumber() ? Nan::To(info[0]).FromJust() : 0; - MyFactoryObject * obj = new MyFactoryObject(value); - obj->Wrap(info.This()); - info.GetReturnValue().Set(info.This()); - } else { - const int argc = 1; - v8::Local argv[argc] = {info[0]}; - v8::Local cons = Nan::New(constructor()); - info.GetReturnValue().Set(Nan::NewInstance(cons, argc, argv).ToLocalChecked()); - } - } - - static NAN_METHOD(GetValue) { - MyFactoryObject* obj = ObjectWrap::Unwrap(info.Holder()); - info.GetReturnValue().Set(obj->value_); - } - - static inline Nan::Persistent & constructor() { - static Nan::Persistent my_constructor; - return my_constructor; - } - - double value_; -}; - -NAN_MODULE_INIT(Init) { - MyFactoryObject::Init(target); - Nan::Set(target, - Nan::New("newFactoryObjectInstance").ToLocalChecked(), - Nan::GetFunction( - Nan::New(MyFactoryObject::NewInstance)).ToLocalChecked() - ); -} - -NODE_MODULE(wrappedobjectfactory, Init) -``` - -To use in Javascript: - -```Javascript -var wrappedobjectfactory = require('bindings')('wrappedobjectfactory'); - -var obj = wrappedobjectfactory.newFactoryObjectInstance(10); -console.log('Should be 10: ' + obj.getValue()); -``` - -#### Passing wrapped objects around - -Use the `MyFactoryObject` class above along with the following: - -```c++ -static NAN_METHOD(Sum) { - Nan::MaybeLocal maybe1 = Nan::To(info[0]); - Nan::MaybeLocal maybe2 = Nan::To(info[1]); - - // Quick check: - if (maybe1.IsEmpty() || maybe2.IsEmpty()) { - // return value is undefined by default - return; - } - - MyFactoryObject* obj1 = - Nan::ObjectWrap::Unwrap(maybe1.ToLocalChecked()); - MyFactoryObject* obj2 = - Nan::ObjectWrap::Unwrap(maybe2.ToLocalChecked()); - - info.GetReturnValue().Set(Nan::New(obj1->value() + obj2->value())); -} - -NAN_MODULE_INIT(Init) { - MyFactoryObject::Init(target); - Nan::Set(target, - Nan::New("newFactoryObjectInstance").ToLocalChecked(), - Nan::GetFunction( - Nan::New(MyFactoryObject::NewInstance)).ToLocalChecked() - ); - Nan::Set(target, - Nan::New("sum").ToLocalChecked(), - Nan::GetFunction(Nan::New(Sum)).ToLocalChecked() - ); -} - -NODE_MODULE(myaddon, Init) -``` - -To use in Javascript: - -```Javascript -var myaddon = require('bindings')('myaddon'); - -var obj1 = myaddon.newFactoryObjectInstance(5); -var obj2 = myaddon.newFactoryObjectInstance(10); -console.log('sum of object values: ' + myaddon.sum(obj1, obj2)); -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/persistent.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/persistent.md deleted file mode 100644 index 2e13f6bb..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/persistent.md +++ /dev/null @@ -1,296 +0,0 @@ -## Persistent references - -An object reference that is independent of any `HandleScope` is a _persistent_ reference. 
Where a `Local` handle only lives as long as the `HandleScope` in which it was allocated, a `Persistent` handle remains valid until it is explicitly disposed. - -Due to the evolution of the V8 API, it is necessary for NAN to provide a wrapper implementation of the `Persistent` classes to supply compatibility across the V8 versions supported. - - - Nan::PersistentBase & v8::PersistentBase - - Nan::NonCopyablePersistentTraits & v8::NonCopyablePersistentTraits - - Nan::CopyablePersistentTraits & v8::CopyablePersistentTraits - - Nan::Persistent - - Nan::Global - - Nan::WeakCallbackInfo - - Nan::WeakCallbackType - -Also see the V8 Embedders Guide section on [Handles and Garbage Collection](https://developers.google.com/v8/embed#handles). - - -### Nan::PersistentBase & v8::PersistentBase - -A persistent handle contains a reference to a storage cell in V8 which holds an object value and which is updated by the garbage collector whenever the object is moved. A new storage cell can be created using the constructor or `Nan::PersistentBase::Reset()`. Existing handles can be disposed using an argument-less `Nan::PersistentBase::Reset()`. - -Definition: - -_(note: this is implemented as `Nan::PersistentBase` for older versions of V8 and the native `v8::PersistentBase` is used for newer versions of V8)_ - -```c++ -template class PersistentBase { - public: - /** - * If non-empty, destroy the underlying storage cell - */ - void Reset(); - - /** - * If non-empty, destroy the underlying storage cell and create a new one with - * the contents of another if it is also non-empty - */ - template void Reset(const v8::Local &other); - - /** - * If non-empty, destroy the underlying storage cell and create a new one with - * the contents of another if it is also non-empty - */ - template void Reset(const PersistentBase &other); - - /** Returns true if the handle is empty. */ - bool IsEmpty() const; - - /** - * If non-empty, destroy the underlying storage cell - * IsEmpty() will return true after this call. - */ - void Empty(); - - template bool operator==(const PersistentBase &that); - - template bool operator==(const v8::Local &that); - - template bool operator!=(const PersistentBase &that); - - template bool operator!=(const v8::Local &that); - - /** - * Install a finalization callback on this object. - * NOTE: There is no guarantee as to *when* or even *if* the callback is - * invoked. The invocation is performed solely on a best effort basis. - * As always, GC-based finalization should *not* be relied upon for any - * critical form of resource management! At the moment you can either - * specify a parameter for the callback or the location of two internal - * fields in the dying object. - */ - template - void SetWeak(P *parameter, - typename WeakCallbackInfo

::Callback callback, - WeakCallbackType type); - - void ClearWeak(); - - /** - * Marks the reference to this object independent. Garbage collector is free - * to ignore any object groups containing this object. Weak callback for an - * independent handle should not assume that it will be preceded by a global - * GC prologue callback or followed by a global GC epilogue callback. - */ - void MarkIndependent() const; - - bool IsIndependent() const; - - /** Checks if the handle holds the only reference to an object. */ - bool IsNearDeath() const; - - /** Returns true if the handle's reference is weak. */ - bool IsWeak() const -}; -``` - -See the V8 documentation for [`PersistentBase`](https://v8docs.nodesource.com/node-8.16/d4/dca/classv8_1_1_persistent_base.html) for further information. - -**Tip:** To get a `v8::Local` reference to the original object back from a `PersistentBase` or `Persistent` object: - -```c++ -v8::Local object = Nan::New(persistent); -``` - - -### Nan::NonCopyablePersistentTraits & v8::NonCopyablePersistentTraits - -Default traits for `Nan::Persistent`. This class does not allow use of the a copy constructor or assignment operator. At present `kResetInDestructor` is not set, but that will change in a future version. - -Definition: - -_(note: this is implemented as `Nan::NonCopyablePersistentTraits` for older versions of V8 and the native `v8::NonCopyablePersistentTraits` is used for newer versions of V8)_ - -```c++ -template class NonCopyablePersistentTraits { - public: - typedef Persistent > NonCopyablePersistent; - - static const bool kResetInDestructor = false; - - template - static void Copy(const Persistent &source, - NonCopyablePersistent *dest); - - template static void Uncompilable(); -}; -``` - -See the V8 documentation for [`NonCopyablePersistentTraits`](https://v8docs.nodesource.com/node-8.16/de/d73/classv8_1_1_non_copyable_persistent_traits.html) for further information. - - -### Nan::CopyablePersistentTraits & v8::CopyablePersistentTraits - -A helper class of traits to allow copying and assignment of `Persistent`. This will clone the contents of storage cell, but not any of the flags, etc.. - -Definition: - -_(note: this is implemented as `Nan::CopyablePersistentTraits` for older versions of V8 and the native `v8::NonCopyablePersistentTraits` is used for newer versions of V8)_ - -```c++ -template -class CopyablePersistentTraits { - public: - typedef Persistent > CopyablePersistent; - - static const bool kResetInDestructor = true; - - template - static void Copy(const Persistent &source, - CopyablePersistent *dest); -}; -``` - -See the V8 documentation for [`CopyablePersistentTraits`](https://v8docs.nodesource.com/node-8.16/da/d5c/structv8_1_1_copyable_persistent_traits.html) for further information. - - -### Nan::Persistent - -A type of `PersistentBase` which allows copy and assignment. Copy, assignment and destructor behavior is controlled by the traits class `M`. - -Definition: - -```c++ -template > -class Persistent; - -template class Persistent : public PersistentBase { - public: - /** - * A Persistent with no storage cell. - */ - Persistent(); - - /** - * Construct a Persistent from a v8::Local. When the v8::Local is non-empty, a - * new storage cell is created pointing to the same object, and no flags are - * set. - */ - template Persistent(v8::Local that); - - /** - * Construct a Persistent from a Persistent. When the Persistent is non-empty, - * a new storage cell is created pointing to the same object, and no flags are - * set. 
- */ - Persistent(const Persistent &that); - - /** - * The copy constructors and assignment operator create a Persistent exactly - * as the Persistent constructor, but the Copy function from the traits class - * is called, allowing the setting of flags based on the copied Persistent. - */ - Persistent &operator=(const Persistent &that); - - template - Persistent &operator=(const Persistent &that); - - /** - * The destructor will dispose the Persistent based on the kResetInDestructor - * flags in the traits class. Since not calling dispose can result in a - * memory leak, it is recommended to always set this flag. - */ - ~Persistent(); -}; -``` - -See the V8 documentation for [`Persistent`](https://v8docs.nodesource.com/node-8.16/d2/d78/classv8_1_1_persistent.html) for further information. - - -### Nan::Global - -A type of `PersistentBase` which has move semantics. - -```c++ -template class Global : public PersistentBase { - public: - /** - * A Global with no storage cell. - */ - Global(); - - /** - * Construct a Global from a v8::Local. When the v8::Local is non-empty, a new - * storage cell is created pointing to the same object, and no flags are set. - */ - template Global(v8::Local that); - /** - * Construct a Global from a PersistentBase. When the Persistent is non-empty, - * a new storage cell is created pointing to the same object, and no flags are - * set. - */ - template Global(const PersistentBase &that); - - /** - * Pass allows returning globals from functions, etc. - */ - Global Pass(); -}; -``` - -See the V8 documentation for [`Global`](https://v8docs.nodesource.com/node-8.16/d5/d40/classv8_1_1_global.html) for further information. - - -### Nan::WeakCallbackInfo - -`Nan::WeakCallbackInfo` is used as an argument when setting a persistent reference as weak. You may need to free any external resources attached to the object. It is a mirror of `v8:WeakCallbackInfo` as found in newer versions of V8. - -Definition: - -```c++ -template class WeakCallbackInfo { - public: - typedef void (*Callback)(const WeakCallbackInfo& data); - - v8::Isolate *GetIsolate() const; - - /** - * Get the parameter that was associated with the weak handle. - */ - T *GetParameter() const; - - /** - * Get pointer from internal field, index can be 0 or 1. - */ - void *GetInternalField(int index) const; -}; -``` - -Example usage: - -```c++ -void weakCallback(const WeakCallbackInfo &data) { - int *parameter = data.GetParameter(); - delete parameter; -} - -Persistent obj; -int *data = new int(0); -obj.SetWeak(data, callback, WeakCallbackType::kParameter); -``` - -See the V8 documentation for [`WeakCallbackInfo`](https://v8docs.nodesource.com/node-8.16/d8/d06/classv8_1_1_weak_callback_info.html) for further information. - - -### Nan::WeakCallbackType - -Represents the type of a weak callback. -A weak callback of type `kParameter` makes the supplied parameter to `Nan::PersistentBase::SetWeak` available through `WeakCallbackInfo::GetParameter`. -A weak callback of type `kInternalFields` uses up to two internal fields at indices 0 and 1 on the `Nan::PersistentBase` being made weak. -Note that only `v8::Object`s and derivatives can have internal fields. 
- -Definition: - -```c++ -enum class WeakCallbackType { kParameter, kInternalFields }; -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/scopes.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/scopes.md deleted file mode 100644 index 84000eeb..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/scopes.md +++ /dev/null @@ -1,73 +0,0 @@ -## Scopes - -A _local handle_ is a pointer to an object. All V8 objects are accessed using handles, they are necessary because of the way the V8 garbage collector works. - -A handle scope can be thought of as a container for any number of handles. When you've finished with your handles, instead of deleting each one individually you can simply delete their scope. - -The creation of `HandleScope` objects is different across the supported versions of V8. Therefore, NAN provides its own implementations that can be used safely across these. - - - Nan::HandleScope - - Nan::EscapableHandleScope - -Also see the V8 Embedders Guide section on [Handles and Garbage Collection](https://github.com/v8/v8/wiki/Embedder%27s%20Guide#handles-and-garbage-collection). - - -### Nan::HandleScope - -A simple wrapper around [`v8::HandleScope`](https://v8docs.nodesource.com/node-8.16/d3/d95/classv8_1_1_handle_scope.html). - -Definition: - -```c++ -class Nan::HandleScope { - public: - Nan::HandleScope(); - static int NumberOfHandles(); -}; -``` - -Allocate a new `Nan::HandleScope` whenever you are creating new V8 JavaScript objects. Note that an implicit `HandleScope` is created for you on JavaScript-accessible methods so you do not need to insert one yourself. - -Example: - -```c++ -// new object is created, it needs a new scope: -void Pointless() { - Nan::HandleScope scope; - v8::Local obj = Nan::New(); -} - -// JavaScript-accessible method already has a HandleScope -NAN_METHOD(Pointless2) { - v8::Local obj = Nan::New(); -} -``` - - -### Nan::EscapableHandleScope - -Similar to [`Nan::HandleScope`](#api_nan_handle_scope) but should be used in cases where a function needs to return a V8 JavaScript type that has been created within it. - -Definition: - -```c++ -class Nan::EscapableHandleScope { - public: - Nan::EscapableHandleScope(); - static int NumberOfHandles(); - template v8::Local Escape(v8::Local value); -} -``` - -Use `Escape(value)` to return the object. - -Example: - -```c++ -v8::Local EmptyObj() { - Nan::EscapableHandleScope scope; - v8::Local obj = Nan::New(); - return scope.Escape(obj); -} -``` - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/script.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/script.md deleted file mode 100644 index 301c1b3d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/script.md +++ /dev/null @@ -1,58 +0,0 @@ -## Script - -NAN provides `v8::Script` helpers as the API has changed over the supported versions of V8. - - - Nan::CompileScript() - - Nan::RunScript() - - Nan::ScriptOrigin - - - -### Nan::CompileScript() - -A wrapper around [`v8::ScriptCompiler::Compile()`](https://v8docs.nodesource.com/node-8.16/da/da5/classv8_1_1_script_compiler.html#a93f5072a0db55d881b969e9fc98e564b). 
- -Note that `Nan::BoundScript` is an alias for `v8::Script`. - -Signature: - -```c++ -Nan::MaybeLocal Nan::CompileScript( - v8::Local s, - const v8::ScriptOrigin& origin); -Nan::MaybeLocal Nan::CompileScript(v8::Local s); -``` - - - -### Nan::RunScript() - -Calls `script->Run()` or `script->BindToCurrentContext()->Run(Nan::GetCurrentContext())`. - -Note that `Nan::BoundScript` is an alias for `v8::Script` and `Nan::UnboundScript` is an alias for `v8::UnboundScript` where available and `v8::Script` on older versions of V8. - -Signature: - -```c++ -Nan::MaybeLocal Nan::RunScript(v8::Local script) -Nan::MaybeLocal Nan::RunScript(v8::Local script) -``` - - -### Nan::ScriptOrigin - -A class transparently extending [`v8::ScriptOrigin`](https://v8docs.nodesource.com/node-16.0/db/d84/classv8_1_1_script_origin.html#pub-methods) -to provide backwards compatibility. Only the listed methods are guaranteed to -be available on all versions of Node. - -Declaration: - -```c++ -class Nan::ScriptOrigin : public v8::ScriptOrigin { - public: - ScriptOrigin(v8::Local name, v8::Local line = v8::Local(), v8::Local column = v8::Local()) - v8::Local ResourceName() const; - v8::Local ResourceLineOffset() const; - v8::Local ResourceColumnOffset() const; -} -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/string_bytes.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/string_bytes.md deleted file mode 100644 index 7c1bd325..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/string_bytes.md +++ /dev/null @@ -1,62 +0,0 @@ -## Strings & Bytes - -Miscellaneous string & byte encoding and decoding functionality provided for compatibility across supported versions of V8 and Node. Implemented by NAN to ensure that all encoding types are supported, even for older versions of Node where they are missing. - - - Nan::Encoding - - Nan::Encode() - - Nan::DecodeBytes() - - Nan::DecodeWrite() - - - -### Nan::Encoding - -An enum representing the supported encoding types. A copy of `node::encoding` that is consistent across versions of Node. - -Definition: - -```c++ -enum Nan::Encoding { ASCII, UTF8, BASE64, UCS2, BINARY, HEX, BUFFER } -``` - - - -### Nan::Encode() - -A wrapper around `node::Encode()` that provides a consistent implementation across supported versions of Node. - -Signature: - -```c++ -v8::Local Nan::Encode(const void *buf, - size_t len, - enum Nan::Encoding encoding = BINARY); -``` - - - -### Nan::DecodeBytes() - -A wrapper around `node::DecodeBytes()` that provides a consistent implementation across supported versions of Node. - -Signature: - -```c++ -ssize_t Nan::DecodeBytes(v8::Local val, - enum Nan::Encoding encoding = BINARY); -``` - - - -### Nan::DecodeWrite() - -A wrapper around `node::DecodeWrite()` that provides a consistent implementation across supported versions of Node. 
-
-Signature:
-
-```c++
-ssize_t Nan::DecodeWrite(char *buf,
-                         size_t len,
-                         v8::Local<v8::Value> val,
-                         enum Nan::Encoding encoding = BINARY);
-```
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_internals.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_internals.md
deleted file mode 100644
index 08dd6d04..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_internals.md
+++ /dev/null
@@ -1,199 +0,0 @@
-## V8 internals
-
-The hooks to access V8 internals—including GC and statistics—are different across the supported versions of V8; therefore NAN provides its own hooks that call the appropriate V8 methods.
-
- - NAN_GC_CALLBACK()
- - Nan::AddGCEpilogueCallback()
- - Nan::RemoveGCEpilogueCallback()
- - Nan::AddGCPrologueCallback()
- - Nan::RemoveGCPrologueCallback()
- - Nan::GetHeapStatistics()
- - Nan::SetCounterFunction()
- - Nan::SetCreateHistogramFunction()
- - Nan::SetAddHistogramSampleFunction()
- - Nan::IdleNotification()
- - Nan::LowMemoryNotification()
- - Nan::ContextDisposedNotification()
- - Nan::GetInternalFieldPointer()
- - Nan::SetInternalFieldPointer()
- - Nan::AdjustExternalMemory()
-
-
-### NAN_GC_CALLBACK(callbackname)
-
-Use `NAN_GC_CALLBACK` to declare your callbacks for `Nan::AddGCPrologueCallback()` and `Nan::AddGCEpilogueCallback()`. Your new method receives the arguments `v8::GCType type` and `v8::GCCallbackFlags flags`.
-
-```c++
-static Nan::Persistent<v8::Function> callback;
-
-NAN_GC_CALLBACK(gcPrologueCallback) {
-  v8::Local<v8::Value> argv[] = { Nan::New("prologue").ToLocalChecked() };
-  Nan::MakeCallback(Nan::GetCurrentContext()->Global(), Nan::New(callback), 1, argv);
-}
-
-NAN_METHOD(Hook) {
-  callback.Reset(To<v8::Function>(info[0]).ToLocalChecked());
-  Nan::AddGCPrologueCallback(gcPrologueCallback);
-  info.GetReturnValue().Set(info.Holder());
-}
-```
-
-
-### Nan::AddGCEpilogueCallback()
-
-Signature:
-
-```c++
-void Nan::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, v8::GCType gc_type_filter = v8::kGCTypeAll)
-```
-
-Calls V8's [`AddGCEpilogueCallback()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a580f976e4290cead62c2fc4dd396be3e).
-
-
-### Nan::RemoveGCEpilogueCallback()
-
-Signature:
-
-```c++
-void Nan::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback)
-```
-
-Calls V8's [`RemoveGCEpilogueCallback()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#adca9294555a3908e9f23c7bb0f0f284c).
-
-
-### Nan::AddGCPrologueCallback()
-
-Signature:
-
-```c++
-void Nan::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, v8::GCType gc_type_filter = v8::kGCTypeAll)
-```
-
-Calls V8's [`AddGCPrologueCallback()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a6dbef303603ebdb03da6998794ea05b8).
-
-
-### Nan::RemoveGCPrologueCallback()
-
-Signature:
-
-```c++
-void Nan::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback)
-```
-
-Calls V8's [`RemoveGCPrologueCallback()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a5f72c7cda21415ce062bbe5c58abe09e).
-
-
-### Nan::GetHeapStatistics()
-
-Signature:
-
-```c++
-void Nan::GetHeapStatistics(v8::HeapStatistics *heap_statistics)
-```
-
-Calls V8's [`GetHeapStatistics()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a5593ac74687b713095c38987e5950b34).
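As a hedged usage sketch for the GC hooks just described (`LogHeapAfterGC` and `Init` are invented names):

```c++
// Sketch: after every GC, log how much of the heap is in use.
#include <nan.h>
#include <cstdio>

NAN_GC_CALLBACK(LogHeapAfterGC) {
  v8::HeapStatistics stats;
  Nan::GetHeapStatistics(&stats);
  fprintf(stderr, "[gc] heap used %zu of %zu bytes\n",
          stats.used_heap_size(), stats.total_heap_size());
}

NAN_MODULE_INIT(Init) {
  // Default filter is v8::kGCTypeAll, so this fires for every GC kind.
  Nan::AddGCEpilogueCallback(LogHeapAfterGC);
}
```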
-
-
-### Nan::SetCounterFunction()
-
-Signature:
-
-```c++
-void Nan::SetCounterFunction(v8::CounterLookupCallback cb)
-```
-
-Calls V8's [`SetCounterFunction()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a045d7754e62fa0ec72ae6c259b29af94).
-
-
-### Nan::SetCreateHistogramFunction()
-
-Signature:
-
-```c++
-void Nan::SetCreateHistogramFunction(v8::CreateHistogramCallback cb)
-```
-
-Calls V8's [`SetCreateHistogramFunction()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a542d67e85089cb3f92aadf032f99e732).
-
-
-### Nan::SetAddHistogramSampleFunction()
-
-Signature:
-
-```c++
-void Nan::SetAddHistogramSampleFunction(v8::AddHistogramSampleCallback cb)
-```
-
-Calls V8's [`SetAddHistogramSampleFunction()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#aeb420b690bc2c216882d6fdd00ddd3ea).
-
-
-### Nan::IdleNotification()
-
-Signature:
-
-```c++
-bool Nan::IdleNotification(int idle_time_in_ms)
-```
-
-Calls V8's [`IdleNotification()` or `IdleNotificationDeadline()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#ad6a2a02657f5425ad460060652a5a118) depending on V8 version.
-
-
-### Nan::LowMemoryNotification()
-
-Signature:
-
-```c++
-void Nan::LowMemoryNotification()
-```
-
-Calls V8's [`LowMemoryNotification()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a24647f61d6b41f69668094bdcd6ea91f).
-
-
-### Nan::ContextDisposedNotification()
-
-Signature:
-
-```c++
-void Nan::ContextDisposedNotification()
-```
-
-Calls V8's [`ContextDisposedNotification()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#ad7f5dc559866343fe6cd8db1f134d48b).
-
-
-### Nan::GetInternalFieldPointer()
-
-Gets a pointer to the internal field at `index` from a V8 `Object` handle.
-
-Signature:
-
-```c++
-void* Nan::GetInternalFieldPointer(v8::Local<v8::Object> object, int index)
-```
-
-Calls the Object's [`GetAlignedPointerFromInternalField()` or `GetPointerFromInternalField()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#a580ea84afb26c005d6762eeb9e3c308f) depending on the version of V8.
-
-
-### Nan::SetInternalFieldPointer()
-
-Sets the value of the internal field at `index` on a V8 `Object` handle.
-
-Signature:
-
-```c++
-void Nan::SetInternalFieldPointer(v8::Local<v8::Object> object, int index, void* value)
-```
-
-Calls the Object's [`SetAlignedPointerInInternalField()` or `SetPointerInInternalField()`](https://v8docs.nodesource.com/node-8.16/db/d85/classv8_1_1_object.html#ab3c57184263cf29963ef0017bec82281) depending on the version of V8.
-
-
-### Nan::AdjustExternalMemory()
-
-Signature:
-
-```c++
-int Nan::AdjustExternalMemory(int bytesChange)
-```
-
-Calls V8's [`AdjustAmountOfExternalAllocatedMemory()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#ae1a59cac60409d3922582c4af675473e).
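A hedged sketch of the external-memory hint in practice (`Blob`, `BlobAlloc`, and `BlobFree` are invented names; the negative adjustment on free mirrors the one made on allocation):

```c++
// Sketch: keep V8's GC informed about a native buffer owned by an addon.
#include <nan.h>

struct Blob {
  char*  data;
  size_t size;
};

static Blob* BlobAlloc(size_t size) {
  Nan::AdjustExternalMemory(static_cast<int>(size));      // report the cost
  return new Blob{new char[size], size};
}

static void BlobFree(Blob* b) {
  Nan::AdjustExternalMemory(-static_cast<int>(b->size));  // and the refund
  delete[] b->data;
  delete b;
}
```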
-
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_misc.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_misc.md
deleted file mode 100644
index 1bd46d35..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/doc/v8_misc.md
+++ /dev/null
@@ -1,85 +0,0 @@
-## Miscellaneous V8 Helpers
-
- - Nan::Utf8String
- - Nan::GetCurrentContext()
- - Nan::SetIsolateData()
- - Nan::GetIsolateData()
- - Nan::TypedArrayContents
-
-
-### Nan::Utf8String
-
-Converts an object to a UTF-8-encoded character array. If conversion to a string fails (e.g. due to an exception in the `toString()` method of the object) then the `length()` method returns 0 and the `*` operator returns `NULL`. The underlying memory used for this object is managed by the object.
-
-An implementation of [`v8::String::Utf8Value`](https://v8docs.nodesource.com/node-8.16/d4/d1b/classv8_1_1_string_1_1_utf8_value.html) that is consistent across all supported versions of V8.
-
-Definition:
-
-```c++
-class Nan::Utf8String {
- public:
-  Nan::Utf8String(v8::Local<v8::Value> from);
-
-  int length() const;
-
-  char* operator*();
-  const char* operator*() const;
-};
-```
-
-
-### Nan::GetCurrentContext()
-
-A call to [`v8::Isolate::GetCurrent()->GetCurrentContext()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a81c7a1ed7001ae2a65e89107f75fd053) that works across all supported versions of V8.
-
-Signature:
-
-```c++
-v8::Local<v8::Context> Nan::GetCurrentContext()
-```
-
-
-### Nan::SetIsolateData()
-
-A helper to provide a consistent API to [`v8::Isolate#SetData()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#a7acadfe7965997e9c386a05f098fbe36).
-
-Signature:
-
-```c++
-void Nan::SetIsolateData(v8::Isolate *isolate, T *data)
-```
-
-
-### Nan::GetIsolateData()
-
-A helper to provide a consistent API to [`v8::Isolate#GetData()`](https://v8docs.nodesource.com/node-8.16/d5/dda/classv8_1_1_isolate.html#aabd223436bc1100a787dadaa024c6257).
-
-Signature:
-
-```c++
-T *Nan::GetIsolateData(v8::Isolate *isolate)
-```
-
-
-### Nan::TypedArrayContents
-
-A helper class for accessing the contents of an ArrayBufferView (aka a typedarray) from C++. If the input array is not a valid typedarray, then the data pointer of TypedArrayContents will default to `NULL` and the length will be 0. If the data pointer is not compatible with the alignment requirements of the type, an assertion will fail.
-
-Note that you must store a reference to the `array` object while you are accessing its contents.
-
-Definition:
-
-```c++
-template<typename T>
-class Nan::TypedArrayContents {
- public:
-  TypedArrayContents(v8::Local<v8::Value> array);
-
-  size_t length() const;
-
-  T* const operator*();
-  const T* const operator*() const;
-};
-```
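A hedged usage sketch for the class defined above (`Sum` is an invented name; it assumes the caller passes a `Float64Array`, and the zero length on invalid input makes the loop a safe no-op):

```c++
// Sketch: sum the elements of a typed array argument.
#include <nan.h>

NAN_METHOD(Sum) {
  Nan::TypedArrayContents<double> data(info[0]);  // expects a Float64Array
  double total = 0;
  for (size_t i = 0; i < data.length(); ++i) {
    total += (*data)[i];
  }
  info.GetReturnValue().Set(Nan::New<v8::Number>(total));
}
```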
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/include_dirs.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/include_dirs.js
deleted file mode 100644
index 4f1dfb41..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/include_dirs.js
+++ /dev/null
@@ -1 +0,0 @@
-console.log(require('path').relative('.', __dirname));
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan.h
deleted file mode 100644
index 9e7c59e4..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan.h
+++ /dev/null
@@ -1,2909 +0,0 @@
-/*********************************************************************
- * NAN - Native Abstractions for Node.js
- *
- * Copyright (c) 2018 NAN contributors:
- *   - Rod Vagg
- *   - Benjamin Byholm
- *   - Trevor Norris
- *   - Nathan Rajlich
- *   - Brett Lawson
- *   - Ben Noordhuis
- *   - David Siegel
- *   - Michael Ira Krufky
- *
- * MIT License
- *
- * Version 2.16.0: current Node 18.2.0, Node 0.12: 0.12.18, Node 0.10: 0.10.48, iojs: 3.3.1
- *
- * See https://github.com/nodejs/nan for the latest update to this file
- **********************************************************************************/
-
-#ifndef NAN_H_
-#define NAN_H_
-
-#include <node_version.h>
-
-#define NODE_0_10_MODULE_VERSION 11
-#define NODE_0_12_MODULE_VERSION 14
-#define ATOM_0_21_MODULE_VERSION 41
-#define IOJS_1_0_MODULE_VERSION  42
-#define IOJS_1_1_MODULE_VERSION  43
-#define IOJS_2_0_MODULE_VERSION  44
-#define IOJS_3_0_MODULE_VERSION  45
-#define NODE_4_0_MODULE_VERSION  46
-#define NODE_5_0_MODULE_VERSION  47
-#define NODE_6_0_MODULE_VERSION  48
-#define NODE_7_0_MODULE_VERSION  51
-#define NODE_8_0_MODULE_VERSION  57
-#define NODE_9_0_MODULE_VERSION  59
-#define NODE_10_0_MODULE_VERSION 64
-#define NODE_11_0_MODULE_VERSION 67
-#define NODE_12_0_MODULE_VERSION 72
-#define NODE_13_0_MODULE_VERSION 79
-#define NODE_14_0_MODULE_VERSION 83
-#define NODE_15_0_MODULE_VERSION 88
-#define NODE_16_0_MODULE_VERSION 93
-#define NODE_17_0_MODULE_VERSION 102
-#define NODE_18_0_MODULE_VERSION 108
-
-#ifdef _MSC_VER
-# define NAN_HAS_CPLUSPLUS_11 (_MSC_VER >= 1800)
-#else
-# define NAN_HAS_CPLUSPLUS_11 (__cplusplus >= 201103L)
-#endif
-
-#if NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION && !NAN_HAS_CPLUSPLUS_11
-# error This version of node/NAN/v8 requires a C++11 compiler
-#endif
-
-#include <uv.h>
-#include <node.h>
-#include <node_buffer.h>
-#include <node_object_wrap.h>
-#include <algorithm>
-#include <cstring>
-#include <climits>
-#include <cstdlib>
-#include <utility>
-#if defined(_MSC_VER)
-# pragma warning( push )
-# pragma warning( disable : 4530 )
-# include <string>
-# include <vector>
-# include <string.h>
-# pragma warning( pop )
-#else
-# include <string>
-# include <vector>
-# include <string.h>
-#endif
-
-// uv helpers
-#ifdef UV_VERSION_MAJOR
-# ifndef UV_VERSION_PATCH
-#  define UV_VERSION_PATCH 0
-# endif
-# define NAUV_UVVERSION  ((UV_VERSION_MAJOR << 16) | \
-                          (UV_VERSION_MINOR << 8) | \
-                          (UV_VERSION_PATCH))
-#else
-# define NAUV_UVVERSION 0x000b00
-#endif
-
-#if NAUV_UVVERSION < 0x000b0b
-# ifdef WIN32
-#  include <windows.h>
-# else
-#  include <pthread.h>
-# endif
-#endif
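As a hedged aside on the version packing above (a standalone illustration, not code from nan.h; `PackUvVersion` is an invented name):

```c++
// Illustrative only: how NAUV_UVVERSION packs a libuv version triple,
// mirroring the macro defined above.
#include <cstdio>

static unsigned PackUvVersion(unsigned major, unsigned minor, unsigned patch) {
  return (major << 16) | (minor << 8) | patch;
}

int main() {
  // libuv 0.11.11 packs to 0x000b0b, the threshold the guard above compares
  // against before pulling in the thread-local-storage fallback headers.
  std::printf("0x%06x\n", PackUvVersion(0, 11, 11));  // prints 0x000b0b
}
```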
- -namespace Nan { - -#define NAN_CONCAT(a, b) NAN_CONCAT_HELPER(a, b) -#define NAN_CONCAT_HELPER(a, b) a##b - -#define NAN_INLINE inline // TODO(bnoordhuis) Remove in v3.0.0. - -#if defined(__GNUC__) && \ - !(defined(V8_DISABLE_DEPRECATIONS) && V8_DISABLE_DEPRECATIONS) -# define NAN_DEPRECATED __attribute__((deprecated)) -#elif defined(_MSC_VER) && \ - !(defined(V8_DISABLE_DEPRECATIONS) && V8_DISABLE_DEPRECATIONS) -# define NAN_DEPRECATED __declspec(deprecated) -#else -# define NAN_DEPRECATED -#endif - -#if NAN_HAS_CPLUSPLUS_11 -# define NAN_DISALLOW_ASSIGN(CLASS) void operator=(const CLASS&) = delete; -# define NAN_DISALLOW_COPY(CLASS) CLASS(const CLASS&) = delete; -# define NAN_DISALLOW_MOVE(CLASS) \ - CLASS(CLASS&&) = delete; /* NOLINT(build/c++11) */ \ - void operator=(CLASS&&) = delete; -#else -# define NAN_DISALLOW_ASSIGN(CLASS) void operator=(const CLASS&); -# define NAN_DISALLOW_COPY(CLASS) CLASS(const CLASS&); -# define NAN_DISALLOW_MOVE(CLASS) -#endif - -#define NAN_DISALLOW_ASSIGN_COPY(CLASS) \ - NAN_DISALLOW_ASSIGN(CLASS) \ - NAN_DISALLOW_COPY(CLASS) - -#define NAN_DISALLOW_ASSIGN_MOVE(CLASS) \ - NAN_DISALLOW_ASSIGN(CLASS) \ - NAN_DISALLOW_MOVE(CLASS) - -#define NAN_DISALLOW_COPY_MOVE(CLASS) \ - NAN_DISALLOW_COPY(CLASS) \ - NAN_DISALLOW_MOVE(CLASS) - -#define NAN_DISALLOW_ASSIGN_COPY_MOVE(CLASS) \ - NAN_DISALLOW_ASSIGN(CLASS) \ - NAN_DISALLOW_COPY(CLASS) \ - NAN_DISALLOW_MOVE(CLASS) - -#define TYPE_CHECK(T, S) \ - while (false) { \ - *(static_cast(0)) = static_cast(0); \ - } - -//=== RegistrationFunction ===================================================== - -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - typedef v8::Handle ADDON_REGISTER_FUNCTION_ARGS_TYPE; -#else - typedef v8::Local ADDON_REGISTER_FUNCTION_ARGS_TYPE; -#endif - -#define NAN_MODULE_INIT(name) \ - void name(Nan::ADDON_REGISTER_FUNCTION_ARGS_TYPE target) - -#if NODE_MAJOR_VERSION >= 10 || \ - NODE_MAJOR_VERSION == 9 && NODE_MINOR_VERSION >= 3 -#define NAN_MODULE_WORKER_ENABLED(module_name, registration) \ - extern "C" NODE_MODULE_EXPORT void \ - NAN_CONCAT(node_register_module_v, NODE_MODULE_VERSION)( \ - v8::Local exports, v8::Local module, \ - v8::Local context) \ - { \ - registration(exports); \ - } -#else -#define NAN_MODULE_WORKER_ENABLED(module_name, registration) \ - NODE_MODULE(module_name, registration) -#endif - -//=== CallbackInfo ============================================================= - -#include "nan_callbacks.h" // NOLINT(build/include) - -//============================================================================== - -#if (NODE_MODULE_VERSION < NODE_0_12_MODULE_VERSION) -typedef v8::Script UnboundScript; -typedef v8::Script BoundScript; -#else -typedef v8::UnboundScript UnboundScript; -typedef v8::Script BoundScript; -#endif - -#if (NODE_MODULE_VERSION < ATOM_0_21_MODULE_VERSION) -typedef v8::String::ExternalAsciiStringResource - ExternalOneByteStringResource; -#else -typedef v8::String::ExternalOneByteStringResource - ExternalOneByteStringResource; -#endif - -#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION) -template -class NonCopyablePersistentTraits : - public v8::NonCopyablePersistentTraits {}; -template -class CopyablePersistentTraits : - public v8::CopyablePersistentTraits {}; - -template -class PersistentBase : - public v8::PersistentBase {}; - -template > -class Persistent; -#else -template class NonCopyablePersistentTraits; -template class PersistentBase; -template class WeakCallbackData; -template > -class Persistent; -#endif // NODE_MODULE_VERSION - -template -class 
Maybe { - public: - inline bool IsNothing() const { return !has_value_; } - inline bool IsJust() const { return has_value_; } - - inline T ToChecked() const { return FromJust(); } - inline void Check() const { FromJust(); } - - inline bool To(T* out) const { - if (IsJust()) *out = value_; - return IsJust(); - } - - inline T FromJust() const { -#if defined(V8_ENABLE_CHECKS) - assert(IsJust() && "FromJust is Nothing"); -#endif // V8_ENABLE_CHECKS - return value_; - } - - inline T FromMaybe(const T& default_value) const { - return has_value_ ? value_ : default_value; - } - - inline bool operator==(const Maybe &other) const { - return (IsJust() == other.IsJust()) && - (!IsJust() || FromJust() == other.FromJust()); - } - - inline bool operator!=(const Maybe &other) const { - return !operator==(other); - } - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - // Allow implicit conversions from v8::Maybe to Nan::Maybe. - Maybe(const v8::Maybe& that) // NOLINT(runtime/explicit) - : has_value_(that.IsJust()) - , value_(that.FromMaybe(T())) {} -#endif - - private: - Maybe() : has_value_(false) {} - explicit Maybe(const T& t) : has_value_(true), value_(t) {} - bool has_value_; - T value_; - - template - friend Maybe Nothing(); - template - friend Maybe Just(const U& u); -}; - -template -inline Maybe Nothing() { - return Maybe(); -} - -template -inline Maybe Just(const T& t) { - return Maybe(t); -} - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -# include "nan_maybe_43_inl.h" // NOLINT(build/include) -#else -# include "nan_maybe_pre_43_inl.h" // NOLINT(build/include) -#endif - -#include "nan_converters.h" // NOLINT(build/include) -#include "nan_new.h" // NOLINT(build/include) - -#if NAUV_UVVERSION < 0x000b17 -#define NAUV_WORK_CB(func) \ - void func(uv_async_t *async, int) -#else -#define NAUV_WORK_CB(func) \ - void func(uv_async_t *async) -#endif - -#if NAUV_UVVERSION >= 0x000b0b - -typedef uv_key_t nauv_key_t; - -inline int nauv_key_create(nauv_key_t *key) { - return uv_key_create(key); -} - -inline void nauv_key_delete(nauv_key_t *key) { - uv_key_delete(key); -} - -inline void* nauv_key_get(nauv_key_t *key) { - return uv_key_get(key); -} - -inline void nauv_key_set(nauv_key_t *key, void *value) { - uv_key_set(key, value); -} - -#else - -/* Implement thread local storage for older versions of libuv. - * This is essentially a backport of libuv commit 5d2434bf - * written by Ben Noordhuis, adjusted for names and inline. 
- */ - -#ifndef WIN32 - -typedef pthread_key_t nauv_key_t; - -inline int nauv_key_create(nauv_key_t* key) { - return -pthread_key_create(key, NULL); -} - -inline void nauv_key_delete(nauv_key_t* key) { - if (pthread_key_delete(*key)) - abort(); -} - -inline void* nauv_key_get(nauv_key_t* key) { - return pthread_getspecific(*key); -} - -inline void nauv_key_set(nauv_key_t* key, void* value) { - if (pthread_setspecific(*key, value)) - abort(); -} - -#else - -typedef struct { - DWORD tls_index; -} nauv_key_t; - -inline int nauv_key_create(nauv_key_t* key) { - key->tls_index = TlsAlloc(); - if (key->tls_index == TLS_OUT_OF_INDEXES) - return UV_ENOMEM; - return 0; -} - -inline void nauv_key_delete(nauv_key_t* key) { - if (TlsFree(key->tls_index) == FALSE) - abort(); - key->tls_index = TLS_OUT_OF_INDEXES; -} - -inline void* nauv_key_get(nauv_key_t* key) { - void* value = TlsGetValue(key->tls_index); - if (value == NULL) - if (GetLastError() != ERROR_SUCCESS) - abort(); - return value; -} - -inline void nauv_key_set(nauv_key_t* key, void* value) { - if (TlsSetValue(key->tls_index, value) == FALSE) - abort(); -} - -#endif -#endif - -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION -template -v8::Local New(v8::Handle); -#endif - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - typedef v8::WeakCallbackType WeakCallbackType; -#else -struct WeakCallbackType { - enum E {kParameter, kInternalFields}; - E type; - WeakCallbackType(E other) : type(other) {} // NOLINT(runtime/explicit) - inline bool operator==(E other) { return other == this->type; } - inline bool operator!=(E other) { return !operator==(other); } -}; -#endif - -template class WeakCallbackInfo; - -#if NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION -# include "nan_persistent_12_inl.h" // NOLINT(build/include) -#else -# include "nan_persistent_pre_12_inl.h" // NOLINT(build/include) -#endif - -namespace imp { - static const size_t kMaxLength = 0x3fffffff; - // v8::String::REPLACE_INVALID_UTF8 was introduced - // in node.js v0.10.29 and v0.8.27. -#if NODE_MAJOR_VERSION > 0 || \ - NODE_MINOR_VERSION > 10 || \ - NODE_MINOR_VERSION == 10 && NODE_PATCH_VERSION >= 29 || \ - NODE_MINOR_VERSION == 8 && NODE_PATCH_VERSION >= 27 - static const unsigned kReplaceInvalidUtf8 = v8::String::REPLACE_INVALID_UTF8; -#else - static const unsigned kReplaceInvalidUtf8 = 0; -#endif -} // end of namespace imp - -//=== HandleScope ============================================================== - -class HandleScope { - v8::HandleScope scope; - - public: -#if NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - inline HandleScope() : scope(v8::Isolate::GetCurrent()) {} - inline static int NumberOfHandles() { - return v8::HandleScope::NumberOfHandles(v8::Isolate::GetCurrent()); - } -#else - inline HandleScope() : scope() {} - inline static int NumberOfHandles() { - return v8::HandleScope::NumberOfHandles(); - } -#endif - - private: - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. 
- HandleScope(const HandleScope &); - void operator=(const HandleScope &); - void *operator new(size_t size); - void operator delete(void *, size_t) { - abort(); - } -}; - -class EscapableHandleScope { - public: -#if NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - inline EscapableHandleScope() : scope(v8::Isolate::GetCurrent()) {} - - inline static int NumberOfHandles() { - return v8::EscapableHandleScope::NumberOfHandles(v8::Isolate::GetCurrent()); - } - - template - inline v8::Local Escape(v8::Local value) { - return scope.Escape(value); - } - - private: - v8::EscapableHandleScope scope; -#else - inline EscapableHandleScope() : scope() {} - - inline static int NumberOfHandles() { - return v8::HandleScope::NumberOfHandles(); - } - - template - inline v8::Local Escape(v8::Local value) { - return scope.Close(value); - } - - private: - v8::HandleScope scope; -#endif - - private: - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. - EscapableHandleScope(const EscapableHandleScope &); - void operator=(const EscapableHandleScope &); - void *operator new(size_t size); - void operator delete(void *, size_t) { - abort(); - } -}; - -//=== TryCatch ================================================================= - -class TryCatch { - v8::TryCatch try_catch_; - friend void FatalException(const TryCatch&); - - public: -#if NODE_MODULE_VERSION > NODE_0_12_MODULE_VERSION - TryCatch() : try_catch_(v8::Isolate::GetCurrent()) {} -#endif - - inline bool HasCaught() const { return try_catch_.HasCaught(); } - - inline bool CanContinue() const { return try_catch_.CanContinue(); } - - inline v8::Local ReThrow() { -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - return New(try_catch_.ReThrow()); -#else - return try_catch_.ReThrow(); -#endif - } - - inline v8::Local Exception() const { - return try_catch_.Exception(); - } - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - inline v8::MaybeLocal StackTrace() const { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(try_catch_.StackTrace(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); - } -#else - inline MaybeLocal StackTrace() const { - return try_catch_.StackTrace(); - } -#endif - - inline v8::Local Message() const { - return try_catch_.Message(); - } - - inline void Reset() { try_catch_.Reset(); } - - inline void SetVerbose(bool value) { try_catch_.SetVerbose(value); } - - inline void SetCaptureMessage(bool value) { - try_catch_.SetCaptureMessage(value); - } -}; - -v8::Local MakeCallback(v8::Local target, - v8::Local func, - int argc, - v8::Local* argv); -v8::Local MakeCallback(v8::Local target, - v8::Local symbol, - int argc, - v8::Local* argv); -v8::Local MakeCallback(v8::Local target, - const char* method, - int argc, - v8::Local* argv); - -// === AsyncResource =========================================================== - -class AsyncResource { - public: - AsyncResource( - v8::Local name - , v8::Local resource = New()) { -#if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - v8::Isolate* isolate = v8::Isolate::GetCurrent(); - - if (resource.IsEmpty()) { - resource = New(); - } - - context = node::EmitAsyncInit(isolate, resource, name); -#endif - } - - AsyncResource( - const char* name - , v8::Local resource = New()) { -#if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - v8::Isolate* isolate = v8::Isolate::GetCurrent(); - - if 
(resource.IsEmpty()) { - resource = New(); - } - - v8::Local name_string = - New(name).ToLocalChecked(); - context = node::EmitAsyncInit(isolate, resource, name_string); -#endif - } - - ~AsyncResource() { -#if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - v8::Isolate* isolate = v8::Isolate::GetCurrent(); - node::EmitAsyncDestroy(isolate, context); -#endif - } - - inline MaybeLocal runInAsyncScope( - v8::Local target - , v8::Local func - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < NODE_9_0_MODULE_VERSION - return MakeCallback(target, func, argc, argv); -#else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, func, argc, argv, context); -#endif - } - - inline MaybeLocal runInAsyncScope( - v8::Local target - , v8::Local symbol - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < NODE_9_0_MODULE_VERSION - return MakeCallback(target, symbol, argc, argv); -#else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, symbol, argc, argv, context); -#endif - } - - inline MaybeLocal runInAsyncScope( - v8::Local target - , const char* method - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < NODE_9_0_MODULE_VERSION - return MakeCallback(target, method, argc, argv); -#else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, method, argc, argv, context); -#endif - } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(AsyncResource) -#if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - node::async_context context; -#endif -}; - -inline uv_loop_t* GetCurrentEventLoop() { -#if NODE_MAJOR_VERSION >= 10 || \ - NODE_MAJOR_VERSION == 9 && NODE_MINOR_VERSION >= 3 || \ - NODE_MAJOR_VERSION == 8 && NODE_MINOR_VERSION >= 10 - return node::GetCurrentEventLoop(v8::Isolate::GetCurrent()); -#else - return uv_default_loop(); -#endif -} - -//============ ================================================================= - -/* node 0.12 */ -#if NODE_MODULE_VERSION >= NODE_0_12_MODULE_VERSION - inline - void SetCounterFunction(v8::CounterLookupCallback cb) { - v8::Isolate::GetCurrent()->SetCounterFunction(cb); - } - - inline - void SetCreateHistogramFunction(v8::CreateHistogramCallback cb) { - v8::Isolate::GetCurrent()->SetCreateHistogramFunction(cb); - } - - inline - void SetAddHistogramSampleFunction(v8::AddHistogramSampleCallback cb) { - v8::Isolate::GetCurrent()->SetAddHistogramSampleFunction(cb); - } - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - inline bool IdleNotification(int idle_time_in_ms) { - return v8::Isolate::GetCurrent()->IdleNotificationDeadline( - idle_time_in_ms * 0.001); - } -# else - inline bool IdleNotification(int idle_time_in_ms) { - return v8::Isolate::GetCurrent()->IdleNotification(idle_time_in_ms); - } -#endif - - inline void LowMemoryNotification() { - v8::Isolate::GetCurrent()->LowMemoryNotification(); - } - - inline void ContextDisposedNotification() { - v8::Isolate::GetCurrent()->ContextDisposedNotification(); - } -#else - inline - void SetCounterFunction(v8::CounterLookupCallback cb) { - v8::V8::SetCounterFunction(cb); - } - - inline - void SetCreateHistogramFunction(v8::CreateHistogramCallback cb) { - v8::V8::SetCreateHistogramFunction(cb); - } - - inline - void SetAddHistogramSampleFunction(v8::AddHistogramSampleCallback cb) { - v8::V8::SetAddHistogramSampleFunction(cb); - } - - inline bool IdleNotification(int idle_time_in_ms) { - return v8::V8::IdleNotification(idle_time_in_ms); - } - - inline void 
LowMemoryNotification() { - v8::V8::LowMemoryNotification(); - } - - inline void ContextDisposedNotification() { - v8::V8::ContextDisposedNotification(); - } -#endif - -#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION) // Node 0.12 - inline v8::Local Undefined() { -# if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(v8::Undefined(v8::Isolate::GetCurrent()))); -# else - return v8::Undefined(v8::Isolate::GetCurrent()); -# endif - } - - inline v8::Local Null() { -# if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(v8::Null(v8::Isolate::GetCurrent()))); -# else - return v8::Null(v8::Isolate::GetCurrent()); -# endif - } - - inline v8::Local True() { -# if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(v8::True(v8::Isolate::GetCurrent()))); -# else - return v8::True(v8::Isolate::GetCurrent()); -# endif - } - - inline v8::Local False() { -# if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(v8::False(v8::Isolate::GetCurrent()))); -# else - return v8::False(v8::Isolate::GetCurrent()); -# endif - } - - inline v8::Local EmptyString() { - return v8::String::Empty(v8::Isolate::GetCurrent()); - } - - inline int AdjustExternalMemory(int bc) { - return static_cast( - v8::Isolate::GetCurrent()->AdjustAmountOfExternalAllocatedMemory(bc)); - } - - inline void SetTemplate( - v8::Local templ - , const char *name - , v8::Local value) { - templ->Set(v8::Isolate::GetCurrent(), name, value); - } - - inline void SetTemplate( - v8::Local templ - , v8::Local name - , v8::Local value - , v8::PropertyAttribute attributes) { - templ->Set(name, value, attributes); - } - - inline v8::Local GetCurrentContext() { - return v8::Isolate::GetCurrent()->GetCurrentContext(); - } - - inline void* GetInternalFieldPointer( - v8::Local object - , int index) { - return object->GetAlignedPointerFromInternalField(index); - } - - inline void SetInternalFieldPointer( - v8::Local object - , int index - , void* value) { - object->SetAlignedPointerInInternalField(index, value); - } - -# define NAN_GC_CALLBACK(name) \ - void name(v8::Isolate *isolate, v8::GCType type, v8::GCCallbackFlags flags) - -#if NODE_MODULE_VERSION <= NODE_4_0_MODULE_VERSION - typedef v8::Isolate::GCEpilogueCallback GCEpilogueCallback; - typedef v8::Isolate::GCPrologueCallback GCPrologueCallback; -#else - typedef v8::Isolate::GCCallback GCEpilogueCallback; - typedef v8::Isolate::GCCallback GCPrologueCallback; -#endif - - inline void AddGCEpilogueCallback( - GCEpilogueCallback callback - , v8::GCType gc_type_filter = v8::kGCTypeAll) { - v8::Isolate::GetCurrent()->AddGCEpilogueCallback(callback, gc_type_filter); - } - - inline void RemoveGCEpilogueCallback( - GCEpilogueCallback callback) { - v8::Isolate::GetCurrent()->RemoveGCEpilogueCallback(callback); - } - - inline void AddGCPrologueCallback( - GCPrologueCallback callback - , v8::GCType gc_type_filter = v8::kGCTypeAll) { - v8::Isolate::GetCurrent()->AddGCPrologueCallback(callback, gc_type_filter); - } - - inline void RemoveGCPrologueCallback( - GCPrologueCallback callback) { - v8::Isolate::GetCurrent()->RemoveGCPrologueCallback(callback); - } - - inline void GetHeapStatistics( - v8::HeapStatistics *heap_statistics) { - v8::Isolate::GetCurrent()->GetHeapStatistics(heap_statistics); - } - -# define X(NAME) \ - inline v8::Local NAME(const char *msg) { \ - EscapableHandleScope scope; \ - return 
scope.Escape(v8::Exception::NAME(New(msg).ToLocalChecked())); \ - } \ - \ - inline \ - v8::Local NAME(v8::Local msg) { \ - return v8::Exception::NAME(msg); \ - } \ - \ - inline void Throw ## NAME(const char *msg) { \ - HandleScope scope; \ - v8::Isolate::GetCurrent()->ThrowException( \ - v8::Exception::NAME(New(msg).ToLocalChecked())); \ - } \ - \ - inline void Throw ## NAME(v8::Local msg) { \ - HandleScope scope; \ - v8::Isolate::GetCurrent()->ThrowException( \ - v8::Exception::NAME(msg)); \ - } - - X(Error) - X(RangeError) - X(ReferenceError) - X(SyntaxError) - X(TypeError) - -# undef X - - inline void ThrowError(v8::Local error) { - v8::Isolate::GetCurrent()->ThrowException(error); - } - - inline MaybeLocal NewBuffer( - char *data - , size_t length -#if NODE_MODULE_VERSION > IOJS_2_0_MODULE_VERSION - , node::Buffer::FreeCallback callback -#else - , node::smalloc::FreeCallback callback -#endif - , void *hint - ) { - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(length <= imp::kMaxLength && "too large buffer"); -#if NODE_MODULE_VERSION > IOJS_2_0_MODULE_VERSION - return node::Buffer::New( - v8::Isolate::GetCurrent(), data, length, callback, hint); -#else - return node::Buffer::New(v8::Isolate::GetCurrent(), data, length, callback, - hint); -#endif - } - - inline MaybeLocal CopyBuffer( - const char *data - , uint32_t size - ) { - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(size <= imp::kMaxLength && "too large buffer"); -#if NODE_MODULE_VERSION > IOJS_2_0_MODULE_VERSION - return node::Buffer::Copy( - v8::Isolate::GetCurrent(), data, size); -#else - return node::Buffer::New(v8::Isolate::GetCurrent(), data, size); -#endif - } - - inline MaybeLocal NewBuffer(uint32_t size) { - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(size <= imp::kMaxLength && "too large buffer"); -#if NODE_MODULE_VERSION > IOJS_2_0_MODULE_VERSION - return node::Buffer::New( - v8::Isolate::GetCurrent(), size); -#else - return node::Buffer::New(v8::Isolate::GetCurrent(), size); -#endif - } - - inline MaybeLocal NewBuffer( - char* data - , uint32_t size - ) { - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(size <= imp::kMaxLength && "too large buffer"); -#if NODE_MODULE_VERSION > IOJS_2_0_MODULE_VERSION - return node::Buffer::New(v8::Isolate::GetCurrent(), data, size); -#else - return node::Buffer::Use(v8::Isolate::GetCurrent(), data, size); -#endif - } - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - inline MaybeLocal - NewOneByteString(const uint8_t * value, int length = -1) { - return v8::String::NewFromOneByte(v8::Isolate::GetCurrent(), value, - v8::NewStringType::kNormal, length); - } - - inline MaybeLocal CompileScript( - v8::Local s - , const v8::ScriptOrigin& origin - ) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::ScriptCompiler::Source source(s, origin); - return scope.Escape( - v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &source) - .FromMaybe(v8::Local())); - } - - inline MaybeLocal CompileScript( - v8::Local s - ) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::ScriptCompiler::Source source(s); - return scope.Escape( - v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &source) - 
.FromMaybe(v8::Local())); - } - - inline MaybeLocal RunScript( - v8::Local script - ) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(script->BindToCurrentContext() - ->Run(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); - } - - inline MaybeLocal RunScript( - v8::Local script - ) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(script->Run(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); - } -#else - inline MaybeLocal - NewOneByteString(const uint8_t * value, int length = -1) { - return v8::String::NewFromOneByte(v8::Isolate::GetCurrent(), value, - v8::String::kNormalString, length); - } - - inline MaybeLocal CompileScript( - v8::Local s - , const v8::ScriptOrigin& origin - ) { - v8::ScriptCompiler::Source source(s, origin); - return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &source); - } - - inline MaybeLocal CompileScript( - v8::Local s - ) { - v8::ScriptCompiler::Source source(s); - return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &source); - } - - inline MaybeLocal RunScript( - v8::Local script - ) { - EscapableHandleScope scope; - return scope.Escape(script->BindToCurrentContext()->Run()); - } - - inline MaybeLocal RunScript( - v8::Local script - ) { - return script->Run(); - } -#endif - - NAN_DEPRECATED inline v8::Local MakeCallback( - v8::Local target - , v8::Local func - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(node::MakeCallback( - v8::Isolate::GetCurrent(), target, func, argc, argv))); -#else -# if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - AsyncResource res("nan:makeCallback"); - return res.runInAsyncScope(target, func, argc, argv) - .FromMaybe(v8::Local()); -# else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, func, argc, argv); -# endif // NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION -#endif // NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - } - - NAN_DEPRECATED inline v8::Local MakeCallback( - v8::Local target - , v8::Local symbol - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(node::MakeCallback( - v8::Isolate::GetCurrent(), target, symbol, argc, argv))); -#else -# if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - AsyncResource res("nan:makeCallback"); - return res.runInAsyncScope(target, symbol, argc, argv) - .FromMaybe(v8::Local()); -# else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, symbol, argc, argv); -# endif // NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION -#endif // NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - } - - NAN_DEPRECATED inline v8::Local MakeCallback( - v8::Local target - , const char* method - , int argc - , v8::Local* argv) { -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - return scope.Escape(New(node::MakeCallback( - v8::Isolate::GetCurrent(), target, method, argc, argv))); -#else -# if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION - AsyncResource res("nan:makeCallback"); - return res.runInAsyncScope(target, method, argc, argv) - .FromMaybe(v8::Local()); -# else - return node::MakeCallback( - v8::Isolate::GetCurrent(), target, method, argc, argv); -# endif // NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION -#endif // NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION - } - - inline void 
FatalException(const TryCatch& try_catch) { - node::FatalException(v8::Isolate::GetCurrent(), try_catch.try_catch_); - } - - inline v8::Local ErrnoException( - int errorno - , const char* syscall = NULL - , const char* message = NULL - , const char* path = NULL) { - return node::ErrnoException(v8::Isolate::GetCurrent(), errorno, syscall, - message, path); - } - - NAN_DEPRECATED inline v8::Local NanErrnoException( - int errorno - , const char* syscall = NULL - , const char* message = NULL - , const char* path = NULL) { - return ErrnoException(errorno, syscall, message, path); - } - - template - inline void SetIsolateData( - v8::Isolate *isolate - , T *data - ) { - isolate->SetData(0, data); - } - - template - inline T *GetIsolateData( - v8::Isolate *isolate - ) { - return static_cast(isolate->GetData(0)); - } - -class Utf8String { - public: - inline explicit Utf8String(v8::Local from) : - length_(0), str_(str_st_) { - HandleScope scope; - if (!from.IsEmpty()) { -#if NODE_MAJOR_VERSION >= 10 - v8::Local context = GetCurrentContext(); - v8::Local string = - from->ToString(context).FromMaybe(v8::Local()); -#else - v8::Local string = from->ToString(); -#endif - if (!string.IsEmpty()) { - size_t len = 3 * string->Length() + 1; - assert(len <= INT_MAX); - if (len > sizeof (str_st_)) { - str_ = static_cast(malloc(len)); - assert(str_ != 0); - } - const int flags = - v8::String::NO_NULL_TERMINATION | imp::kReplaceInvalidUtf8; -#if NODE_MAJOR_VERSION >= 11 - length_ = string->WriteUtf8(v8::Isolate::GetCurrent(), str_, - static_cast(len), 0, flags); -#else - // See https://github.com/nodejs/nan/issues/832. - // Disable the warning as there is no way around it. -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif - length_ = string->WriteUtf8(str_, static_cast(len), 0, flags); -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif -#ifdef _MSC_VER -#pragma warning(pop) -#endif -#endif // NODE_MAJOR_VERSION < 11 - str_[length_] = '\0'; - } - } - } - - inline int length() const { - return length_; - } - - inline char* operator*() { return str_; } - inline const char* operator*() const { return str_; } - - inline ~Utf8String() { - if (str_ != str_st_) { - free(str_); - } - } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(Utf8String) - - int length_; - char *str_; - char str_st_[1024]; -}; - -#else // Node 0.8 and 0.10 - inline v8::Local Undefined() { - EscapableHandleScope scope; - return scope.Escape(New(v8::Undefined())); - } - - inline v8::Local Null() { - EscapableHandleScope scope; - return scope.Escape(New(v8::Null())); - } - - inline v8::Local True() { - EscapableHandleScope scope; - return scope.Escape(New(v8::True())); - } - - inline v8::Local False() { - EscapableHandleScope scope; - return scope.Escape(New(v8::False())); - } - - inline v8::Local EmptyString() { - return v8::String::Empty(); - } - - inline int AdjustExternalMemory(int bc) { - return static_cast(v8::V8::AdjustAmountOfExternalAllocatedMemory(bc)); - } - - inline void SetTemplate( - v8::Local templ - , const char *name - , v8::Local value) { - templ->Set(name, value); - } - - inline void SetTemplate( - v8::Local templ - , v8::Local name - , v8::Local value - , v8::PropertyAttribute attributes) { - templ->Set(name, value, attributes); - } - - inline v8::Local GetCurrentContext() { - return v8::Context::GetCurrent(); - } - - inline void* GetInternalFieldPointer( - v8::Local object - , int index) { 
- return object->GetPointerFromInternalField(index); - } - - inline void SetInternalFieldPointer( - v8::Local object - , int index - , void* value) { - object->SetPointerInInternalField(index, value); - } - -# define NAN_GC_CALLBACK(name) \ - void name(v8::GCType type, v8::GCCallbackFlags flags) - - inline void AddGCEpilogueCallback( - v8::GCEpilogueCallback callback - , v8::GCType gc_type_filter = v8::kGCTypeAll) { - v8::V8::AddGCEpilogueCallback(callback, gc_type_filter); - } - inline void RemoveGCEpilogueCallback( - v8::GCEpilogueCallback callback) { - v8::V8::RemoveGCEpilogueCallback(callback); - } - inline void AddGCPrologueCallback( - v8::GCPrologueCallback callback - , v8::GCType gc_type_filter = v8::kGCTypeAll) { - v8::V8::AddGCPrologueCallback(callback, gc_type_filter); - } - inline void RemoveGCPrologueCallback( - v8::GCPrologueCallback callback) { - v8::V8::RemoveGCPrologueCallback(callback); - } - inline void GetHeapStatistics( - v8::HeapStatistics *heap_statistics) { - v8::V8::GetHeapStatistics(heap_statistics); - } - -# define X(NAME) \ - inline v8::Local NAME(const char *msg) { \ - EscapableHandleScope scope; \ - return scope.Escape(v8::Exception::NAME(New(msg).ToLocalChecked())); \ - } \ - \ - inline \ - v8::Local NAME(v8::Local msg) { \ - return v8::Exception::NAME(msg); \ - } \ - \ - inline void Throw ## NAME(const char *msg) { \ - HandleScope scope; \ - v8::ThrowException(v8::Exception::NAME(New(msg).ToLocalChecked())); \ - } \ - \ - inline \ - void Throw ## NAME(v8::Local errmsg) { \ - HandleScope scope; \ - v8::ThrowException(v8::Exception::NAME(errmsg)); \ - } - - X(Error) - X(RangeError) - X(ReferenceError) - X(SyntaxError) - X(TypeError) - -# undef X - - inline void ThrowError(v8::Local error) { - v8::ThrowException(error); - } - - inline MaybeLocal NewBuffer( - char *data - , size_t length - , node::Buffer::free_callback callback - , void *hint - ) { - EscapableHandleScope scope; - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(length <= imp::kMaxLength && "too large buffer"); - return scope.Escape( - New(node::Buffer::New(data, length, callback, hint)->handle_)); - } - - inline MaybeLocal CopyBuffer( - const char *data - , uint32_t size - ) { - EscapableHandleScope scope; - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(size <= imp::kMaxLength && "too large buffer"); -#if NODE_MODULE_VERSION >= NODE_0_10_MODULE_VERSION - return scope.Escape(New(node::Buffer::New(data, size)->handle_)); -#else - return scope.Escape( - New(node::Buffer::New(const_cast(data), size)->handle_)); -#endif - } - - inline MaybeLocal NewBuffer(uint32_t size) { - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - EscapableHandleScope scope; - assert(size <= imp::kMaxLength && "too large buffer"); - return scope.Escape(New(node::Buffer::New(size)->handle_)); - } - - inline void FreeData(char *data, void *hint) { - (void) hint; // unused - delete[] data; - } - - inline MaybeLocal NewBuffer( - char* data - , uint32_t size - ) { - EscapableHandleScope scope; - // arbitrary buffer lengths requires - // NODE_MODULE_VERSION >= IOJS_3_0_MODULE_VERSION - assert(size <= imp::kMaxLength && "too large buffer"); - return scope.Escape( - New(node::Buffer::New(data, size, FreeData, NULL)->handle_)); - } - -namespace imp { -inline void -widenString(std::vector *ws, const uint8_t *s, int l) { - size_t len = static_cast(l); - if (l < 0) { - len = 
strlen(reinterpret_cast(s)); - } - assert(len <= INT_MAX && "string too long"); - ws->resize(len); - std::copy(s, s + len, ws->begin()); // NOLINT(build/include_what_you_use) -} -} // end of namespace imp - - inline MaybeLocal - NewOneByteString(const uint8_t * value, int length = -1) { - std::vector wideString; // NOLINT(build/include_what_you_use) - imp::widenString(&wideString, value, length); - return v8::String::New(wideString.data(), - static_cast(wideString.size())); - } - - inline MaybeLocal CompileScript( - v8::Local s - , const v8::ScriptOrigin& origin - ) { - return v8::Script::Compile(s, const_cast(&origin)); - } - - inline MaybeLocal CompileScript( - v8::Local s - ) { - return v8::Script::Compile(s); - } - - inline - MaybeLocal RunScript(v8::Local script) { - return script->Run(); - } - - inline v8::Local MakeCallback( - v8::Local target - , v8::Local func - , int argc - , v8::Local* argv) { - v8::HandleScope scope; - return scope.Close(New(node::MakeCallback(target, func, argc, argv))); - } - - inline v8::Local MakeCallback( - v8::Local target - , v8::Local symbol - , int argc - , v8::Local* argv) { - v8::HandleScope scope; - return scope.Close(New(node::MakeCallback(target, symbol, argc, argv))); - } - - inline v8::Local MakeCallback( - v8::Local target - , const char* method - , int argc - , v8::Local* argv) { - v8::HandleScope scope; - return scope.Close(New(node::MakeCallback(target, method, argc, argv))); - } - - inline void FatalException(const TryCatch& try_catch) { - node::FatalException(const_cast(try_catch.try_catch_)); - } - - inline v8::Local ErrnoException( - int errorno - , const char* syscall = NULL - , const char* message = NULL - , const char* path = NULL) { - return node::ErrnoException(errorno, syscall, message, path); - } - - NAN_DEPRECATED inline v8::Local NanErrnoException( - int errorno - , const char* syscall = NULL - , const char* message = NULL - , const char* path = NULL) { - return ErrnoException(errorno, syscall, message, path); - } - - - template - inline void SetIsolateData( - v8::Isolate *isolate - , T *data - ) { - isolate->SetData(data); - } - - template - inline T *GetIsolateData( - v8::Isolate *isolate - ) { - return static_cast(isolate->GetData()); - } - -class Utf8String { - public: - inline explicit Utf8String(v8::Local from) : - length_(0), str_(str_st_) { - v8::HandleScope scope; - if (!from.IsEmpty()) { - v8::Local string = from->ToString(); - if (!string.IsEmpty()) { - size_t len = 3 * string->Length() + 1; - assert(len <= INT_MAX); - if (len > sizeof (str_st_)) { - str_ = static_cast(malloc(len)); - assert(str_ != 0); - } - const int flags = - v8::String::NO_NULL_TERMINATION | imp::kReplaceInvalidUtf8; - length_ = string->WriteUtf8(str_, static_cast(len), 0, flags); - str_[length_] = '\0'; - } - } - } - - inline int length() const { - return length_; - } - - inline char* operator*() { return str_; } - inline const char* operator*() const { return str_; } - - inline ~Utf8String() { - if (str_ != str_st_) { - free(str_); - } - } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(Utf8String) - - int length_; - char *str_; - char str_st_[1024]; -}; - -#endif // NODE_MODULE_VERSION - -typedef void (*FreeCallback)(char *data, void *hint); - -typedef const FunctionCallbackInfo& NAN_METHOD_ARGS_TYPE; -typedef void NAN_METHOD_RETURN_TYPE; - -typedef const PropertyCallbackInfo& NAN_GETTER_ARGS_TYPE; -typedef void NAN_GETTER_RETURN_TYPE; - -typedef const PropertyCallbackInfo& NAN_SETTER_ARGS_TYPE; -typedef void NAN_SETTER_RETURN_TYPE; - -typedef 
const PropertyCallbackInfo<v8::Value>&
-    NAN_PROPERTY_GETTER_ARGS_TYPE;
-typedef void NAN_PROPERTY_GETTER_RETURN_TYPE;
[remainder of the deleted vendored nan.h omitted: the matching NAN_PROPERTY_*/NAN_INDEX_* typedefs and NAN_* declaration macros, the Nan::Callback wrapper and Nan::Call helpers, Nan::AsyncWorker with its progress- and queue-worker variants, the Encode/DecodeBytes/DecodeWrite helpers, the SetMethod/SetAccessor/named- and indexed-property-handler helpers, the Tap test bindings and MakeMaybe, and the trailing nan_*.h includes, down to #endif  // NAN_H_]
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks.h
deleted file mode 100644
index 53ede846..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks.h
+++ /dev/null
@@ -1,88 +0,0 @@
[all 88 deleted lines omitted: the FunctionCallback/GetterCallback/SetterCallback and named- and indexed-property callback typedefs, the imp:: internal-field index constants, and the version switch including nan_callbacks_12_inl.h or nan_callbacks_pre_12_inl.h]
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_12_inl.h
deleted file mode 100644
index c27b18d8..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_12_inl.h
+++ /dev/null
@@ -1,514 +0,0 @@
[all 514 deleted lines omitted: the Node 0.12+ shims — Nan::ReturnValue, Nan::FunctionCallbackInfo, and Nan::PropertyCallbackInfo over V8's FunctionCallbackInfo/PropertyCallbackInfo — plus the imp:: wrapper functions that unpack the internal-field data object and dispatch to the registered function, accessor, and named/indexed property callbacks]
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_pre_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_pre_12_inl.h
deleted file mode 100644
index c9ba4993..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_callbacks_pre_12_inl.h
+++ /dev/null
@@ -1,520 +0,0 @@
[all 520 deleted lines omitted: the pre-Node-0.12 equivalents built on v8::Arguments and v8::AccessorInfo, where ReturnValue is emulated with a v8::Persistent handle and each imp:: wrapper returns the captured handle to V8]
reinterpret_cast( - reinterpret_cast( - obj->GetInternalField(kIndexPropertySetterIndex) - .As()->Value())); - callback(index, value, cbinfo); - return ReturnValueImp(cbinfo.GetReturnValue()).Value(); -} - -typedef v8::Handle (*NativeIndexSetter) - (uint32_t, v8::Local, const v8::AccessorInfo &); - -static -v8::Handle IndexEnumeratorCallbackWrapper( - const v8::AccessorInfo &info) { - v8::Local obj = info.Data().As(); - PropertyCallbackInfo - cbinfo(info, obj->GetInternalField(kDataIndex)); - IndexEnumeratorCallback callback = reinterpret_cast( - reinterpret_cast( - obj->GetInternalField(kIndexPropertyEnumeratorIndex) - .As()->Value())); - callback(cbinfo); - return ReturnValueImp(cbinfo.GetReturnValue()).Value(); -} - -typedef v8::Handle (*NativeIndexEnumerator) - (const v8::AccessorInfo &); - -static -v8::Handle IndexDeleterCallbackWrapper( - uint32_t index, const v8::AccessorInfo &info) { - v8::Local obj = info.Data().As(); - PropertyCallbackInfo - cbinfo(info, obj->GetInternalField(kDataIndex)); - IndexDeleterCallback callback = reinterpret_cast( - reinterpret_cast( - obj->GetInternalField(kIndexPropertyDeleterIndex) - .As()->Value())); - callback(index, cbinfo); - return ReturnValueImp(cbinfo.GetReturnValue()).Value(); -} - -typedef v8::Handle (*NativeIndexDeleter) - (uint32_t, const v8::AccessorInfo &); - -static -v8::Handle IndexQueryCallbackWrapper( - uint32_t index, const v8::AccessorInfo &info) { - v8::Local obj = info.Data().As(); - PropertyCallbackInfo - cbinfo(info, obj->GetInternalField(kDataIndex)); - IndexQueryCallback callback = reinterpret_cast( - reinterpret_cast( - obj->GetInternalField(kIndexPropertyQueryIndex) - .As()->Value())); - callback(index, cbinfo); - return ReturnValueImp(cbinfo.GetReturnValue()).Value(); -} - -typedef v8::Handle (*NativeIndexQuery) - (uint32_t, const v8::AccessorInfo &); -} // end of namespace imp - -#endif // NAN_CALLBACKS_PRE_12_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters.h deleted file mode 100644 index c0b32729..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters.h +++ /dev/null @@ -1,72 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_CONVERTERS_H_ -#define NAN_CONVERTERS_H_ - -namespace imp { -template struct ToFactoryBase { - typedef MaybeLocal return_t; -}; -template struct ValueFactoryBase { typedef Maybe return_t; }; - -template struct ToFactory; - -template<> -struct ToFactory : ToFactoryBase { - static inline return_t convert(v8::Local val) { - if (val.IsEmpty() || !val->IsFunction()) return MaybeLocal(); - return MaybeLocal(val.As()); - } -}; - -#define X(TYPE) \ - template<> \ - struct ToFactory : ToFactoryBase { \ - static inline return_t convert(v8::Local val); \ - }; - -X(Boolean) -X(Number) -X(String) -X(Object) -X(Integer) -X(Uint32) -X(Int32) - -#undef X - -#define X(TYPE) \ - template<> \ - struct ToFactory : ValueFactoryBase { \ - static inline return_t convert(v8::Local val); \ - }; - -X(bool) -X(double) -X(int64_t) -X(uint32_t) -X(int32_t) - -#undef X -} // end of namespace imp - -template -inline 
-typename imp::ToFactory::return_t To(v8::Local val) { - return imp::ToFactory::convert(val); -} - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -# include "nan_converters_43_inl.h" -#else -# include "nan_converters_pre_43_inl.h" -#endif - -#endif // NAN_CONVERTERS_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_43_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_43_inl.h deleted file mode 100644 index 41b72deb..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_43_inl.h +++ /dev/null @@ -1,68 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_CONVERTERS_43_INL_H_ -#define NAN_CONVERTERS_43_INL_H_ - -#define X(TYPE) \ -imp::ToFactory::return_t \ -imp::ToFactory::convert(v8::Local val) { \ - v8::Isolate *isolate = v8::Isolate::GetCurrent(); \ - v8::EscapableHandleScope scope(isolate); \ - return scope.Escape( \ - val->To ## TYPE(isolate->GetCurrentContext()) \ - .FromMaybe(v8::Local())); \ -} - -X(Number) -X(String) -X(Object) -X(Integer) -X(Uint32) -X(Int32) -// V8 <= 7.0 -#if V8_MAJOR_VERSION < 7 || (V8_MAJOR_VERSION == 7 && V8_MINOR_VERSION == 0) -X(Boolean) -#else -imp::ToFactory::return_t \ -imp::ToFactory::convert(v8::Local val) { \ - v8::Isolate *isolate = v8::Isolate::GetCurrent(); \ - v8::EscapableHandleScope scope(isolate); \ - return scope.Escape(val->ToBoolean(isolate)); \ -} -#endif - -#undef X - -#define X(TYPE, NAME) \ -imp::ToFactory::return_t \ -imp::ToFactory::convert(v8::Local val) { \ - v8::Isolate *isolate = v8::Isolate::GetCurrent(); \ - v8::HandleScope scope(isolate); \ - return val->NAME ## Value(isolate->GetCurrentContext()); \ -} - -X(double, Number) -X(int64_t, Integer) -X(uint32_t, Uint32) -X(int32_t, Int32) -// V8 <= 7.0 -#if V8_MAJOR_VERSION < 7 || (V8_MAJOR_VERSION == 7 && V8_MINOR_VERSION == 0) -X(bool, Boolean) -#else -imp::ToFactory::return_t \ -imp::ToFactory::convert(v8::Local val) { \ - v8::Isolate *isolate = v8::Isolate::GetCurrent(); \ - v8::HandleScope scope(isolate); \ - return Just(val->BooleanValue(isolate)); \ -} -#endif - -#undef X - -#endif // NAN_CONVERTERS_43_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_pre_43_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_pre_43_inl.h deleted file mode 100644 index ae0518aa..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_converters_pre_43_inl.h +++ /dev/null @@ -1,42 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_CONVERTERS_PRE_43_INL_H_ -#define NAN_CONVERTERS_PRE_43_INL_H_ - -#define X(TYPE) \ -imp::ToFactory::return_t \ -imp::ToFactory::convert(v8::Local val) { \ - return 
val->To ## TYPE(); \
-}
-
-X(Boolean)
-X(Number)
-X(String)
-X(Object)
-X(Integer)
-X(Uint32)
-X(Int32)
-
-#undef X
-
-#define X(TYPE, NAME) \
-imp::ToFactory<TYPE>::return_t \
-imp::ToFactory<TYPE>::convert(v8::Local<v8::Value> val) { \
-  return Just<TYPE>(val->NAME ## Value()); \
-}
-
-X(bool, Boolean)
-X(double, Number)
-X(int64_t, Integer)
-X(uint32_t, Uint32)
-X(int32_t, Int32)
-
-#undef X
-
-#endif  // NAN_CONVERTERS_PRE_43_INL_H_
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_define_own_property_helper.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_define_own_property_helper.h
deleted file mode 100644
index d710ef22..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_define_own_property_helper.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * NAN - Native Abstractions for Node.js
- *
- * Copyright (c) 2018 NAN contributors
- *
- * MIT License
- ********************************************************************/
-
-#ifndef NAN_DEFINE_OWN_PROPERTY_HELPER_H_
-#define NAN_DEFINE_OWN_PROPERTY_HELPER_H_
-
-namespace imp {
-
-inline Maybe<bool> DefineOwnPropertyHelper(
-    v8::PropertyAttribute current
-  , v8::Handle<v8::Object> obj
-  , v8::Handle<v8::String> key
-  , v8::Handle<v8::Value> value
-  , v8::PropertyAttribute attribs = v8::None) {
-  return !(current & v8::DontDelete) ||                   // configurable OR
-         (!(current & v8::ReadOnly) &&                    // writable AND
-          !((attribs ^ current) & ~v8::ReadOnly))         // same excluding RO
-    ? Just<bool>(obj->ForceSet(key, value, attribs))
-    : Nothing<bool>();
-}
-
-}  // end of namespace imp
-
-#endif  // NAN_DEFINE_OWN_PROPERTY_HELPER_H_
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_12_inl.h
deleted file mode 100644
index 255293ac..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_12_inl.h
+++ /dev/null
@@ -1,430 +0,0 @@
-/*********************************************************************
- * NAN - Native Abstractions for Node.js
- *
- * Copyright (c) 2018 NAN contributors
- *
- * MIT License
- ********************************************************************/
-
-#ifndef NAN_IMPLEMENTATION_12_INL_H_
-#define NAN_IMPLEMENTATION_12_INL_H_
-//==============================================================================
-// node v0.11 implementation
-//==============================================================================
-
-namespace imp {
-
-//=== Array ====================================================================
-
-Factory<v8::Array>::return_t
-Factory<v8::Array>::New() {
-  return v8::Array::New(v8::Isolate::GetCurrent());
-}
-
-Factory<v8::Array>::return_t
-Factory<v8::Array>::New(int length) {
-  return v8::Array::New(v8::Isolate::GetCurrent(), length);
-}
-
-//=== Boolean ==================================================================
-
-Factory<v8::Boolean>::return_t
-Factory<v8::Boolean>::New(bool value) {
-  return v8::Boolean::New(v8::Isolate::GetCurrent(), value);
-}
-
-//=== Boolean Object ===========================================================
-
-Factory<v8::BooleanObject>::return_t
-Factory<v8::BooleanObject>::New(bool value) {
-#if (NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION)
-  return v8::BooleanObject::New(
-      v8::Isolate::GetCurrent(),
value).As(); -#else - return v8::BooleanObject::New(value).As(); -#endif -} - -//=== Context ================================================================== - -Factory::return_t -Factory::New( v8::ExtensionConfiguration* extensions - , v8::Local tmpl - , v8::Local obj) { - return v8::Context::New(v8::Isolate::GetCurrent(), extensions, tmpl, obj); -} - -//=== Date ===================================================================== - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -Factory::return_t -Factory::New(double value) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(v8::Date::New(isolate->GetCurrentContext(), value) - .FromMaybe(v8::Local()).As()); -} -#else -Factory::return_t -Factory::New(double value) { - return v8::Date::New(v8::Isolate::GetCurrent(), value).As(); -} -#endif - -//=== External ================================================================= - -Factory::return_t -Factory::New(void * value) { - return v8::External::New(v8::Isolate::GetCurrent(), value); -} - -//=== Function ================================================================= - -Factory::return_t -Factory::New( FunctionCallback callback - , v8::Local data) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::Local tpl = v8::ObjectTemplate::New(isolate); - tpl->SetInternalFieldCount(imp::kFunctionFieldCount); - v8::Local obj = NewInstance(tpl).ToLocalChecked(); - - obj->SetInternalField( - imp::kFunctionIndex - , v8::External::New(isolate, reinterpret_cast(callback))); - - v8::Local val = v8::Local::New(isolate, data); - - if (!val.IsEmpty()) { - obj->SetInternalField(imp::kDataIndex, val); - } - -#if NODE_MAJOR_VERSION >= 10 - v8::Local context = isolate->GetCurrentContext(); - v8::Local function = - v8::Function::New(context, imp::FunctionCallbackWrapper, obj) - .ToLocalChecked(); -#else - v8::Local function = - v8::Function::New(isolate, imp::FunctionCallbackWrapper, obj); -#endif - - return scope.Escape(function); -} - -//=== Function Template ======================================================== - -Factory::return_t -Factory::New( FunctionCallback callback - , v8::Local data - , v8::Local signature) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - if (callback) { - v8::EscapableHandleScope scope(isolate); - v8::Local tpl = v8::ObjectTemplate::New(isolate); - tpl->SetInternalFieldCount(imp::kFunctionFieldCount); - v8::Local obj = NewInstance(tpl).ToLocalChecked(); - - obj->SetInternalField( - imp::kFunctionIndex - , v8::External::New(isolate, reinterpret_cast(callback))); - v8::Local val = v8::Local::New(isolate, data); - - if (!val.IsEmpty()) { - obj->SetInternalField(imp::kDataIndex, val); - } - - return scope.Escape(v8::FunctionTemplate::New( isolate - , imp::FunctionCallbackWrapper - , obj - , signature)); - } else { - return v8::FunctionTemplate::New(isolate, 0, data, signature); - } -} - -//=== Number =================================================================== - -Factory::return_t -Factory::New(double value) { - return v8::Number::New(v8::Isolate::GetCurrent(), value); -} - -//=== Number Object ============================================================ - -Factory::return_t -Factory::New(double value) { - return v8::NumberObject::New( v8::Isolate::GetCurrent() - , value).As(); -} - -//=== Integer, Int32 and Uint32 
================================================ - -template -typename IntegerFactory::return_t -IntegerFactory::New(int32_t value) { - return To(T::New(v8::Isolate::GetCurrent(), value)); -} - -template -typename IntegerFactory::return_t -IntegerFactory::New(uint32_t value) { - return To(T::NewFromUnsigned(v8::Isolate::GetCurrent(), value)); -} - -Factory::return_t -Factory::New(int32_t value) { - return To( - v8::Uint32::NewFromUnsigned(v8::Isolate::GetCurrent(), value)); -} - -Factory::return_t -Factory::New(uint32_t value) { - return To( - v8::Uint32::NewFromUnsigned(v8::Isolate::GetCurrent(), value)); -} - -//=== Object =================================================================== - -Factory::return_t -Factory::New() { - return v8::Object::New(v8::Isolate::GetCurrent()); -} - -//=== Object Template ========================================================== - -Factory::return_t -Factory::New() { - return v8::ObjectTemplate::New(v8::Isolate::GetCurrent()); -} - -//=== RegExp =================================================================== - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -Factory::return_t -Factory::New( - v8::Local pattern - , v8::RegExp::Flags flags) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape( - v8::RegExp::New(isolate->GetCurrentContext(), pattern, flags) - .FromMaybe(v8::Local())); -} -#else -Factory::return_t -Factory::New( - v8::Local pattern - , v8::RegExp::Flags flags) { - return v8::RegExp::New(pattern, flags); -} -#endif - -//=== Script =================================================================== - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -Factory::return_t -Factory::New( v8::Local source) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::ScriptCompiler::Source src(source); - return scope.Escape( - v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &src) - .FromMaybe(v8::Local())); -} - -Factory::return_t -Factory::New( v8::Local source - , v8::ScriptOrigin const& origin) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::ScriptCompiler::Source src(source, origin); - return scope.Escape( - v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &src) - .FromMaybe(v8::Local())); -} -#else -Factory::return_t -Factory::New( v8::Local source) { - v8::ScriptCompiler::Source src(source); - return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &src); -} - -Factory::return_t -Factory::New( v8::Local source - , v8::ScriptOrigin const& origin) { - v8::ScriptCompiler::Source src(source, origin); - return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &src); -} -#endif - -//=== Signature ================================================================ - -Factory::return_t -Factory::New(Factory::FTH receiver) { - return v8::Signature::New(v8::Isolate::GetCurrent(), receiver); -} - -//=== String =================================================================== - -Factory::return_t -Factory::New() { - return v8::String::Empty(v8::Isolate::GetCurrent()); -} - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -Factory::return_t -Factory::New(const char * value, int length) { 
- return v8::String::NewFromUtf8( - v8::Isolate::GetCurrent(), value, v8::NewStringType::kNormal, length); -} - -Factory::return_t -Factory::New(std::string const& value) { - assert(value.size() <= INT_MAX && "string too long"); - return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), - value.data(), v8::NewStringType::kNormal, static_cast(value.size())); -} - -Factory::return_t -Factory::New(const uint16_t * value, int length) { - return v8::String::NewFromTwoByte(v8::Isolate::GetCurrent(), value, - v8::NewStringType::kNormal, length); -} - -Factory::return_t -Factory::New(v8::String::ExternalStringResource * value) { - return v8::String::NewExternalTwoByte(v8::Isolate::GetCurrent(), value); -} - -Factory::return_t -Factory::New(ExternalOneByteStringResource * value) { - return v8::String::NewExternalOneByte(v8::Isolate::GetCurrent(), value); -} -#else -Factory::return_t -Factory::New(const char * value, int length) { - return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), value, - v8::String::kNormalString, length); -} - -Factory::return_t -Factory::New( - std::string const& value) /* NOLINT(build/include_what_you_use) */ { - assert(value.size() <= INT_MAX && "string too long"); - return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), value.data(), - v8::String::kNormalString, - static_cast(value.size())); -} - -Factory::return_t -Factory::New(const uint16_t * value, int length) { - return v8::String::NewFromTwoByte(v8::Isolate::GetCurrent(), value, - v8::String::kNormalString, length); -} - -Factory::return_t -Factory::New(v8::String::ExternalStringResource * value) { - return v8::String::NewExternal(v8::Isolate::GetCurrent(), value); -} - -Factory::return_t -Factory::New(ExternalOneByteStringResource * value) { - return v8::String::NewExternal(v8::Isolate::GetCurrent(), value); -} -#endif - -//=== String Object ============================================================ - -// See https://github.com/nodejs/nan/pull/811#discussion_r224594980. -// Disable the warning as there is no way around it. -// TODO(bnoordhuis) Use isolate-based version in Node.js v12. 
-Factory::return_t -Factory::New(v8::Local value) { -// V8 > 7.0 -#if V8_MAJOR_VERSION > 7 || (V8_MAJOR_VERSION == 7 && V8_MINOR_VERSION > 0) - return v8::StringObject::New(v8::Isolate::GetCurrent(), value) - .As(); -#else -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif - return v8::StringObject::New(value).As(); -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif -#ifdef _MSC_VER -#pragma warning(pop) -#endif -#endif -} - -//=== Unbound Script =========================================================== - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -Factory::return_t -Factory::New(v8::Local source) { - v8::ScriptCompiler::Source src(source); - return v8::ScriptCompiler::CompileUnboundScript( - v8::Isolate::GetCurrent(), &src); -} - -Factory::return_t -Factory::New( v8::Local source - , v8::ScriptOrigin const& origin) { - v8::ScriptCompiler::Source src(source, origin); - return v8::ScriptCompiler::CompileUnboundScript( - v8::Isolate::GetCurrent(), &src); -} -#else -Factory::return_t -Factory::New(v8::Local source) { - v8::ScriptCompiler::Source src(source); - return v8::ScriptCompiler::CompileUnbound(v8::Isolate::GetCurrent(), &src); -} - -Factory::return_t -Factory::New( v8::Local source - , v8::ScriptOrigin const& origin) { - v8::ScriptCompiler::Source src(source, origin); - return v8::ScriptCompiler::CompileUnbound(v8::Isolate::GetCurrent(), &src); -} -#endif - -} // end of namespace imp - -//=== Presistents and Handles ================================================== - -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION -template -inline v8::Local New(v8::Handle h) { - return v8::Local::New(v8::Isolate::GetCurrent(), h); -} -#endif - -template -inline v8::Local New(v8::Persistent const& p) { - return v8::Local::New(v8::Isolate::GetCurrent(), p); -} - -template -inline v8::Local New(Persistent const& p) { - return v8::Local::New(v8::Isolate::GetCurrent(), p); -} - -template -inline v8::Local New(Global const& p) { - return v8::Local::New(v8::Isolate::GetCurrent(), p); -} - -#endif // NAN_IMPLEMENTATION_12_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_pre_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_pre_12_inl.h deleted file mode 100644 index 1472421a..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_implementation_pre_12_inl.h +++ /dev/null @@ -1,263 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_IMPLEMENTATION_PRE_12_INL_H_ -#define NAN_IMPLEMENTATION_PRE_12_INL_H_ - -//============================================================================== -// node v0.10 implementation -//============================================================================== - -namespace imp { - -//=== Array ==================================================================== - -Factory::return_t -Factory::New() { - return v8::Array::New(); -} - -Factory::return_t -Factory::New(int 
length) { - return v8::Array::New(length); -} - -//=== Boolean ================================================================== - -Factory::return_t -Factory::New(bool value) { - return v8::Boolean::New(value)->ToBoolean(); -} - -//=== Boolean Object =========================================================== - -Factory::return_t -Factory::New(bool value) { - return v8::BooleanObject::New(value).As(); -} - -//=== Context ================================================================== - -Factory::return_t -Factory::New( v8::ExtensionConfiguration* extensions - , v8::Local tmpl - , v8::Local obj) { - v8::Persistent ctx = v8::Context::New(extensions, tmpl, obj); - v8::Local lctx = v8::Local::New(ctx); - ctx.Dispose(); - return lctx; -} - -//=== Date ===================================================================== - -Factory::return_t -Factory::New(double value) { - return v8::Date::New(value).As(); -} - -//=== External ================================================================= - -Factory::return_t -Factory::New(void * value) { - return v8::External::New(value); -} - -//=== Function ================================================================= - -Factory::return_t -Factory::New( FunctionCallback callback - , v8::Local data) { - v8::HandleScope scope; - - return scope.Close(Factory::New( - callback, data, v8::Local()) - ->GetFunction()); -} - - -//=== FunctionTemplate ========================================================= - -Factory::return_t -Factory::New( FunctionCallback callback - , v8::Local data - , v8::Local signature) { - if (callback) { - v8::HandleScope scope; - - v8::Local tpl = v8::ObjectTemplate::New(); - tpl->SetInternalFieldCount(imp::kFunctionFieldCount); - v8::Local obj = tpl->NewInstance(); - - obj->SetInternalField( - imp::kFunctionIndex - , v8::External::New(reinterpret_cast(callback))); - - v8::Local val = v8::Local::New(data); - - if (!val.IsEmpty()) { - obj->SetInternalField(imp::kDataIndex, val); - } - - // Note(agnat): Emulate length argument here. Unfortunately, I couldn't find - // a way. Have at it though... 
- return scope.Close( - v8::FunctionTemplate::New(imp::FunctionCallbackWrapper - , obj - , signature)); - } else { - return v8::FunctionTemplate::New(0, data, signature); - } -} - -//=== Number =================================================================== - -Factory::return_t -Factory::New(double value) { - return v8::Number::New(value); -} - -//=== Number Object ============================================================ - -Factory::return_t -Factory::New(double value) { - return v8::NumberObject::New(value).As(); -} - -//=== Integer, Int32 and Uint32 ================================================ - -template -typename IntegerFactory::return_t -IntegerFactory::New(int32_t value) { - return To(T::New(value)); -} - -template -typename IntegerFactory::return_t -IntegerFactory::New(uint32_t value) { - return To(T::NewFromUnsigned(value)); -} - -Factory::return_t -Factory::New(int32_t value) { - return To(v8::Uint32::NewFromUnsigned(value)); -} - -Factory::return_t -Factory::New(uint32_t value) { - return To(v8::Uint32::NewFromUnsigned(value)); -} - - -//=== Object =================================================================== - -Factory::return_t -Factory::New() { - return v8::Object::New(); -} - -//=== Object Template ========================================================== - -Factory::return_t -Factory::New() { - return v8::ObjectTemplate::New(); -} - -//=== RegExp =================================================================== - -Factory::return_t -Factory::New( - v8::Local pattern - , v8::RegExp::Flags flags) { - return v8::RegExp::New(pattern, flags); -} - -//=== Script =================================================================== - -Factory::return_t -Factory::New( v8::Local source) { - return v8::Script::New(source); -} -Factory::return_t -Factory::New( v8::Local source - , v8::ScriptOrigin const& origin) { - return v8::Script::New(source, const_cast(&origin)); -} - -//=== Signature ================================================================ - -Factory::return_t -Factory::New(Factory::FTH receiver) { - return v8::Signature::New(receiver); -} - -//=== String =================================================================== - -Factory::return_t -Factory::New() { - return v8::String::Empty(); -} - -Factory::return_t -Factory::New(const char * value, int length) { - return v8::String::New(value, length); -} - -Factory::return_t -Factory::New( - std::string const& value) /* NOLINT(build/include_what_you_use) */ { - assert(value.size() <= INT_MAX && "string too long"); - return v8::String::New(value.data(), static_cast(value.size())); -} - -Factory::return_t -Factory::New(const uint16_t * value, int length) { - return v8::String::New(value, length); -} - -Factory::return_t -Factory::New(v8::String::ExternalStringResource * value) { - return v8::String::NewExternal(value); -} - -Factory::return_t -Factory::New(v8::String::ExternalAsciiStringResource * value) { - return v8::String::NewExternal(value); -} - -//=== String Object ============================================================ - -Factory::return_t -Factory::New(v8::Local value) { - return v8::StringObject::New(value).As(); -} - -} // end of namespace imp - -//=== Presistents and Handles ================================================== - -template -inline v8::Local New(v8::Handle h) { - return v8::Local::New(h); -} - -template -inline v8::Local New(v8::Persistent const& p) { - return v8::Local::New(p); -} - -template -inline v8::Local New(Persistent const& p) { - return v8::Local::New(p.persistent); 
-} - -template -inline v8::Local New(Global const& p) { - return v8::Local::New(p.persistent); -} - -#endif // NAN_IMPLEMENTATION_PRE_12_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_json.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_json.h deleted file mode 100644 index 33ac8ba6..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_json.h +++ /dev/null @@ -1,166 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_JSON_H_ -#define NAN_JSON_H_ - -#if NODE_MODULE_VERSION < NODE_0_12_MODULE_VERSION -#define NAN_JSON_H_NEED_PARSE 1 -#else -#define NAN_JSON_H_NEED_PARSE 0 -#endif // NODE_MODULE_VERSION < NODE_0_12_MODULE_VERSION - -#if NODE_MODULE_VERSION >= NODE_7_0_MODULE_VERSION -#define NAN_JSON_H_NEED_STRINGIFY 0 -#else -#define NAN_JSON_H_NEED_STRINGIFY 1 -#endif // NODE_MODULE_VERSION >= NODE_7_0_MODULE_VERSION - -class JSON { - public: - JSON() { -#if NAN_JSON_H_NEED_PARSE + NAN_JSON_H_NEED_STRINGIFY - Nan::HandleScope scope; - - Nan::MaybeLocal maybe_global_json = Nan::Get( - Nan::GetCurrentContext()->Global(), - Nan::New("JSON").ToLocalChecked() - ); - - assert(!maybe_global_json.IsEmpty() && "global JSON is empty"); - v8::Local val_global_json = maybe_global_json.ToLocalChecked(); - - assert(val_global_json->IsObject() && "global JSON is not an object"); - Nan::MaybeLocal maybe_obj_global_json = - Nan::To(val_global_json); - - assert(!maybe_obj_global_json.IsEmpty() && "global JSON object is empty"); - v8::Local global_json = maybe_obj_global_json.ToLocalChecked(); - -#if NAN_JSON_H_NEED_PARSE - Nan::MaybeLocal maybe_parse_method = Nan::Get( - global_json, Nan::New("parse").ToLocalChecked() - ); - - assert(!maybe_parse_method.IsEmpty() && "JSON.parse is empty"); - v8::Local parse_method = maybe_parse_method.ToLocalChecked(); - - assert(parse_method->IsFunction() && "JSON.parse is not a function"); - parse_cb_.Reset(parse_method.As()); -#endif // NAN_JSON_H_NEED_PARSE - -#if NAN_JSON_H_NEED_STRINGIFY - Nan::MaybeLocal maybe_stringify_method = Nan::Get( - global_json, Nan::New("stringify").ToLocalChecked() - ); - - assert(!maybe_stringify_method.IsEmpty() && "JSON.stringify is empty"); - v8::Local stringify_method = - maybe_stringify_method.ToLocalChecked(); - - assert( - stringify_method->IsFunction() && "JSON.stringify is not a function" - ); - stringify_cb_.Reset(stringify_method.As()); -#endif // NAN_JSON_H_NEED_STRINGIFY -#endif // NAN_JSON_H_NEED_PARSE + NAN_JSON_H_NEED_STRINGIFY - } - - inline - Nan::MaybeLocal Parse(v8::Local json_string) { - Nan::EscapableHandleScope scope; -#if NAN_JSON_H_NEED_PARSE - return scope.Escape(parse(json_string)); -#else - Nan::MaybeLocal result; -#if NODE_MODULE_VERSION >= NODE_0_12_MODULE_VERSION && \ - NODE_MODULE_VERSION <= IOJS_2_0_MODULE_VERSION - result = v8::JSON::Parse(json_string); -#else -#if NODE_MODULE_VERSION > NODE_6_0_MODULE_VERSION - v8::Local context_or_isolate = Nan::GetCurrentContext(); -#else - v8::Isolate* context_or_isolate = v8::Isolate::GetCurrent(); -#endif // NODE_MODULE_VERSION > NODE_6_0_MODULE_VERSION - result = v8::JSON::Parse(context_or_isolate, json_string); -#endif // 
NODE_MODULE_VERSION >= NODE_0_12_MODULE_VERSION && - // NODE_MODULE_VERSION <= IOJS_2_0_MODULE_VERSION - if (result.IsEmpty()) return v8::Local(); - return scope.Escape(result.ToLocalChecked()); -#endif // NAN_JSON_H_NEED_PARSE - } - - inline - Nan::MaybeLocal Stringify(v8::Local json_object) { - Nan::EscapableHandleScope scope; - Nan::MaybeLocal result = -#if NAN_JSON_H_NEED_STRINGIFY - Nan::To(stringify(json_object)); -#else - v8::JSON::Stringify(Nan::GetCurrentContext(), json_object); -#endif // NAN_JSON_H_NEED_STRINGIFY - if (result.IsEmpty()) return v8::Local(); - return scope.Escape(result.ToLocalChecked()); - } - - inline - Nan::MaybeLocal Stringify(v8::Local json_object, - v8::Local gap) { - Nan::EscapableHandleScope scope; - Nan::MaybeLocal result = -#if NAN_JSON_H_NEED_STRINGIFY - Nan::To(stringify(json_object, gap)); -#else - v8::JSON::Stringify(Nan::GetCurrentContext(), json_object, gap); -#endif // NAN_JSON_H_NEED_STRINGIFY - if (result.IsEmpty()) return v8::Local(); - return scope.Escape(result.ToLocalChecked()); - } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(JSON) -#if NAN_JSON_H_NEED_PARSE - Nan::Callback parse_cb_; -#endif // NAN_JSON_H_NEED_PARSE -#if NAN_JSON_H_NEED_STRINGIFY - Nan::Callback stringify_cb_; -#endif // NAN_JSON_H_NEED_STRINGIFY - -#if NAN_JSON_H_NEED_PARSE - inline v8::Local parse(v8::Local arg) { - assert(!parse_cb_.IsEmpty() && "parse_cb_ is empty"); - AsyncResource resource("nan:JSON.parse"); - return parse_cb_.Call(1, &arg, &resource).FromMaybe(v8::Local()); - } -#endif // NAN_JSON_H_NEED_PARSE - -#if NAN_JSON_H_NEED_STRINGIFY - inline v8::Local stringify(v8::Local arg) { - assert(!stringify_cb_.IsEmpty() && "stringify_cb_ is empty"); - AsyncResource resource("nan:JSON.stringify"); - return stringify_cb_.Call(1, &arg, &resource) - .FromMaybe(v8::Local()); - } - - inline v8::Local stringify(v8::Local arg, - v8::Local gap) { - assert(!stringify_cb_.IsEmpty() && "stringify_cb_ is empty"); - - v8::Local argv[] = { - arg, - Nan::Null(), - gap - }; - AsyncResource resource("nan:JSON.stringify"); - return stringify_cb_.Call(3, argv, &resource) - .FromMaybe(v8::Local()); - } -#endif // NAN_JSON_H_NEED_STRINGIFY -}; - -#endif // NAN_JSON_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_43_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_43_inl.h deleted file mode 100644 index c04ce30d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_43_inl.h +++ /dev/null @@ -1,356 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_MAYBE_43_INL_H_ -#define NAN_MAYBE_43_INL_H_ - -template -using MaybeLocal = v8::MaybeLocal; - -inline -MaybeLocal ToDetailString(v8::Local val) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(val->ToDetailString(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline -MaybeLocal ToArrayIndex(v8::Local val) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(val->ToArrayIndex(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline 
-Maybe<bool> Equals(v8::Local<v8::Value> a, v8::Local<v8::Value>(b)) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-  return a->Equals(isolate->GetCurrentContext(), b);
-}
-
-inline
-MaybeLocal<v8::Object> NewInstance(v8::Local<v8::Function> h) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::EscapableHandleScope scope(isolate);
-  return scope.Escape(h->NewInstance(isolate->GetCurrentContext())
-      .FromMaybe(v8::Local<v8::Object>()));
-}
-
-inline
-MaybeLocal<v8::Object> NewInstance(
-    v8::Local<v8::Function> h
-  , int argc
-  , v8::Local<v8::Value> argv[]) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::EscapableHandleScope scope(isolate);
-  return scope.Escape(h->NewInstance(isolate->GetCurrentContext(), argc, argv)
-      .FromMaybe(v8::Local<v8::Object>()));
-}
-
-inline
-MaybeLocal<v8::Object> NewInstance(v8::Local<v8::ObjectTemplate> h) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::EscapableHandleScope scope(isolate);
-  return scope.Escape(h->NewInstance(isolate->GetCurrentContext())
-      .FromMaybe(v8::Local<v8::Object>()));
-}
-
-
-inline MaybeLocal<v8::Function> GetFunction(
-    v8::Local<v8::FunctionTemplate> t) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::EscapableHandleScope scope(isolate);
-  return scope.Escape(t->GetFunction(isolate->GetCurrentContext())
-      .FromMaybe(v8::Local<v8::Function>()));
-}
-
-inline Maybe<bool> Set(
-    v8::Local<v8::Object> obj
-  , v8::Local<v8::Value> key
-  , v8::Local<v8::Value> value) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-  return obj->Set(isolate->GetCurrentContext(), key, value);
-}
-
-inline Maybe<bool> Set(
-    v8::Local<v8::Object> obj
-  , uint32_t index
-  , v8::Local<v8::Value> value) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-  return obj->Set(isolate->GetCurrentContext(), index, value);
-}
-
-#if NODE_MODULE_VERSION < NODE_4_0_MODULE_VERSION
-#include "nan_define_own_property_helper.h"  // NOLINT(build/include)
-#endif
-
-inline Maybe<bool> DefineOwnProperty(
-    v8::Local<v8::Object> obj
-  , v8::Local<v8::String> key
-  , v8::Local<v8::Value> value
-  , v8::PropertyAttribute attribs = v8::None) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-#if NODE_MODULE_VERSION >= NODE_4_0_MODULE_VERSION
-  return obj->DefineOwnProperty(isolate->GetCurrentContext(), key, value,
-                                attribs);
-#else
-  Maybe<v8::PropertyAttribute> maybeCurrent =
-      obj->GetPropertyAttributes(isolate->GetCurrentContext(), key);
-  if (maybeCurrent.IsNothing()) {
-    return Nothing<bool>();
-  }
-  v8::PropertyAttribute current = maybeCurrent.FromJust();
-  return imp::DefineOwnPropertyHelper(current, obj, key, value, attribs);
-#endif
-}
-
-NAN_DEPRECATED inline Maybe<bool> ForceSet(
-    v8::Local<v8::Object> obj
-  , v8::Local<v8::Value> key
-  , v8::Local<v8::Value> value
-  , v8::PropertyAttribute attribs = v8::None) {
-  v8::Isolate *isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-#if NODE_MODULE_VERSION >= NODE_9_0_MODULE_VERSION
-  return key->IsName()
-             ?
obj->DefineOwnProperty(isolate->GetCurrentContext(), - key.As(), value, attribs) - : Nothing(); -#else - return obj->ForceSet(isolate->GetCurrentContext(), key, value, attribs); -#endif -} - -inline MaybeLocal Get( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->Get(isolate->GetCurrentContext(), key) - .FromMaybe(v8::Local())); -} - -inline -MaybeLocal Get(v8::Local obj, uint32_t index) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->Get(isolate->GetCurrentContext(), index) - .FromMaybe(v8::Local())); -} - -inline v8::PropertyAttribute GetPropertyAttributes( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->GetPropertyAttributes(isolate->GetCurrentContext(), key) - .FromJust(); -} - -inline Maybe Has( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->Has(isolate->GetCurrentContext(), key); -} - -inline Maybe Has(v8::Local obj, uint32_t index) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->Has(isolate->GetCurrentContext(), index); -} - -inline Maybe Delete( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->Delete(isolate->GetCurrentContext(), key); -} - -inline -Maybe Delete(v8::Local obj, uint32_t index) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->Delete(isolate->GetCurrentContext(), index); -} - -inline -MaybeLocal GetPropertyNames(v8::Local obj) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->GetPropertyNames(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline -MaybeLocal GetOwnPropertyNames(v8::Local obj) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->GetOwnPropertyNames(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline Maybe SetPrototype( - v8::Local obj - , v8::Local prototype) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->SetPrototype(isolate->GetCurrentContext(), prototype); -} - -inline MaybeLocal ObjectProtoToString( - v8::Local obj) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->ObjectProtoToString(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline Maybe HasOwnProperty( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->HasOwnProperty(isolate->GetCurrentContext(), key); -} - -inline Maybe HasRealNamedProperty( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->HasRealNamedProperty(isolate->GetCurrentContext(), key); -} - -inline Maybe HasRealIndexedProperty( - v8::Local obj - , uint32_t index) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->HasRealIndexedProperty(isolate->GetCurrentContext(), index); -} - -inline Maybe HasRealNamedCallbackProperty( - v8::Local obj - , 
v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return obj->HasRealNamedCallbackProperty(isolate->GetCurrentContext(), key); -} - -inline MaybeLocal GetRealNamedPropertyInPrototypeChain( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(obj->GetRealNamedPropertyInPrototypeChain( - isolate->GetCurrentContext(), key) - .FromMaybe(v8::Local())); -} - -inline MaybeLocal GetRealNamedProperty( - v8::Local obj - , v8::Local key) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape( - obj->GetRealNamedProperty(isolate->GetCurrentContext(), key) - .FromMaybe(v8::Local())); -} - -inline MaybeLocal CallAsFunction( - v8::Local obj - , v8::Local recv - , int argc - , v8::Local argv[]) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape( - obj->CallAsFunction(isolate->GetCurrentContext(), recv, argc, argv) - .FromMaybe(v8::Local())); -} - -inline MaybeLocal CallAsConstructor( - v8::Local obj - , int argc, v8::Local argv[]) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape( - obj->CallAsConstructor(isolate->GetCurrentContext(), argc, argv) - .FromMaybe(v8::Local())); -} - -inline -MaybeLocal GetSourceLine(v8::Local msg) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(msg->GetSourceLine(isolate->GetCurrentContext()) - .FromMaybe(v8::Local())); -} - -inline Maybe GetLineNumber(v8::Local msg) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return msg->GetLineNumber(isolate->GetCurrentContext()); -} - -inline Maybe GetStartColumn(v8::Local msg) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return msg->GetStartColumn(isolate->GetCurrentContext()); -} - -inline Maybe GetEndColumn(v8::Local msg) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - return msg->GetEndColumn(isolate->GetCurrentContext()); -} - -inline MaybeLocal CloneElementAt( - v8::Local array - , uint32_t index) { -#if (NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION) - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::Local context = isolate->GetCurrentContext(); - v8::Local elem; - v8::Local obj; - if (!array->Get(context, index).ToLocal(&elem)) { - return scope.Escape(obj); - } - if (!elem->ToObject(context).ToLocal(&obj)) { - return scope.Escape(v8::Local()); - } - return scope.Escape(obj->Clone()); -#else - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(array->CloneElementAt(isolate->GetCurrentContext(), index) - .FromMaybe(v8::Local())); -#endif -} - -inline MaybeLocal Call( - v8::Local fun - , v8::Local recv - , int argc - , v8::Local argv[]) { - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - return scope.Escape(fun->Call(isolate->GetCurrentContext(), recv, argc, argv) - .FromMaybe(v8::Local())); -} - -#endif // NAN_MAYBE_43_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_pre_43_inl.h 
b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_pre_43_inl.h deleted file mode 100644 index 83325ae0..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_maybe_pre_43_inl.h +++ /dev/null @@ -1,268 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_MAYBE_PRE_43_INL_H_ -#define NAN_MAYBE_PRE_43_INL_H_ - -template -class MaybeLocal { - public: - inline MaybeLocal() : val_(v8::Local()) {} - - template -# if NODE_MODULE_VERSION >= NODE_0_12_MODULE_VERSION - inline - MaybeLocal(v8::Local that) : val_(that) {} // NOLINT(runtime/explicit) -# else - inline - MaybeLocal(v8::Local that) : // NOLINT(runtime/explicit) - val_(*reinterpret_cast*>(&that)) {} -# endif - - inline bool IsEmpty() const { return val_.IsEmpty(); } - - template - inline bool ToLocal(v8::Local *out) const { - *out = val_; - return !IsEmpty(); - } - - inline v8::Local ToLocalChecked() const { -#if defined(V8_ENABLE_CHECKS) - assert(!IsEmpty() && "ToLocalChecked is Empty"); -#endif // V8_ENABLE_CHECKS - return val_; - } - - template - inline v8::Local FromMaybe(v8::Local default_value) const { - return IsEmpty() ? default_value : v8::Local(val_); - } - - private: - v8::Local val_; -}; - -inline -MaybeLocal ToDetailString(v8::Handle val) { - return MaybeLocal(val->ToDetailString()); -} - -inline -MaybeLocal ToArrayIndex(v8::Handle val) { - return MaybeLocal(val->ToArrayIndex()); -} - -inline -Maybe Equals(v8::Handle a, v8::Handle(b)) { - return Just(a->Equals(b)); -} - -inline -MaybeLocal NewInstance(v8::Handle h) { - return MaybeLocal(h->NewInstance()); -} - -inline -MaybeLocal NewInstance( - v8::Local h - , int argc - , v8::Local argv[]) { - return MaybeLocal(h->NewInstance(argc, argv)); -} - -inline -MaybeLocal NewInstance(v8::Handle h) { - return MaybeLocal(h->NewInstance()); -} - -inline -MaybeLocal GetFunction(v8::Handle t) { - return MaybeLocal(t->GetFunction()); -} - -inline Maybe Set( - v8::Handle obj - , v8::Handle key - , v8::Handle value) { - return Just(obj->Set(key, value)); -} - -inline Maybe Set( - v8::Handle obj - , uint32_t index - , v8::Handle value) { - return Just(obj->Set(index, value)); -} - -#include "nan_define_own_property_helper.h" // NOLINT(build/include) - -inline Maybe DefineOwnProperty( - v8::Handle obj - , v8::Handle key - , v8::Handle value - , v8::PropertyAttribute attribs = v8::None) { - v8::PropertyAttribute current = obj->GetPropertyAttributes(key); - return imp::DefineOwnPropertyHelper(current, obj, key, value, attribs); -} - -NAN_DEPRECATED inline Maybe ForceSet( - v8::Handle obj - , v8::Handle key - , v8::Handle value - , v8::PropertyAttribute attribs = v8::None) { - return Just(obj->ForceSet(key, value, attribs)); -} - -inline MaybeLocal Get( - v8::Handle obj - , v8::Handle key) { - return MaybeLocal(obj->Get(key)); -} - -inline MaybeLocal Get( - v8::Handle obj - , uint32_t index) { - return MaybeLocal(obj->Get(index)); -} - -inline Maybe GetPropertyAttributes( - v8::Handle obj - , v8::Handle key) { - return Just(obj->GetPropertyAttributes(key)); -} - -inline Maybe Has( - v8::Handle obj - , v8::Handle key) { - return Just(obj->Has(key)); -} - -inline Maybe Has( - v8::Handle obj - , uint32_t index) { - return Just(obj->Has(index)); -} - 
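The shims above give pre-4.3 V8 the same `Maybe`/`MaybeLocal` surface that newer engines expose natively, so addon code can be written once against NAN's API regardless of the Node ABI it compiles on. A minimal usage sketch (hypothetical addon code, assuming only NAN's documented `Nan::Set`/`Nan::Get` helpers; `SetAndRead` is an illustrative name, not part of this repo):

```cpp
#include <nan.h>  // pulls in the Maybe shims above on old Node, v8::Maybe on new

void SetAndRead(v8::Local<v8::Object> obj) {
  v8::Local<v8::String> key = Nan::New("answer").ToLocalChecked();
  // Nan::Set returns Nan::Maybe<bool> on every Node ABI, whether it wraps
  // v8::Maybe (V8 >= 4.3) or the Just(...) shims defined in this header.
  Nan::Set(obj, key, Nan::New(42));
  v8::Local<v8::Value> out;
  if (Nan::Get(obj, key).ToLocal(&out)) {
    // out now holds the stored value; the empty-handle case is handled above
  }
}
```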
-inline Maybe Delete( - v8::Handle obj - , v8::Handle key) { - return Just(obj->Delete(key)); -} - -inline Maybe Delete( - v8::Handle obj - , uint32_t index) { - return Just(obj->Delete(index)); -} - -inline -MaybeLocal GetPropertyNames(v8::Handle obj) { - return MaybeLocal(obj->GetPropertyNames()); -} - -inline -MaybeLocal GetOwnPropertyNames(v8::Handle obj) { - return MaybeLocal(obj->GetOwnPropertyNames()); -} - -inline Maybe SetPrototype( - v8::Handle obj - , v8::Handle prototype) { - return Just(obj->SetPrototype(prototype)); -} - -inline MaybeLocal ObjectProtoToString( - v8::Handle obj) { - return MaybeLocal(obj->ObjectProtoToString()); -} - -inline Maybe HasOwnProperty( - v8::Handle obj - , v8::Handle key) { - return Just(obj->HasOwnProperty(key)); -} - -inline Maybe HasRealNamedProperty( - v8::Handle obj - , v8::Handle key) { - return Just(obj->HasRealNamedProperty(key)); -} - -inline Maybe HasRealIndexedProperty( - v8::Handle obj - , uint32_t index) { - return Just(obj->HasRealIndexedProperty(index)); -} - -inline Maybe HasRealNamedCallbackProperty( - v8::Handle obj - , v8::Handle key) { - return Just(obj->HasRealNamedCallbackProperty(key)); -} - -inline MaybeLocal GetRealNamedPropertyInPrototypeChain( - v8::Handle obj - , v8::Handle key) { - return MaybeLocal( - obj->GetRealNamedPropertyInPrototypeChain(key)); -} - -inline MaybeLocal GetRealNamedProperty( - v8::Handle obj - , v8::Handle key) { - return MaybeLocal(obj->GetRealNamedProperty(key)); -} - -inline MaybeLocal CallAsFunction( - v8::Handle obj - , v8::Handle recv - , int argc - , v8::Handle argv[]) { - return MaybeLocal(obj->CallAsFunction(recv, argc, argv)); -} - -inline MaybeLocal CallAsConstructor( - v8::Handle obj - , int argc - , v8::Local argv[]) { - return MaybeLocal(obj->CallAsConstructor(argc, argv)); -} - -inline -MaybeLocal GetSourceLine(v8::Handle msg) { - return MaybeLocal(msg->GetSourceLine()); -} - -inline Maybe GetLineNumber(v8::Handle msg) { - return Just(msg->GetLineNumber()); -} - -inline Maybe GetStartColumn(v8::Handle msg) { - return Just(msg->GetStartColumn()); -} - -inline Maybe GetEndColumn(v8::Handle msg) { - return Just(msg->GetEndColumn()); -} - -inline MaybeLocal CloneElementAt( - v8::Handle array - , uint32_t index) { - return MaybeLocal(array->CloneElementAt(index)); -} - -inline MaybeLocal Call( - v8::Local fun - , v8::Local recv - , int argc - , v8::Local argv[]) { - return MaybeLocal(fun->Call(recv, argc, argv)); -} - -#endif // NAN_MAYBE_PRE_43_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_new.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_new.h deleted file mode 100644 index cdf8bbe4..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_new.h +++ /dev/null @@ -1,340 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_NEW_H_ -#define NAN_NEW_H_ - -namespace imp { // scnr - -// TODO(agnat): Generalize -template v8::Local To(v8::Local i); - -template <> -inline -v8::Local -To(v8::Local i) { - return Nan::To(i).ToLocalChecked(); -} - -template <> -inline -v8::Local -To(v8::Local i) { - return Nan::To(i).ToLocalChecked(); -} - -template <> -inline -v8::Local 
-To(v8::Local i) { - return Nan::To(i).ToLocalChecked(); -} - -template struct FactoryBase { - typedef v8::Local return_t; -}; - -template struct MaybeFactoryBase { - typedef MaybeLocal return_t; -}; - -template struct Factory; - -template <> -struct Factory : FactoryBase { - static inline return_t New(); - static inline return_t New(int length); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(bool value); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(bool value); -}; - -template <> -struct Factory : FactoryBase { - static inline - return_t - New( v8::ExtensionConfiguration* extensions = NULL - , v8::Local tmpl = v8::Local() - , v8::Local obj = v8::Local()); -}; - -template <> -struct Factory : MaybeFactoryBase { - static inline return_t New(double value); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(void *value); -}; - -template <> -struct Factory : FactoryBase { - static inline - return_t - New( FunctionCallback callback - , v8::Local data = v8::Local()); -}; - -template <> -struct Factory : FactoryBase { - static inline - return_t - New( FunctionCallback callback = NULL - , v8::Local data = v8::Local() - , v8::Local signature = v8::Local()); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(double value); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(double value); -}; - -template -struct IntegerFactory : FactoryBase { - typedef typename FactoryBase::return_t return_t; - static inline return_t New(int32_t value); - static inline return_t New(uint32_t value); -}; - -template <> -struct Factory : IntegerFactory {}; - -template <> -struct Factory : IntegerFactory {}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(int32_t value); - static inline return_t New(uint32_t value); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(); -}; - -template <> -struct Factory : MaybeFactoryBase { - static inline return_t New( - v8::Local pattern, v8::RegExp::Flags flags); -}; - -template <> -struct Factory : MaybeFactoryBase { - static inline return_t New( v8::Local source); - static inline return_t New( v8::Local source - , v8::ScriptOrigin const& origin); -}; - -template <> -struct Factory : FactoryBase { - typedef v8::Local FTH; - static inline return_t New(FTH receiver = FTH()); -}; - -template <> -struct Factory : MaybeFactoryBase { - static inline return_t New(); - static inline return_t New(const char *value, int length = -1); - static inline return_t New(const uint16_t *value, int length = -1); - static inline return_t New(std::string const& value); - - static inline return_t New(v8::String::ExternalStringResource * value); - static inline return_t New(ExternalOneByteStringResource * value); -}; - -template <> -struct Factory : FactoryBase { - static inline return_t New(v8::Local value); -}; - -} // end of namespace imp - -#if (NODE_MODULE_VERSION >= 12) - -namespace imp { - -template <> -struct Factory : MaybeFactoryBase { - static inline return_t New( v8::Local source); - static inline return_t New( v8::Local source - , v8::ScriptOrigin const& origin); -}; - -} // end of namespace imp - -# include "nan_implementation_12_inl.h" - -#else // NODE_MODULE_VERSION >= 12 - -# include "nan_implementation_pre_12_inl.h" - -#endif - -//=== API 
====================================================================== - -template -typename imp::Factory::return_t -New() { - return imp::Factory::New(); -} - -template -typename imp::Factory::return_t -New(A0 arg0) { - return imp::Factory::New(arg0); -} - -template -typename imp::Factory::return_t -New(A0 arg0, A1 arg1) { - return imp::Factory::New(arg0, arg1); -} - -template -typename imp::Factory::return_t -New(A0 arg0, A1 arg1, A2 arg2) { - return imp::Factory::New(arg0, arg1, arg2); -} - -template -typename imp::Factory::return_t -New(A0 arg0, A1 arg1, A2 arg2, A3 arg3) { - return imp::Factory::New(arg0, arg1, arg2, arg3); -} - -// Note(agnat): When passing overloaded function pointers to template functions -// as generic arguments the compiler needs help in picking the right overload. -// These two functions handle New and New with -// all argument variations. - -// v8::Function and v8::FunctionTemplate with one or two arguments -template -typename imp::Factory::return_t -New( FunctionCallback callback - , v8::Local data = v8::Local()) { - return imp::Factory::New(callback, data); -} - -// v8::Function and v8::FunctionTemplate with three arguments -template -typename imp::Factory::return_t -New( FunctionCallback callback - , v8::Local data = v8::Local() - , A2 a2 = A2()) { - return imp::Factory::New(callback, data, a2); -} - -// Convenience - -#if NODE_MODULE_VERSION < IOJS_3_0_MODULE_VERSION -template inline v8::Local New(v8::Handle h); -#endif - -#if NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION -template - inline v8::Local New(v8::Persistent const& p); -#else -template inline v8::Local New(v8::Persistent const& p); -#endif -template -inline v8::Local New(Persistent const& p); -template -inline v8::Local New(Global const& p); - -inline -imp::Factory::return_t -New(bool value) { - return New(value); -} - -inline -imp::Factory::return_t -New(int32_t value) { - return New(value); -} - -inline -imp::Factory::return_t -New(uint32_t value) { - return New(value); -} - -inline -imp::Factory::return_t -New(double value) { - return New(value); -} - -inline -imp::Factory::return_t -New(std::string const& value) { // NOLINT(build/include_what_you_use) - return New(value); -} - -inline -imp::Factory::return_t -New(const char * value, int length) { - return New(value, length); -} - -inline -imp::Factory::return_t -New(const uint16_t * value, int length) { - return New(value, length); -} - -inline -imp::Factory::return_t -New(const char * value) { - return New(value); -} - -inline -imp::Factory::return_t -New(const uint16_t * value) { - return New(value); -} - -inline -imp::Factory::return_t -New(v8::String::ExternalStringResource * value) { - return New(value); -} - -inline -imp::Factory::return_t -New(ExternalOneByteStringResource * value) { - return New(value); -} - -inline -imp::Factory::return_t -New(v8::Local pattern, v8::RegExp::Flags flags) { - return New(pattern, flags); -} - -#endif // NAN_NEW_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_object_wrap.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_object_wrap.h deleted file mode 100644 index 78712f9c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_object_wrap.h +++ /dev/null @@ -1,156 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * 
Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_OBJECT_WRAP_H_ -#define NAN_OBJECT_WRAP_H_ - -class ObjectWrap { - public: - ObjectWrap() { - refs_ = 0; - } - - - virtual ~ObjectWrap() { - if (persistent().IsEmpty()) { - return; - } - - persistent().ClearWeak(); - persistent().Reset(); - } - - - template - static inline T* Unwrap(v8::Local object) { - assert(!object.IsEmpty()); - assert(object->InternalFieldCount() > 0); - // Cast to ObjectWrap before casting to T. A direct cast from void - // to T won't work right when T has more than one base class. - void* ptr = GetInternalFieldPointer(object, 0); - ObjectWrap* wrap = static_cast(ptr); - return static_cast(wrap); - } - - - inline v8::Local handle() const { - return New(handle_); - } - - - inline Persistent& persistent() { - return handle_; - } - - - protected: - inline void Wrap(v8::Local object) { - assert(persistent().IsEmpty()); - assert(object->InternalFieldCount() > 0); - SetInternalFieldPointer(object, 0, this); - persistent().Reset(object); - MakeWeak(); - } - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - - inline void MakeWeak() { - persistent().v8::PersistentBase::SetWeak( - this, WeakCallback, v8::WeakCallbackType::kParameter); -#if NODE_MAJOR_VERSION < 10 - // FIXME(bnoordhuis) Probably superfluous in older Node.js versions too. - persistent().MarkIndependent(); -#endif - } - -#elif NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - - inline void MakeWeak() { - persistent().v8::PersistentBase::SetWeak(this, WeakCallback); - persistent().MarkIndependent(); - } - -#else - - inline void MakeWeak() { - persistent().persistent.MakeWeak(this, WeakCallback); - persistent().MarkIndependent(); - } - -#endif - - /* Ref() marks the object as being attached to an event loop. - * Refed objects will not be garbage collected, even if - * all references are lost. - */ - virtual void Ref() { - assert(!persistent().IsEmpty()); - persistent().ClearWeak(); - refs_++; - } - - /* Unref() marks an object as detached from the event loop. This is its - * default state. When an object with a "weak" reference changes from - * attached to detached state it will be freed. Be careful not to access - * the object after making this call as it might be gone! - * (A "weak reference" means an object that only has a - * persistent handle.) 
- * - * DO NOT CALL THIS FROM DESTRUCTOR - */ - virtual void Unref() { - assert(!persistent().IsEmpty()); - assert(!persistent().IsWeak()); - assert(refs_ > 0); - if (--refs_ == 0) - MakeWeak(); - } - - int refs_; // ro - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(ObjectWrap) -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - - static void - WeakCallback(v8::WeakCallbackInfo const& info) { - ObjectWrap* wrap = info.GetParameter(); - assert(wrap->refs_ == 0); - wrap->handle_.Reset(); - delete wrap; - } - -#elif NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - - static void - WeakCallback(v8::WeakCallbackData const& data) { - ObjectWrap* wrap = data.GetParameter(); - assert(wrap->refs_ == 0); - assert(wrap->handle_.IsNearDeath()); - wrap->handle_.Reset(); - delete wrap; - } - -#else - - static void WeakCallback(v8::Persistent value, void *data) { - ObjectWrap *wrap = static_cast(data); - assert(wrap->refs_ == 0); - assert(wrap->handle_.IsNearDeath()); - wrap->handle_.Reset(); - delete wrap; - } - -#endif - Persistent handle_; -}; - - -#endif // NAN_OBJECT_WRAP_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_12_inl.h deleted file mode 100644 index d9649e86..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_12_inl.h +++ /dev/null @@ -1,132 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_PERSISTENT_12_INL_H_ -#define NAN_PERSISTENT_12_INL_H_ - -template class Persistent : - public v8::Persistent { - public: - inline Persistent() : v8::Persistent() {} - - template inline Persistent(v8::Local that) : - v8::Persistent(v8::Isolate::GetCurrent(), that) {} - - template - inline - Persistent(const v8::Persistent &that) : // NOLINT(runtime/explicit) - v8::Persistent(v8::Isolate::GetCurrent(), that) {} - - inline void Reset() { v8::PersistentBase::Reset(); } - - template - inline void Reset(const v8::Local &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void Reset(const v8::PersistentBase &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type); - - private: - inline T *operator*() const { return *PersistentBase::persistent; } - - template - inline void Copy(const Persistent &that) { - TYPE_CHECK(T, S); - - this->Reset(); - - if (!that.IsEmpty()) { - this->Reset(that); - M::Copy(that, this); - } - } -}; - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -template -class Global : public v8::Global { - public: - inline Global() : v8::Global() {} - - template inline Global(v8::Local that) : - v8::Global(v8::Isolate::GetCurrent(), that) {} - - template - inline - Global(const v8::PersistentBase &that) : // NOLINT(runtime/explicit) - v8::Global(v8::Isolate::GetCurrent(), that) {} - - inline void Reset() { v8::PersistentBase::Reset(); } - - template - inline void Reset(const v8::Local &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void Reset(const v8::PersistentBase &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - reinterpret_cast*>(this)->SetWeak( - parameter, callback, type); - } -}; -#else -template -class Global : public v8::UniquePersistent { - public: - inline Global() : v8::UniquePersistent() {} - - template inline Global(v8::Local that) : - v8::UniquePersistent(v8::Isolate::GetCurrent(), that) {} - - template - inline - Global(const v8::PersistentBase &that) : // NOLINT(runtime/explicit) - v8::UniquePersistent(v8::Isolate::GetCurrent(), that) {} - - inline void Reset() { v8::PersistentBase::Reset(); } - - template - inline void Reset(const v8::Local &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void Reset(const v8::PersistentBase &other) { - v8::PersistentBase::Reset(v8::Isolate::GetCurrent(), other); - } - - template - inline void SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - reinterpret_cast*>(this)->SetWeak( - parameter, callback, type); - } -}; -#endif - -#endif // NAN_PERSISTENT_12_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_pre_12_inl.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_pre_12_inl.h deleted file mode 100644 index 4c9c59da..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_persistent_pre_12_inl.h +++ /dev/null @@ -1,242 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_PERSISTENT_PRE_12_INL_H_ -#define NAN_PERSISTENT_PRE_12_INL_H_ - -template -class PersistentBase { - v8::Persistent persistent; - template - friend v8::Local New(const PersistentBase &p); - template - friend v8::Local New(const Persistent &p); - template - friend v8::Local New(const Global &p); - template friend class ReturnValue; - - public: - inline PersistentBase() : - persistent() {} - - inline void Reset() { - persistent.Dispose(); - persistent.Clear(); - } - - template - inline void Reset(const v8::Local &other) { - TYPE_CHECK(T, S); - - if (!persistent.IsEmpty()) { - persistent.Dispose(); - } - - if (other.IsEmpty()) { - persistent.Clear(); - } else { - persistent = v8::Persistent::New(other); - } - } - - template - inline void Reset(const PersistentBase &other) { - TYPE_CHECK(T, S); - - if (!persistent.IsEmpty()) { - persistent.Dispose(); - } - - if (other.IsEmpty()) { - persistent.Clear(); - } else { - persistent = v8::Persistent::New(other.persistent); - } - } - - inline bool IsEmpty() const { return persistent.IsEmpty(); } - - inline void Empty() { persistent.Clear(); } - - template - inline bool operator==(const PersistentBase &that) const { - return this->persistent == that.persistent; - } - - template - inline bool operator==(const v8::Local &that) const { - return this->persistent == that; - } - - template - inline bool operator!=(const PersistentBase &that) const { - return !operator==(that); - } - - template - inline bool operator!=(const v8::Local &that) const { - return !operator==(that); - } - - template - inline void SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type); - - inline void ClearWeak() { persistent.ClearWeak(); } - - inline void MarkIndependent() { persistent.MarkIndependent(); } - - inline bool IsIndependent() const { return persistent.IsIndependent(); } - - inline bool IsNearDeath() const { return persistent.IsNearDeath(); } - - inline bool IsWeak() const { return persistent.IsWeak(); } - - private: - inline explicit PersistentBase(v8::Persistent that) : - persistent(that) { } - inline explicit PersistentBase(T *val) : persistent(val) {} - template friend class Persistent; - template friend class Global; - friend class ObjectWrap; -}; - -template -class NonCopyablePersistentTraits { - public: - typedef Persistent > - NonCopyablePersistent; - static const bool kResetInDestructor = false; - template - inline static void Copy(const Persistent &source, - NonCopyablePersistent *dest) { - Uncompilable(); - } - - template inline static void Uncompilable() { - TYPE_CHECK(O, v8::Primitive); - } -}; - -template -struct CopyablePersistentTraits { - typedef Persistent > CopyablePersistent; - static const bool kResetInDestructor = true; - template - static inline void Copy(const Persistent &source, - CopyablePersistent *dest) {} -}; - -template class Persistent : - public PersistentBase { - public: - inline Persistent() {} - - template inline Persistent(v8::Handle that) - : PersistentBase(v8::Persistent::New(that)) { - TYPE_CHECK(T, S); - } - - inline Persistent(const Persistent &that) : PersistentBase() { - Copy(that); - } - - template - inline Persistent(const Persistent &that) : - PersistentBase() { - Copy(that); - } - - inline Persistent &operator=(const Persistent &that) { - Copy(that); - return *this; - } - - template - inline Persistent &operator=(const Persistent &that) { - Copy(that); - return *this; - } - - inline ~Persistent() { - if (M::kResetInDestructor) this->Reset(); - } - - private: - inline T *operator*() const { return *PersistentBase::persistent; } - - template - inline void Copy(const Persistent &that) { - TYPE_CHECK(T, S); - - this->Reset(); - - if (!that.IsEmpty()) { - this->persistent = v8::Persistent::New(that.persistent); - M::Copy(that, this); - } - } -}; - -template -class Global : public PersistentBase { - struct RValue { - inline explicit RValue(Global* obj) : object(obj) {} - Global* object; - }; - - public: - inline Global() : PersistentBase(0) { } - - template - inline Global(v8::Local that) // NOLINT(runtime/explicit) - : PersistentBase(v8::Persistent::New(that)) { - TYPE_CHECK(T, S); - } - - template - inline Global(const PersistentBase &that) // NOLINT(runtime/explicit) - : PersistentBase(that) { - TYPE_CHECK(T, S); - } - /** - * Move constructor. - */ - inline Global(RValue rvalue) // NOLINT(runtime/explicit) - : PersistentBase(rvalue.object->persistent) { - rvalue.object->Reset(); - } - inline ~Global() { this->Reset(); } - /** - * Move via assignment. - */ - template - inline Global &operator=(Global rhs) { - TYPE_CHECK(T, S); - this->Reset(rhs.persistent); - rhs.Reset(); - return *this; - } - /** - * Cast operator for moves. - */ - inline operator RValue() { return RValue(this); } - /** - * Pass allows returning uniques from functions, etc. 
- */ - Global Pass() { return Global(RValue(this)); } - - private: - Global(Global &); - void operator=(Global &); - template friend class ReturnValue; -}; - -#endif // NAN_PERSISTENT_PRE_12_INL_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_private.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_private.h deleted file mode 100644 index 15f44cc8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_private.h +++ /dev/null @@ -1,73 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_PRIVATE_H_ -#define NAN_PRIVATE_H_ - -inline Maybe -HasPrivate(v8::Local object, v8::Local key) { - HandleScope scope; -#if NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::Local context = isolate->GetCurrentContext(); - v8::Local private_key = v8::Private::ForApi(isolate, key); - return object->HasPrivate(context, private_key); -#else - return Just(!object->GetHiddenValue(key).IsEmpty()); -#endif -} - -inline MaybeLocal -GetPrivate(v8::Local object, v8::Local key) { -#if NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - v8::Local context = isolate->GetCurrentContext(); - v8::Local private_key = v8::Private::ForApi(isolate, key); - v8::MaybeLocal v = object->GetPrivate(context, private_key); - return scope.Escape(v.ToLocalChecked()); -#else - EscapableHandleScope scope; - v8::Local v = object->GetHiddenValue(key); - if (v.IsEmpty()) { - v = Undefined(); - } - return scope.Escape(v); -#endif -} - -inline Maybe SetPrivate( - v8::Local object, - v8::Local key, - v8::Local value) { -#if NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION - HandleScope scope; - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::Local context = isolate->GetCurrentContext(); - v8::Local private_key = v8::Private::ForApi(isolate, key); - return object->SetPrivate(context, private_key, value); -#else - return Just(object->SetHiddenValue(key, value)); -#endif -} - -inline Maybe DeletePrivate( - v8::Local object, - v8::Local key) { -#if NODE_MODULE_VERSION >= NODE_6_0_MODULE_VERSION - HandleScope scope; - v8::Isolate *isolate = v8::Isolate::GetCurrent(); - v8::Local private_key = v8::Private::ForApi(isolate, key); - return object->DeletePrivate(isolate->GetCurrentContext(), private_key); -#else - return Just(object->DeleteHiddenValue(key)); -#endif -} - -#endif // NAN_PRIVATE_H_ - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_scriptorigin.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_scriptorigin.h deleted file mode 100644 index ce79cdf8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_scriptorigin.h +++ /dev/null @@ -1,76 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2021 NAN contributors - * - * MIT License - 
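The `nan_private.h` shims deleted just above give addons one API over `v8::Private` on Node 6+ and the older hidden-value calls before that. A minimal usage sketch, assuming NAN 2.x (the key name and tag value are illustrative):

```cpp
#include <nan.h>

// Sketch: attaching addon-internal state to a JS object, invisible to scripts.
void TagObject(v8::Local<v8::Object> obj) {
  v8::Local<v8::String> key = Nan::New("addon_tag").ToLocalChecked();

  Nan::SetPrivate(obj, key, Nan::New(1234));        // Nan::Maybe<bool>

  if (Nan::HasPrivate(obj, key).FromJust()) {
    v8::Local<v8::Value> tag =
        Nan::GetPrivate(obj, key).ToLocalChecked(); // Nan::MaybeLocal<v8::Value>
    (void) tag;  // ...use the tag
  }

  Nan::DeletePrivate(obj, key);
}
```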
********************************************************************/ - -#ifndef NAN_SCRIPTORIGIN_H_ -#define NAN_SCRIPTORIGIN_H_ - -class ScriptOrigin : public v8::ScriptOrigin { - public: -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 9 || \ - (V8_MAJOR_VERSION == 9 && (defined(V8_MINOR_VERSION) && (V8_MINOR_VERSION > 0\ - || (V8_MINOR_VERSION == 0 && defined(V8_BUILD_NUMBER) \ - && V8_BUILD_NUMBER >= 1))))) - explicit ScriptOrigin(v8::Local name) : - v8::ScriptOrigin(v8::Isolate::GetCurrent(), name) {} - - ScriptOrigin(v8::Local name - , v8::Local line) : - v8::ScriptOrigin(v8::Isolate::GetCurrent() - , name - , To(line).FromMaybe(0)) {} - - ScriptOrigin(v8::Local name - , v8::Local line - , v8::Local column) : - v8::ScriptOrigin(v8::Isolate::GetCurrent() - , name - , To(line).FromMaybe(0) - , To(column).FromMaybe(0)) {} -#elif defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 8 || \ - (V8_MAJOR_VERSION == 8 && (defined(V8_MINOR_VERSION) && (V8_MINOR_VERSION > 9\ - || (V8_MINOR_VERSION == 9 && defined(V8_BUILD_NUMBER) \ - && V8_BUILD_NUMBER >= 45))))) - explicit ScriptOrigin(v8::Local name) : v8::ScriptOrigin(name) {} - - ScriptOrigin(v8::Local name - , v8::Local line) : - v8::ScriptOrigin(name, To(line).FromMaybe(0)) {} - - ScriptOrigin(v8::Local name - , v8::Local line - , v8::Local column) : - v8::ScriptOrigin(name - , To(line).FromMaybe(0) - , To(column).FromMaybe(0)) {} -#else - explicit ScriptOrigin(v8::Local name) : v8::ScriptOrigin(name) {} - - ScriptOrigin(v8::Local name - , v8::Local line) : v8::ScriptOrigin(name, line) {} - - ScriptOrigin(v8::Local name - , v8::Local line - , v8::Local column) : - v8::ScriptOrigin(name, line, column) {} -#endif - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 8 || \ - (V8_MAJOR_VERSION == 8 && (defined(V8_MINOR_VERSION) && (V8_MINOR_VERSION > 9\ - || (V8_MINOR_VERSION == 9 && defined(V8_BUILD_NUMBER) \ - && V8_BUILD_NUMBER >= 45))))) - v8::Local ResourceLineOffset() const { - return New(LineOffset()); - } - - v8::Local ResourceColumnOffset() const { - return New(ColumnOffset()); - } -#endif -}; - -#endif // NAN_SCRIPTORIGIN_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_string_bytes.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_string_bytes.h deleted file mode 100644 index a2e6437d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_string_bytes.h +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -#ifndef NAN_STRING_BYTES_H_ -#define NAN_STRING_BYTES_H_ - -// Decodes a v8::Local or Buffer to a raw char* - -namespace imp { - -using v8::Local; -using v8::Object; -using v8::String; -using v8::Value; - - -//// Base 64 //// - -#define base64_encoded_size(size) ((size + 2 - ((size + 2) % 3)) / 3 * 4) - - - -//// HEX //// - -static bool contains_non_ascii_slow(const char* buf, size_t len) { - for (size_t i = 0; i < len; ++i) { - if (buf[i] & 0x80) return true; - } - return false; -} - - -static bool contains_non_ascii(const char* src, size_t len) { - if (len < 16) { - return contains_non_ascii_slow(src, len); - } - - const unsigned bytes_per_word = sizeof(void*); - const unsigned align_mask = bytes_per_word - 1; - const unsigned unaligned = reinterpret_cast(src) & align_mask; - - if (unaligned > 0) { - const unsigned n = bytes_per_word - unaligned; - if (contains_non_ascii_slow(src, n)) return true; - src += n; - len -= n; - } - - -#if defined(__x86_64__) || defined(_WIN64) - const uintptr_t mask = 0x8080808080808080ll; -#else - const uintptr_t mask = 0x80808080l; -#endif - - const uintptr_t* srcw = reinterpret_cast(src); - - for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) { - if (srcw[i] & mask) return true; - } - - const unsigned remainder = len & align_mask; - if (remainder > 0) { - const size_t offset = len - remainder; - if (contains_non_ascii_slow(src + offset, remainder)) return true; - } - - return false; -} - - -static void force_ascii_slow(const char* src, char* dst, size_t len) { - for (size_t i = 0; i < len; ++i) { - dst[i] = src[i] & 0x7f; - } -} - - -static void force_ascii(const char* src, char* dst, size_t len) { - if (len < 16) { - force_ascii_slow(src, dst, len); - return; - } - - const unsigned bytes_per_word = sizeof(void*); - const unsigned align_mask = bytes_per_word - 1; - const unsigned src_unalign = reinterpret_cast(src) & align_mask; - const unsigned dst_unalign = reinterpret_cast(dst) & align_mask; - - if (src_unalign > 0) { - if (src_unalign == dst_unalign) { - const unsigned unalign = bytes_per_word - src_unalign; - force_ascii_slow(src, dst, unalign); - src += unalign; - dst += unalign; - len -= src_unalign; - } else { - force_ascii_slow(src, dst, len); - return; - } - } - -#if defined(__x86_64__) || defined(_WIN64) - const uintptr_t mask = ~0x8080808080808080ll; -#else - const uintptr_t mask = ~0x80808080l; -#endif - - const uintptr_t* srcw = reinterpret_cast(src); - uintptr_t* dstw = reinterpret_cast(dst); - - for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) { - dstw[i] = srcw[i] & mask; - } - - const unsigned remainder = len & align_mask; - if (remainder > 0) { - const size_t offset = len - remainder; - force_ascii_slow(src + offset, dst + offset, remainder); - } -} - - -static size_t base64_encode(const char* src, - size_t slen, - char* dst, - size_t dlen) { - // We know how much we'll write, just make sure that there's space. 
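// Worked example of the size formula asserted below (added for clarity):
//   base64_encoded_size(size) = ((size + 2 - ((size + 2) % 3)) / 3) * 4,
//   i.e. ceil(size / 3) * 4: every 3 input bytes become 4 output characters
//   and a short final group is padded with '='. For size = 5:
//   ((5 + 2 - 1) / 3) * 4 = 8, so the 5 bytes "AAAAA" encode to "QUFBQUE=".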
- assert(dlen >= base64_encoded_size(slen) && - "not enough space provided for base64 encode"); - - dlen = base64_encoded_size(slen); - - unsigned a; - unsigned b; - unsigned c; - unsigned i; - unsigned k; - unsigned n; - - static const char table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz" - "0123456789+/"; - - i = 0; - k = 0; - n = slen / 3 * 3; - - while (i < n) { - a = src[i + 0] & 0xff; - b = src[i + 1] & 0xff; - c = src[i + 2] & 0xff; - - dst[k + 0] = table[a >> 2]; - dst[k + 1] = table[((a & 3) << 4) | (b >> 4)]; - dst[k + 2] = table[((b & 0x0f) << 2) | (c >> 6)]; - dst[k + 3] = table[c & 0x3f]; - - i += 3; - k += 4; - } - - if (n != slen) { - switch (slen - n) { - case 1: - a = src[i + 0] & 0xff; - dst[k + 0] = table[a >> 2]; - dst[k + 1] = table[(a & 3) << 4]; - dst[k + 2] = '='; - dst[k + 3] = '='; - break; - - case 2: - a = src[i + 0] & 0xff; - b = src[i + 1] & 0xff; - dst[k + 0] = table[a >> 2]; - dst[k + 1] = table[((a & 3) << 4) | (b >> 4)]; - dst[k + 2] = table[(b & 0x0f) << 2]; - dst[k + 3] = '='; - break; - } - } - - return dlen; -} - - -static size_t hex_encode(const char* src, size_t slen, char* dst, size_t dlen) { - // We know how much we'll write, just make sure that there's space. - assert(dlen >= slen * 2 && - "not enough space provided for hex encode"); - - dlen = slen * 2; - for (uint32_t i = 0, k = 0; k < dlen; i += 1, k += 2) { - static const char hex[] = "0123456789abcdef"; - uint8_t val = static_cast(src[i]); - dst[k + 0] = hex[val >> 4]; - dst[k + 1] = hex[val & 15]; - } - - return dlen; -} - - - -static Local Encode(const char* buf, - size_t buflen, - enum Encoding encoding) { - assert(buflen <= node::Buffer::kMaxLength); - if (!buflen && encoding != BUFFER) - return New("").ToLocalChecked(); - - Local val; - switch (encoding) { - case BUFFER: - return CopyBuffer(buf, buflen).ToLocalChecked(); - - case ASCII: - if (contains_non_ascii(buf, buflen)) { - char* out = new char[buflen]; - force_ascii(buf, out, buflen); - val = New(out, buflen).ToLocalChecked(); - delete[] out; - } else { - val = New(buf, buflen).ToLocalChecked(); - } - break; - - case UTF8: - val = New(buf, buflen).ToLocalChecked(); - break; - - case BINARY: { - // TODO(isaacs) use ExternalTwoByteString? - const unsigned char *cbuf = reinterpret_cast(buf); - uint16_t * twobytebuf = new uint16_t[buflen]; - for (size_t i = 0; i < buflen; i++) { - // XXX is the following line platform independent? 
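// (Answering the XXX above: the widening copy below is portable. Each
// unsigned char is zero-extended into a uint16_t code unit, which maps
// Latin-1 bytes onto the corresponding UTF-16 code points on any platform.)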
- twobytebuf[i] = cbuf[i]; - } - val = New(twobytebuf, buflen).ToLocalChecked(); - delete[] twobytebuf; - break; - } - - case BASE64: { - size_t dlen = base64_encoded_size(buflen); - char* dst = new char[dlen]; - - size_t written = base64_encode(buf, buflen, dst, dlen); - assert(written == dlen); - - val = New(dst, dlen).ToLocalChecked(); - delete[] dst; - break; - } - - case UCS2: { - const uint16_t* data = reinterpret_cast(buf); - val = New(data, buflen / 2).ToLocalChecked(); - break; - } - - case HEX: { - size_t dlen = buflen * 2; - char* dst = new char[dlen]; - size_t written = hex_encode(buf, buflen, dst, dlen); - assert(written == dlen); - - val = New(dst, dlen).ToLocalChecked(); - delete[] dst; - break; - } - - default: - assert(0 && "unknown encoding"); - break; - } - - return val; -} - -#undef base64_encoded_size - -} // end of namespace imp - -#endif // NAN_STRING_BYTES_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_typedarray_contents.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_typedarray_contents.h deleted file mode 100644 index c6ac8a41..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_typedarray_contents.h +++ /dev/null @@ -1,96 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_TYPEDARRAY_CONTENTS_H_ -#define NAN_TYPEDARRAY_CONTENTS_H_ - -template -class TypedArrayContents { - public: - inline explicit TypedArrayContents(v8::Local from) : - length_(0), data_(NULL) { - HandleScope scope; - - size_t length = 0; - void* data = NULL; - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - - if (from->IsArrayBufferView()) { - v8::Local array = - v8::Local::Cast(from); - - const size_t byte_length = array->ByteLength(); - const ptrdiff_t byte_offset = array->ByteOffset(); - v8::Local buffer = array->Buffer(); - - length = byte_length / sizeof(T); -// Actually it's 7.9 here but this would lead to ABI issues with Node.js 13 -// using 7.8 till 13.2.0. 
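// Usage sketch for this helper (not part of the original header; assumes
// NAN 2.x, and `Sum` is illustrative):
//
//   NAN_METHOD(Sum) {
//     Nan::TypedArrayContents<double> values(info[0]);  // zero-copy view
//     double total = 0;
//     for (size_t i = 0; i < values.length(); ++i) total += (*values)[i];
//     info.GetReturnValue().Set(Nan::New(total));
//   }
//
// If info[0] is not an ArrayBufferView, length() stays 0 and the data
// pointer stays NULL, so the loop above safely does nothing.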
-#if (V8_MAJOR_VERSION >= 8) - data = static_cast(buffer->GetBackingStore()->Data()) + byte_offset; -#else - data = static_cast(buffer->GetContents().Data()) + byte_offset; -#endif - } - -#else - - if (from->IsObject() && !from->IsNull()) { - v8::Local array = v8::Local::Cast(from); - - MaybeLocal buffer = Get(array, - New("buffer").ToLocalChecked()); - MaybeLocal byte_length = Get(array, - New("byteLength").ToLocalChecked()); - MaybeLocal byte_offset = Get(array, - New("byteOffset").ToLocalChecked()); - - if (!buffer.IsEmpty() && - !byte_length.IsEmpty() && byte_length.ToLocalChecked()->IsUint32() && - !byte_offset.IsEmpty() && byte_offset.ToLocalChecked()->IsUint32()) { - data = array->GetIndexedPropertiesExternalArrayData(); - if(data) { - length = byte_length.ToLocalChecked()->Uint32Value() / sizeof(T); - } - } - } - -#endif - -#if defined(_MSC_VER) && _MSC_VER >= 1900 || __cplusplus >= 201103L - assert(reinterpret_cast(data) % alignof (T) == 0); -#elif defined(_MSC_VER) && _MSC_VER >= 1600 || defined(__GNUC__) - assert(reinterpret_cast(data) % __alignof(T) == 0); -#else - assert(reinterpret_cast(data) % sizeof (T) == 0); -#endif - - length_ = length; - data_ = static_cast(data); - } - - inline size_t length() const { return length_; } - inline T* operator*() { return data_; } - inline const T* operator*() const { return data_; } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(TypedArrayContents) - - //Disable heap allocation - void *operator new(size_t size); - void operator delete(void *, size_t) { - abort(); - } - - size_t length_; - T* data_; -}; - -#endif // NAN_TYPEDARRAY_CONTENTS_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_weak.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_weak.h deleted file mode 100644 index 7e7ab07b..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/nan_weak.h +++ /dev/null @@ -1,437 +0,0 @@ -/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -#ifndef NAN_WEAK_H_ -#define NAN_WEAK_H_ - -static const int kInternalFieldsInWeakCallback = 2; -static const int kNoInternalFieldIndex = -1; - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -# define NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ \ - v8::WeakCallbackInfo > const& -# define NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ \ - NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -# define NAN_WEAK_PARAMETER_CALLBACK_SIG_ NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -# define NAN_WEAK_TWOFIELD_CALLBACK_SIG_ NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ -#elif NODE_MODULE_VERSION > IOJS_1_1_MODULE_VERSION -# define NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ \ - v8::PhantomCallbackData > const& -# define NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ \ - NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -# define NAN_WEAK_PARAMETER_CALLBACK_SIG_ NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -# define NAN_WEAK_TWOFIELD_CALLBACK_SIG_ NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ -#elif NODE_MODULE_VERSION > NODE_0_12_MODULE_VERSION -# define NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ \ - v8::PhantomCallbackData > const& -# define NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ \ - 
v8::InternalFieldsCallbackData, void> const& -# define NAN_WEAK_PARAMETER_CALLBACK_SIG_ NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -# define NAN_WEAK_TWOFIELD_CALLBACK_SIG_ NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ -#elif NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION -# define NAN_WEAK_CALLBACK_DATA_TYPE_ \ - v8::WeakCallbackData > const& -# define NAN_WEAK_CALLBACK_SIG_ NAN_WEAK_CALLBACK_DATA_TYPE_ -#else -# define NAN_WEAK_CALLBACK_DATA_TYPE_ void * -# define NAN_WEAK_CALLBACK_SIG_ \ - v8::Persistent, NAN_WEAK_CALLBACK_DATA_TYPE_ -#endif - -template -class WeakCallbackInfo { - public: - typedef void (*Callback)(const WeakCallbackInfo& data); - WeakCallbackInfo( - Persistent *persistent - , Callback callback - , void *parameter - , void *field1 = 0 - , void *field2 = 0) : - callback_(callback), isolate_(0), parameter_(parameter) { - std::memcpy(&persistent_, persistent, sizeof (v8::Persistent)); - internal_fields_[0] = field1; - internal_fields_[1] = field2; - } - inline v8::Isolate *GetIsolate() const { return isolate_; } - inline T *GetParameter() const { return static_cast(parameter_); } - inline void *GetInternalField(int index) const { - assert((index == 0 || index == 1) && "internal field index out of bounds"); - if (index == 0) { - return internal_fields_[0]; - } else { - return internal_fields_[1]; - } - } - - private: - NAN_DISALLOW_ASSIGN_COPY_MOVE(WeakCallbackInfo) - Callback callback_; - v8::Isolate *isolate_; - void *parameter_; - void *internal_fields_[kInternalFieldsInWeakCallback]; - v8::Persistent persistent_; - template friend class Persistent; - template friend class PersistentBase; -#if NODE_MODULE_VERSION <= NODE_0_12_MODULE_VERSION -# if NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - template - static void invoke(NAN_WEAK_CALLBACK_SIG_ data); - template - static WeakCallbackInfo *unwrap(NAN_WEAK_CALLBACK_DATA_TYPE_ data); -# else - static void invoke(NAN_WEAK_CALLBACK_SIG_ data); - static WeakCallbackInfo *unwrap(NAN_WEAK_CALLBACK_DATA_TYPE_ data); -# endif -#else -# if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - template - static void invokeparameter(NAN_WEAK_PARAMETER_CALLBACK_SIG_ data); - template - static void invoketwofield(NAN_WEAK_TWOFIELD_CALLBACK_SIG_ data); -# else - static void invokeparameter(NAN_WEAK_PARAMETER_CALLBACK_SIG_ data); - static void invoketwofield(NAN_WEAK_TWOFIELD_CALLBACK_SIG_ data); -# endif - static WeakCallbackInfo *unwrapparameter( - NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ data); - static WeakCallbackInfo *unwraptwofield( - NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ data); -#endif -}; - - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) - -template -template -void -WeakCallbackInfo::invokeparameter(NAN_WEAK_PARAMETER_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwrapparameter(data); - if (isFirstPass) { - cbinfo->persistent_.Reset(); - data.SetSecondPassCallback(invokeparameter); - } else { - cbinfo->callback_(*cbinfo); - delete cbinfo; - } -} - -template -template -void -WeakCallbackInfo::invoketwofield(NAN_WEAK_TWOFIELD_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwraptwofield(data); - if (isFirstPass) { - cbinfo->persistent_.Reset(); - data.SetSecondPassCallback(invoketwofield); - } else { - cbinfo->callback_(*cbinfo); - delete cbinfo; - } -} - -template -WeakCallbackInfo *WeakCallbackInfo::unwrapparameter( - 
NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ data) { - WeakCallbackInfo *cbinfo = - static_cast*>(data.GetParameter()); - cbinfo->isolate_ = data.GetIsolate(); - return cbinfo; -} - -template -WeakCallbackInfo *WeakCallbackInfo::unwraptwofield( - NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ data) { - WeakCallbackInfo *cbinfo = - static_cast*>(data.GetInternalField(0)); - cbinfo->isolate_ = data.GetIsolate(); - return cbinfo; -} - -#undef NAN_WEAK_PARAMETER_CALLBACK_SIG_ -#undef NAN_WEAK_TWOFIELD_CALLBACK_SIG_ -#undef NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -#undef NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ -# elif NODE_MODULE_VERSION > NODE_0_12_MODULE_VERSION - -template -void -WeakCallbackInfo::invokeparameter(NAN_WEAK_PARAMETER_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwrapparameter(data); - cbinfo->persistent_.Reset(); - cbinfo->callback_(*cbinfo); - delete cbinfo; -} - -template -void -WeakCallbackInfo::invoketwofield(NAN_WEAK_TWOFIELD_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwraptwofield(data); - cbinfo->persistent_.Reset(); - cbinfo->callback_(*cbinfo); - delete cbinfo; -} - -template -WeakCallbackInfo *WeakCallbackInfo::unwrapparameter( - NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ data) { - WeakCallbackInfo *cbinfo = - static_cast*>(data.GetParameter()); - cbinfo->isolate_ = data.GetIsolate(); - return cbinfo; -} - -template -WeakCallbackInfo *WeakCallbackInfo::unwraptwofield( - NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ data) { - WeakCallbackInfo *cbinfo = - static_cast*>(data.GetInternalField1()); - cbinfo->isolate_ = data.GetIsolate(); - return cbinfo; -} - -#undef NAN_WEAK_PARAMETER_CALLBACK_SIG_ -#undef NAN_WEAK_TWOFIELD_CALLBACK_SIG_ -#undef NAN_WEAK_PARAMETER_CALLBACK_DATA_TYPE_ -#undef NAN_WEAK_TWOFIELD_CALLBACK_DATA_TYPE_ -#elif NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION - -template -template -void WeakCallbackInfo::invoke(NAN_WEAK_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwrap(data); - cbinfo->persistent_.Reset(); - cbinfo->callback_(*cbinfo); - delete cbinfo; -} - -template -template -WeakCallbackInfo *WeakCallbackInfo::unwrap( - NAN_WEAK_CALLBACK_DATA_TYPE_ data) { - void *parameter = data.GetParameter(); - WeakCallbackInfo *cbinfo = - static_cast*>(parameter); - cbinfo->isolate_ = data.GetIsolate(); - return cbinfo; -} - -#undef NAN_WEAK_CALLBACK_SIG_ -#undef NAN_WEAK_CALLBACK_DATA_TYPE_ -#else - -template -void WeakCallbackInfo::invoke(NAN_WEAK_CALLBACK_SIG_ data) { - WeakCallbackInfo *cbinfo = unwrap(data); - cbinfo->persistent_.Dispose(); - cbinfo->persistent_.Clear(); - cbinfo->callback_(*cbinfo); - delete cbinfo; -} - -template -WeakCallbackInfo *WeakCallbackInfo::unwrap( - NAN_WEAK_CALLBACK_DATA_TYPE_ data) { - WeakCallbackInfo *cbinfo = - static_cast*>(data); - cbinfo->isolate_ = v8::Isolate::GetCurrent(); - return cbinfo; -} - -#undef NAN_WEAK_CALLBACK_SIG_ -#undef NAN_WEAK_CALLBACK_DATA_TYPE_ -#endif - -#if defined(V8_MAJOR_VERSION) && (V8_MAJOR_VERSION > 4 || \ - (V8_MAJOR_VERSION == 4 && defined(V8_MINOR_VERSION) && V8_MINOR_VERSION >= 3)) -template -template -inline void Persistent::SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - WeakCallbackInfo
<P>
*wcbd; - if (type == WeakCallbackType::kParameter) { - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , parameter); - v8::PersistentBase::SetWeak( - wcbd - , WeakCallbackInfo
<P>
::template invokeparameter - , type); - } else { - v8::Local* self_v(reinterpret_cast*>(this)); - assert((*self_v)->IsObject()); - v8::Local self((*self_v).As()); - int count = self->InternalFieldCount(); - void *internal_fields[kInternalFieldsInWeakCallback] = {0, 0}; - for (int i = 0; i < count && i < kInternalFieldsInWeakCallback; i++) { - internal_fields[i] = self->GetAlignedPointerFromInternalField(i); - } - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , 0 - , internal_fields[0] - , internal_fields[1]); - self->SetAlignedPointerInInternalField(0, wcbd); - v8::PersistentBase::SetWeak( - static_cast*>(0) - , WeakCallbackInfo
<P>
::template invoketwofield - , type); - } -} -#elif NODE_MODULE_VERSION > IOJS_1_1_MODULE_VERSION -template -template -inline void Persistent::SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - WeakCallbackInfo
<P>
*wcbd; - if (type == WeakCallbackType::kParameter) { - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , parameter); - v8::PersistentBase::SetPhantom( - wcbd - , WeakCallbackInfo
<P>
::invokeparameter); - } else { - v8::Local* self_v(reinterpret_cast*>(this)); - assert((*self_v)->IsObject()); - v8::Local self((*self_v).As()); - int count = self->InternalFieldCount(); - void *internal_fields[kInternalFieldsInWeakCallback] = {0, 0}; - for (int i = 0; i < count && i < kInternalFieldsInWeakCallback; i++) { - internal_fields[i] = self->GetAlignedPointerFromInternalField(i); - } - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , 0 - , internal_fields[0] - , internal_fields[1]); - self->SetAlignedPointerInInternalField(0, wcbd); - v8::PersistentBase::SetPhantom( - static_cast*>(0) - , WeakCallbackInfo
<P>
::invoketwofield - , 0 - , count > 1 ? 1 : kNoInternalFieldIndex); - } -} -#elif NODE_MODULE_VERSION > NODE_0_12_MODULE_VERSION -template -template -inline void Persistent::SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - WeakCallbackInfo
<P>
*wcbd; - if (type == WeakCallbackType::kParameter) { - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , parameter); - v8::PersistentBase::SetPhantom( - wcbd - , WeakCallbackInfo
<P>
::invokeparameter); - } else { - v8::Local* self_v(reinterpret_cast*>(this)); - assert((*self_v)->IsObject()); - v8::Local self((*self_v).As()); - int count = self->InternalFieldCount(); - void *internal_fields[kInternalFieldsInWeakCallback] = {0, 0}; - for (int i = 0; i < count && i < kInternalFieldsInWeakCallback; i++) { - internal_fields[i] = self->GetAlignedPointerFromInternalField(i); - } - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , 0 - , internal_fields[0] - , internal_fields[1]); - self->SetAlignedPointerInInternalField(0, wcbd); - v8::PersistentBase::SetPhantom( - WeakCallbackInfo
<P>
::invoketwofield - , 0 - , count > 1 ? 1 : kNoInternalFieldIndex); - } -} -#elif NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION -template -template -inline void Persistent::SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - WeakCallbackInfo
<P>
*wcbd; - if (type == WeakCallbackType::kParameter) { - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , parameter); - v8::PersistentBase::SetWeak(wcbd, WeakCallbackInfo
<P>
::invoke); - } else { - v8::Local* self_v(reinterpret_cast*>(this)); - assert((*self_v)->IsObject()); - v8::Local self((*self_v).As()); - int count = self->InternalFieldCount(); - void *internal_fields[kInternalFieldsInWeakCallback] = {0, 0}; - for (int i = 0; i < count && i < kInternalFieldsInWeakCallback; i++) { - internal_fields[i] = self->GetAlignedPointerFromInternalField(i); - } - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , 0 - , internal_fields[0] - , internal_fields[1]); - v8::PersistentBase::SetWeak(wcbd, WeakCallbackInfo
<P>
::invoke); - } -} -#else -template -template -inline void PersistentBase::SetWeak( - P *parameter - , typename WeakCallbackInfo
<P>
::Callback callback - , WeakCallbackType type) { - WeakCallbackInfo
<P>
*wcbd; - if (type == WeakCallbackType::kParameter) { - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , parameter); - persistent.MakeWeak(wcbd, WeakCallbackInfo
<P>
::invoke); - } else { - v8::Local* self_v(reinterpret_cast*>(this)); - assert((*self_v)->IsObject()); - v8::Local self((*self_v).As()); - int count = self->InternalFieldCount(); - void *internal_fields[kInternalFieldsInWeakCallback] = {0, 0}; - for (int i = 0; i < count && i < kInternalFieldsInWeakCallback; i++) { - internal_fields[i] = self->GetPointerFromInternalField(i); - } - wcbd = new WeakCallbackInfo
<P>
( - reinterpret_cast*>(this) - , callback - , 0 - , internal_fields[0] - , internal_fields[1]); - persistent.MakeWeak(wcbd, WeakCallbackInfo
<P>
::invoke); - } -} -#endif - -#endif // NAN_WEAK_H_ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/package.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/package.json deleted file mode 100644 index a03d8139..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/package.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "_from": "nan@^2.14.0", - "_id": "nan@2.16.0", - "_inBundle": false, - "_integrity": "sha512-UdAqHyFngu7TfQKsCBgAA6pWDkT8MAO7d0jyOecVhN5354xbLqdn8mV9Tat9gepAupm0bt2DbeaSC8vS52MuFA==", - "_location": "/nan", - "_phantomChildren": {}, - "_requested": { - "type": "range", - "registry": true, - "raw": "nan@^2.14.0", - "name": "nan", - "escapedName": "nan", - "rawSpec": "^2.14.0", - "saveSpec": null, - "fetchSpec": "^2.14.0" - }, - "_requiredBy": [ - "/node-rdkafka" - ], - "_resolved": "https://registry.npmmirror.com/nan/-/nan-2.16.0.tgz", - "_shasum": "664f43e45460fb98faf00edca0bb0d7b8dce7916", - "_spec": "nan@^2.14.0", - "_where": "/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka", - "bugs": { - "url": "https://github.com/nodejs/nan/issues" - }, - "bundleDependencies": false, - "contributors": [ - { - "name": "Rod Vagg", - "email": "r@va.gg", - "url": "https://github.com/rvagg" - }, - { - "name": "Benjamin Byholm", - "email": "bbyholm@abo.fi", - "url": "https://github.com/kkoopa/" - }, - { - "name": "Trevor Norris", - "email": "trev.norris@gmail.com", - "url": "https://github.com/trevnorris" - }, - { - "name": "Nathan Rajlich", - "email": "nathan@tootallnate.net", - "url": "https://github.com/TooTallNate" - }, - { - "name": "Brett Lawson", - "email": "brett19@gmail.com", - "url": "https://github.com/brett19" - }, - { - "name": "Ben Noordhuis", - "email": "info@bnoordhuis.nl", - "url": "https://github.com/bnoordhuis" - }, - { - "name": "David Siegel", - "email": "david@artcom.de", - "url": "https://github.com/agnat" - }, - { - "name": "Michael Ira Krufky", - "email": "mkrufky@gmail.com", - "url": "https://github.com/mkrufky" - } - ], - "deprecated": false, - "description": "Native Abstractions for Node.js: C++ header for Node 0.8 -> 14 compatibility", - "devDependencies": { - "bindings": "~1.2.1", - "commander": "^2.8.1", - "glob": "^5.0.14", - "node-gyp": "~8.4.1", - "readable-stream": "^2.1.4", - "request": "=2.81.0", - "tap": "~0.7.1", - "xtend": "~4.0.0" - }, - "homepage": "https://github.com/nodejs/nan#readme", - "license": "MIT", - "main": "include_dirs.js", - "name": "nan", - "repository": { - "type": "git", - "url": "git://github.com/nodejs/nan.git" - }, - "scripts": { - "docs": "doc/.build.sh", - "rebuild-tests": "node-gyp rebuild --msvs_version=2015 --directory test", - "test": "tap --gc --stderr test/js/*-test.js", - "test:worker": "node --experimental-worker test/tap-as-worker.js --gc --stderr test/js/*-test.js" - }, - "version": "2.16.0" -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/1to2.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/1to2.js deleted file mode 100755 index 6af25058..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/1to2.js +++ /dev/null @@ -1,412 +0,0 @@ -#!/usr/bin/env node 
-/********************************************************************* - * NAN - Native Abstractions for Node.js - * - * Copyright (c) 2018 NAN contributors - * - * MIT License - ********************************************************************/ - -var commander = require('commander'), - fs = require('fs'), - glob = require('glob'), - groups = [], - total = 0, - warning1 = '/* ERROR: Rewrite using Buffer */\n', - warning2 = '\\/\\* ERROR\\: Rewrite using Buffer \\*\\/\\n', - length, - i; - -fs.readFile(__dirname + '/package.json', 'utf8', function (err, data) { - if (err) { - throw err; - } - - commander - .version(JSON.parse(data).version) - .usage('[options] ') - .parse(process.argv); - - if (!process.argv.slice(2).length) { - commander.outputHelp(); - } -}); - -/* construct strings representing regular expressions - each expression contains a unique group allowing for identification of the match - the index of this key group, relative to the regular expression in question, - is indicated by the first array member */ - -/* simple substistutions, key group is the entire match, 0 */ -groups.push([0, [ - '_NAN_', - 'NODE_SET_METHOD', - 'NODE_SET_PROTOTYPE_METHOD', - 'NanAsciiString', - 'NanEscapeScope', - 'NanReturnValue', - 'NanUcs2String'].join('|')]); - -/* substitutions of parameterless macros, key group is 1 */ -groups.push([1, ['(', [ - 'NanEscapableScope', - 'NanReturnNull', - 'NanReturnUndefined', - 'NanScope'].join('|'), ')\\(\\)'].join('')]); - -/* replace TryCatch with NanTryCatch once, gobbling possible namespace, key group 2 */ -groups.push([2, '(?:(?:v8\\:\\:)?|(Nan)?)(TryCatch)']); - -/* NanNew("string") will likely not fail a ToLocalChecked(), key group 1 */ -groups.push([1, ['(NanNew)', '(\\("[^\\"]*"[^\\)]*\\))(?!\\.ToLocalChecked\\(\\))'].join('')]); - -/* Removed v8 APIs, warn that the code needs rewriting using node::Buffer, key group 2 */ -groups.push([2, ['(', warning2, ')?', '^.*?(', [ - 'GetIndexedPropertiesExternalArrayDataLength', - 'GetIndexedPropertiesExternalArrayData', - 'GetIndexedPropertiesExternalArrayDataType', - 'GetIndexedPropertiesPixelData', - 'GetIndexedPropertiesPixelDataLength', - 'HasIndexedPropertiesInExternalArrayData', - 'HasIndexedPropertiesInPixelData', - 'SetIndexedPropertiesToExternalArrayData', - 'SetIndexedPropertiesToPixelData'].join('|'), ')'].join('')]); - -/* No need for NanScope in V8-exposed methods, key group 2 */ -groups.push([2, ['((', [ - 'NAN_METHOD', - 'NAN_GETTER', - 'NAN_SETTER', - 'NAN_PROPERTY_GETTER', - 'NAN_PROPERTY_SETTER', - 'NAN_PROPERTY_ENUMERATOR', - 'NAN_PROPERTY_DELETER', - 'NAN_PROPERTY_QUERY', - 'NAN_INDEX_GETTER', - 'NAN_INDEX_SETTER', - 'NAN_INDEX_ENUMERATOR', - 'NAN_INDEX_DELETER', - 'NAN_INDEX_QUERY'].join('|'), ')\\([^\\)]*\\)\\s*\\{)\\s*NanScope\\(\\)\\s*;'].join('')]); - -/* v8::Value::ToXXXXXXX returns v8::MaybeLocal, key group 3 */ -groups.push([3, ['([\\s\\(\\)])([^\\s\\(\\)]+)->(', [ - 'Boolean', - 'Number', - 'String', - 'Object', - 'Integer', - 'Uint32', - 'Int32'].join('|'), ')\\('].join('')]); - -/* v8::Value::XXXXXXXValue returns v8::Maybe, key group 3 */ -groups.push([3, ['([\\s\\(\\)])([^\\s\\(\\)]+)->((?:', [ - 'Boolean', - 'Number', - 'Integer', - 'Uint32', - 'Int32'].join('|'), ')Value)\\('].join('')]); - -/* NAN_WEAK_CALLBACK macro was removed, write out callback definition, key group 1 */ -groups.push([1, '(NAN_WEAK_CALLBACK)\\(([^\\s\\)]+)\\)']); - -/* node::ObjectWrap and v8::Persistent have been replaced with Nan implementations, key group 1 */ -groups.push([1, ['(', [ - 
'NanDisposePersistent', - 'NanObjectWrapHandle'].join('|'), ')\\s*\\(\\s*([^\\s\\)]+)'].join('')]); - -/* Since NanPersistent there is no need for NanMakeWeakPersistent, key group 1 */ -groups.push([1, '(NanMakeWeakPersistent)\\s*\\(\\s*([^\\s,]+)\\s*,\\s*']); - -/* Many methods of v8::Object and others now return v8::MaybeLocal, key group 3 */ -groups.push([3, ['([\\s])([^\\s]+)->(', [ - 'GetEndColumn', - 'GetFunction', - 'GetLineNumber', - 'NewInstance', - 'GetPropertyNames', - 'GetOwnPropertyNames', - 'GetSourceLine', - 'GetStartColumn', - 'ObjectProtoToString', - 'ToArrayIndex', - 'ToDetailString', - 'CallAsConstructor', - 'CallAsFunction', - 'CloneElementAt', - 'Delete', - 'ForceSet', - 'Get', - 'GetPropertyAttributes', - 'GetRealNamedProperty', - 'GetRealNamedPropertyInPrototypeChain', - 'Has', - 'HasOwnProperty', - 'HasRealIndexedProperty', - 'HasRealNamedCallbackProperty', - 'HasRealNamedProperty', - 'Set', - 'SetAccessor', - 'SetIndexedPropertyHandler', - 'SetNamedPropertyHandler', - 'SetPrototype'].join('|'), ')\\('].join('')]); - -/* You should get an error if any of these fail anyways, - or handle the error better, it is indicated either way, key group 2 */ -groups.push([2, ['NanNew(<(?:v8\\:\\:)?(', ['Date', 'String', 'RegExp'].join('|'), ')>)(\\([^\\)]*\\))(?!\\.ToLocalChecked\\(\\))'].join('')]); - -/* v8::Value::Equals now returns a v8::Maybe, key group 3 */ -groups.push([3, '([\\s\\(\\)])([^\\s\\(\\)]+)->(Equals)\\(([^\\s\\)]+)']); - -/* NanPersistent makes this unnecessary, key group 1 */ -groups.push([1, '(NanAssignPersistent)(?:]+>)?\\(([^,]+),\\s*']); - -/* args has been renamed to info, key group 2 */ -groups.push([2, '(\\W)(args)(\\W)']) - -/* node::ObjectWrap was replaced with NanObjectWrap, key group 2 */ -groups.push([2, '(\\W)(?:node\\:\\:)?(ObjectWrap)(\\W)']); - -/* v8::Persistent was replaced with NanPersistent, key group 2 */ -groups.push([2, '(\\W)(?:v8\\:\\:)?(Persistent)(\\W)']); - -/* counts the number of capturing groups in a well-formed regular expression, - ignoring non-capturing groups and escaped parentheses */ -function groupcount(s) { - var positive = s.match(/\((?!\?)/g), - negative = s.match(/\\\(/g); - return (positive ? positive.length : 0) - (negative ? 
negative.length : 0); -} - -/* compute the absolute position of each key group in the joined master RegExp */ -for (i = 1, length = groups.length; i < length; i++) { - total += groupcount(groups[i - 1][1]); - groups[i][0] += total; -} - -/* create the master RegExp, whis is the union of all the groups' expressions */ -master = new RegExp(groups.map(function (a) { return a[1]; }).join('|'), 'gm'); - -/* replacement function for String.replace, receives 21 arguments */ -function replace() { - /* simple expressions */ - switch (arguments[groups[0][0]]) { - case '_NAN_': - return 'NAN_'; - case 'NODE_SET_METHOD': - return 'NanSetMethod'; - case 'NODE_SET_PROTOTYPE_METHOD': - return 'NanSetPrototypeMethod'; - case 'NanAsciiString': - return 'NanUtf8String'; - case 'NanEscapeScope': - return 'scope.Escape'; - case 'NanReturnNull': - return 'info.GetReturnValue().SetNull'; - case 'NanReturnValue': - return 'info.GetReturnValue().Set'; - case 'NanUcs2String': - return 'v8::String::Value'; - default: - } - - /* macros without arguments */ - switch (arguments[groups[1][0]]) { - case 'NanEscapableScope': - return 'NanEscapableScope scope' - case 'NanReturnUndefined': - return 'return'; - case 'NanScope': - return 'NanScope scope'; - default: - } - - /* TryCatch, emulate negative backref */ - if (arguments[groups[2][0]] === 'TryCatch') { - return arguments[groups[2][0] - 1] ? arguments[0] : 'NanTryCatch'; - } - - /* NanNew("foo") --> NanNew("foo").ToLocalChecked() */ - if (arguments[groups[3][0]] === 'NanNew') { - return [arguments[0], '.ToLocalChecked()'].join(''); - } - - /* insert warning for removed functions as comment on new line above */ - switch (arguments[groups[4][0]]) { - case 'GetIndexedPropertiesExternalArrayData': - case 'GetIndexedPropertiesExternalArrayDataLength': - case 'GetIndexedPropertiesExternalArrayDataType': - case 'GetIndexedPropertiesPixelData': - case 'GetIndexedPropertiesPixelDataLength': - case 'HasIndexedPropertiesInExternalArrayData': - case 'HasIndexedPropertiesInPixelData': - case 'SetIndexedPropertiesToExternalArrayData': - case 'SetIndexedPropertiesToPixelData': - return arguments[groups[4][0] - 1] ? 
arguments[0] : [warning1, arguments[0]].join(''); - default: - } - - /* remove unnecessary NanScope() */ - switch (arguments[groups[5][0]]) { - case 'NAN_GETTER': - case 'NAN_METHOD': - case 'NAN_SETTER': - case 'NAN_INDEX_DELETER': - case 'NAN_INDEX_ENUMERATOR': - case 'NAN_INDEX_GETTER': - case 'NAN_INDEX_QUERY': - case 'NAN_INDEX_SETTER': - case 'NAN_PROPERTY_DELETER': - case 'NAN_PROPERTY_ENUMERATOR': - case 'NAN_PROPERTY_GETTER': - case 'NAN_PROPERTY_QUERY': - case 'NAN_PROPERTY_SETTER': - return arguments[groups[5][0] - 1]; - default: - } - - /* Value conversion */ - switch (arguments[groups[6][0]]) { - case 'Boolean': - case 'Int32': - case 'Integer': - case 'Number': - case 'Object': - case 'String': - case 'Uint32': - return [arguments[groups[6][0] - 2], 'NanTo(', arguments[groups[6][0] - 1]].join(''); - default: - } - - /* other value conversion */ - switch (arguments[groups[7][0]]) { - case 'BooleanValue': - return [arguments[groups[7][0] - 2], 'NanTo(', arguments[groups[7][0] - 1]].join(''); - case 'Int32Value': - return [arguments[groups[7][0] - 2], 'NanTo(', arguments[groups[7][0] - 1]].join(''); - case 'IntegerValue': - return [arguments[groups[7][0] - 2], 'NanTo(', arguments[groups[7][0] - 1]].join(''); - case 'Uint32Value': - return [arguments[groups[7][0] - 2], 'NanTo(', arguments[groups[7][0] - 1]].join(''); - default: - } - - /* NAN_WEAK_CALLBACK */ - if (arguments[groups[8][0]] === 'NAN_WEAK_CALLBACK') { - return ['template\nvoid ', - arguments[groups[8][0] + 1], '(const NanWeakCallbackInfo &data)'].join(''); - } - - /* use methods on NAN classes instead */ - switch (arguments[groups[9][0]]) { - case 'NanDisposePersistent': - return [arguments[groups[9][0] + 1], '.Reset('].join(''); - case 'NanObjectWrapHandle': - return [arguments[groups[9][0] + 1], '->handle('].join(''); - default: - } - - /* use method on NanPersistent instead */ - if (arguments[groups[10][0]] === 'NanMakeWeakPersistent') { - return arguments[groups[10][0] + 1] + '.SetWeak('; - } - - /* These return Maybes, the upper ones take no arguments */ - switch (arguments[groups[11][0]]) { - case 'GetEndColumn': - case 'GetFunction': - case 'GetLineNumber': - case 'GetOwnPropertyNames': - case 'GetPropertyNames': - case 'GetSourceLine': - case 'GetStartColumn': - case 'NewInstance': - case 'ObjectProtoToString': - case 'ToArrayIndex': - case 'ToDetailString': - return [arguments[groups[11][0] - 2], 'Nan', arguments[groups[11][0]], '(', arguments[groups[11][0] - 1]].join(''); - case 'CallAsConstructor': - case 'CallAsFunction': - case 'CloneElementAt': - case 'Delete': - case 'ForceSet': - case 'Get': - case 'GetPropertyAttributes': - case 'GetRealNamedProperty': - case 'GetRealNamedPropertyInPrototypeChain': - case 'Has': - case 'HasOwnProperty': - case 'HasRealIndexedProperty': - case 'HasRealNamedCallbackProperty': - case 'HasRealNamedProperty': - case 'Set': - case 'SetAccessor': - case 'SetIndexedPropertyHandler': - case 'SetNamedPropertyHandler': - case 'SetPrototype': - return [arguments[groups[11][0] - 2], 'Nan', arguments[groups[11][0]], '(', arguments[groups[11][0] - 1], ', '].join(''); - default: - } - - /* Automatic ToLocalChecked(), take it or leave it */ - switch (arguments[groups[12][0]]) { - case 'Date': - case 'String': - case 'RegExp': - return ['NanNew', arguments[groups[12][0] - 1], arguments[groups[12][0] + 1], '.ToLocalChecked()'].join(''); - default: - } - - /* NanEquals is now required for uniformity */ - if (arguments[groups[13][0]] === 'Equals') { - return [arguments[groups[13][0] - 
1], 'NanEquals(', arguments[groups[13][0] - 1], ', ', arguments[groups[13][0] + 1]].join(''); - } - - /* use method on replacement class instead */ - if (arguments[groups[14][0]] === 'NanAssignPersistent') { - return [arguments[groups[14][0] + 1], '.Reset('].join(''); - } - - /* args --> info */ - if (arguments[groups[15][0]] === 'args') { - return [arguments[groups[15][0] - 1], 'info', arguments[groups[15][0] + 1]].join(''); - } - - /* ObjectWrap --> NanObjectWrap */ - if (arguments[groups[16][0]] === 'ObjectWrap') { - return [arguments[groups[16][0] - 1], 'NanObjectWrap', arguments[groups[16][0] + 1]].join(''); - } - - /* Persistent --> NanPersistent */ - if (arguments[groups[17][0]] === 'Persistent') { - return [arguments[groups[17][0] - 1], 'NanPersistent', arguments[groups[17][0] + 1]].join(''); - } - - /* This should not happen. A switch is probably missing a case if it does. */ - throw 'Unhandled match: ' + arguments[0]; -} - -/* reads a file, runs replacement and writes it back */ -function processFile(file) { - fs.readFile(file, {encoding: 'utf8'}, function (err, data) { - if (err) { - throw err; - } - - /* run replacement twice, might need more runs */ - fs.writeFile(file, data.replace(master, replace).replace(master, replace), function (err) { - if (err) { - throw err; - } - }); - }); -} - -/* process file names from command line and process the identified files */ -for (i = 2, length = process.argv.length; i < length; i++) { - glob(process.argv[i], function (err, matches) { - if (err) { - throw err; - } - matches.forEach(processFile); - }); -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/README.md deleted file mode 100644 index 7f07e4b8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/README.md +++ /dev/null @@ -1,14 +0,0 @@ -1to2 naively converts source code files from NAN 1 to NAN 2. There will be erroneous conversions, -false positives and missed opportunities. The input files are rewritten in place. Make sure that -you have backups. You will have to manually review the changes afterwards and do some touchups. 
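The conversion logic above leans on one dispatch trick: every rule's pattern becomes one alternative (each with its own capture groups) in a single master RegExp, and the replacement function decides which rule fired by probing `arguments` at the precomputed absolute group offsets. A minimal, self-contained sketch of that pattern; the names `rules`, `groupOffsets`, and `dispatch` are hypothetical stand-ins, not part of the tool:

```js
// Each rule contributes exactly one capture group to one "master" alternation.
// groupOffsets[i] is the 1-based argument index of rule i's group in the
// replacement callback, mirroring what 1to2.js computes via groupcount().
var rules = [/(foo)/, /(bar)/];
var groupOffsets = [1, 2];

var master = new RegExp(rules.map(function (r) { return r.source; }).join('|'), 'g');

function dispatch() {
  // String.replace passes: full match, ...capture groups, offset, input string.
  if (arguments[groupOffsets[0]]) { return 'FOO'; } // rule 0 matched
  if (arguments[groupOffsets[1]]) { return 'BAR'; } // rule 1 matched
  return arguments[0]; // unreachable if the offsets are computed correctly
}

console.log('foo and bar'.replace(master, dispatch)); // "FOO and BAR"
```

A single pass of `replace` applies every rule at once, which is why the script can scan each file just twice (to catch rewrites that enable further rewrites) rather than once per rule.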
- -```sh -$ tools/1to2.js - - Usage: 1to2 [options] - - Options: - - -h, --help output usage information - -V, --version output the version number -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/package.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/package.json deleted file mode 100644 index 2dcdd789..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/nan/tools/package.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "1to2", - "version": "1.0.0", - "description": "NAN 1 -> 2 Migration Script", - "main": "1to2.js", - "repository": { - "type": "git", - "url": "git://github.com/nodejs/nan.git" - }, - "contributors": [ - "Benjamin Byholm (https://github.com/kkoopa/)", - "Mathias Küsel (https://github.com/mathiask88/)" - ], - "dependencies": { - "glob": "~5.0.10", - "commander": "~2.8.1" - }, - "license": "MIT" -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.editorconfig b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.editorconfig deleted file mode 100644 index 00098894..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.editorconfig +++ /dev/null @@ -1,5 +0,0 @@ -[*] -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true -insert_final_newline = true diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintignore b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintignore deleted file mode 100644 index b43bf86b..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintignore +++ /dev/null @@ -1 +0,0 @@ -README.md diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintrc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintrc deleted file mode 100644 index 09968b8b..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.jshintrc +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": true, - "mocha": true, - "browser": false, - "boss": true, - "curly": true, - "debug": false, - "devel": false, - "eqeqeq": true, - "evil": true, - "forin": false, - "latedef": false, - "noarg": true, - "nonew": true, - "nomen": false, - "onevar": false, - "plusplus": false, - "regexp": false, - "undef": true, - "strict": false, - "white": false, - "eqnull": true -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.travis.yml b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.travis.yml deleted file mode 100644 index 6124d08f..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/.travis.yml +++ /dev/null @@ -1,67 +0,0 @@ -language: node_js - -cache: - directories: - - node_modules -node_js: - - "4" - - "6" - - "8" 
- - "10" - - "12" - - "13" - - "14" - - "15" - - "16" -sudo: required -services: docker -before_install: - - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then ./run_docker.sh; fi - # - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install openssl; fi - - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then ./win_install.bat; fi -install: - - if [[ "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then npm install; fi - -script: - - if [[ "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then make lint && make test && make check; else echo $TRAVIS_BUILD_STAGE_NAME; fi - -jobs: - include: - - stage: test_on_mac - os: osx - osx_image: xcode10 - env: CPPFLAGS=-I/usr/local/opt/openssl/include LDFLAGS=-L/usr/local/opt/openssl/lib - node_js: - - "10" - - stage: test_on_win - os: windows - node_js: - - "10" - # - stage: create_doc - # provider: script - # before_deploy: - # - openssl aes-256-cbc -K $encrypted_a2e08d5c220e_key -iv $encrypted_a2e08d5c220e_iv -in deploy.enc -out /tmp/deploy -d - # - eval "$(ssh-agent -s)" - # - chmod 600 /tmp/deploy - # - ssh-add /tmp/deploy - # script: - # - "./make_docs.sh" - # if: (tag =~ ^v) - - stage: deploy - os: linux - node_js: - - "10" - script: skip - if: (tag =~ ^v) - before_deploy: - - npm install --no-save semver - deploy: - provider: npm - skip_cleanup: true - email: webmakersteve@gmail.com - api_key: - secure: "GUI9X1TnemXIMj5nZDqjrB3zBdMVxhAVKz18BN8TzsQBBne7BOrZ7L9yM3nOwxydm53NAPHFnEjYhYhvM+qhzLnik+XFQ7O3i9rS0hAvzQOBXfup5daib5A8VPMEdTRCXPS3hZ+p5n3ZBruSCQnyTu+HL1SGH//L+j52T5hiOZ3HauhCx9Q0myTBhB6CU5L8yscUUMg12qL7Uw4jsJLfgBRq6hAxphKXfvKt+NJMOWMSmQMbC1FxgfEgIkjFUtasWcUqCmfqx+983XdhhGhC64CYkusSZynNxnsTAosZGJiIZTPYXXL+imBgsEpsMCnW/id/qwaDzRueh2vWlBq2Lk9XSU3VOlKf8nMCJafc3CVjdOZvekyk+WU23gFd4Tpmwk0OtOOM2CKMoNxeMfNvA7ovQ96PP+LDdnRvdoFZV/oX3v3jaXR6DXFd8jnqRTpK4qj7qFO9eWgy1vXdfpwwS2gGVkFvSlWa3niBWzfLSL49Lm7UBwPKJYq0V5taO2dXz8nniTBAQJDcIEKaJkG6IYw5qnLpDLHB3jIw4NwYWw6f/cB5KZXCSeoGBOb9/61XD6Uq0QoQLIhj/vTKmOjNtZHlBzHUoKGYrP+SQRk9BgYh/Cr0azVhSpm2Zjz1fTJ1kYWKzHU8JPyWf6/isKQM4FLhFvWihy/kxkVTkZGR0b4=" - on: - tags: true - after_deploy: - - cat package.json diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/CONTRIBUTING.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/CONTRIBUTING.md deleted file mode 100644 index 66d04c01..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/CONTRIBUTING.md +++ /dev/null @@ -1,222 +0,0 @@ -# Contributing to `node-rdkafka` - -:+1::tada: First off, thanks for taking the time to contribute! :tada::+1: - -The following is a set of guidelines for contributing to `node-rdkafka` -which is hosted in the [Blizzard Organization](https://github.com/blizzard) -on GitHub. This document lists rules, guidelines, and help getting started, -so if you feel something is missing feel free to send a pull request. 
- -#### Table Of Contents - -[What should I know before I get started?](#what-should-i-know-before-i-get-started) - * [Contributor Agreement](#contributor-agreement) - -[How Can I Contribute?](#how-can-i-contribute) - * [Reporting Bugs](#reporting-bugs) - * [Suggesting Enhancements](#suggesting-enhancements) - * [Pull Requests](#pull-requests) - -[Styleguides](#styleguides) - * [Git Commit Messages](#git-commit-messages) - * [JavaScript Styleguide](#javascript-styleguide) - * [C++ Styleguide](#c++-styleguide) - * [Specs Styleguide](#specs-styleguide) - * [Documentation Styleguide](#documentation-styleguide) - -[Debugging](#debugging) - * [Debugging C++](#debugging-c) - -[Updating librdkafka version](#updating-librdkafka-version) - -## What should I know before I get started? - -### Contributor Agreement - -Not currently required. - -## How can I contribute? - -### Reporting Bugs - -Please use __Github Issues__ to report bugs. When filling out an issue report, -make sure to copy any related code and stack traces so we can properly debug. -We need to be able to reproduce a failing test to be able to fix your issue -most of the time, so a custom written failing test is very helpful. - -Please also note the Kafka broker version that you are using and how many -replicas, partitions, and brokers you are connecting to, because some issues -might be related to Kafka. A list of `librdkafka` configuration key-value pairs -also helps. - -### Suggesting Enhancements - -Please use __Github Issues__ to suggest enhancements. We are happy to consider -any extra functionality or features to the library, as long as they add real -and related value to users. Describing your use case and why such an addition -helps the user base can help guide the decision to implement it into the -library's core. - -### Pull Requests - -* Include new test cases (either end-to-end or unit tests) with your change. -* Follow our style guides. -* Make sure all tests are still passing and the `linter` does not report any issues. -* End files with a new line. -* Document the new code in the comments (if it is JavaScript) so the - documentation generator can update the reference documentation. -* Avoid platform-dependent code. -
**Note:** If making modifications to the underlying C++, please use built-in - precompiler directives to detect such platform specificities. Use `Nan` - whenever possible to abstract node/v8 version incompatibility. -* Make sure your branch is up to date and rebased. -* Squash extraneous commits unless their history truly adds value to the library. - -## Styleguides - -### General style guidelines - -Download the [EditorConfig](http://editorconfig.org) plugin for your preferred -text editor to automate the application of the following guidelines: - -* Use 2-space indent (no tabs). -* Do not leave trailing whitespace on lines. -* Files should end with a final newline. - -Also, adhere to the following not enforced by EditorConfig: - -* Limit lines to 80 characters in length. A few extra (<= 5) is fine if it helps - readability, use good judgement. -* Use `lf` line endings. (git's `core.autocrlf` setting can help) - -### Git Commit Messages - -Commit messages should adhere to the guidelines in tpope's -[A Note About Git Commit Messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) - -In short: - -* Use the imperative mood. ("Fix bug", not "Fixed bug" or "Fixes bug") -* Limit the first line to 50 characters or less, followed by a blank line - and detail paragraphs (limit detail lines to about 72 characters). -* Reference issue numbers or pull requests whenever possible. - -### JavaScript Styleguide - -* Place `module.exports` at or near the top of the file. - * Defined functions are hoisted, so it is appropriate to define the - function after you export it. - * When exporting an object, define it first, then export it, and then add - methods or properties. -* Do not use ES2015 specific features (for example, do not use `let`, `const`, - or `class`). -* All callbacks should follow the standard Node.js callback signature. -* Your JavaScript should properly pass the linter (`make jslint`). - -### C++ Styleguide - -* Class member variables should be prefixed with `m_`. -* Use a comment when pointer ownership has changed hands. -* Your C++ should properly pass the `cpplint.py` in the `make lint` test. - -### Specs Styleguide - -* Write all JavaScript tests by using the `mocha` testing framework. -* All `mocha` tests should use exports syntax. -* All `mocha` test files should be suffixed with `.spec.js` instead of `.js`. -* Unit tests should mirror the JavaScript files they test (for example, - `lib/client.js` is tested in `test/client.spec.js`). -* Unit tests should have no outside service dependencies. Any time a dependency, - like Kafka, exists, you should create an end-to-end test. -* You may mock a connection in a unit test if it is reliably similar to its real - variant. - -### Documentation Styleguide - -* Write all JavaScript documentation in jsdoc-compatible inline comments. -* Each docblock should have references to return types and parameters. If an - object is a parameter, you should also document any required subproperties. -* Use `@see` to reference similar pieces of code. -* Use comments to document your code when its intent may be difficult to understand. -* All documentation outside of the code should be in Github-compatible markdown. -* Make good use of font variations like __bold__ and *italics*. -* Use headers and tables of contents when they make sense. - -## Editor - -I began using Visual Studio code to develop on `node-rdkafka`. If you use it you can configure the C++ plugin to resolve the paths needed to inform your intellisense. 
This is the config file I am using on a mac to resolve the required paths: - -`c_cpp_properties.json` -``` -{ - "configurations": [ - { - "name": "Mac", - "includePath": [ - "${workspaceFolder}/**", - "${workspaceFolder}", - "${workspaceFolder}/src", - "${workspaceFolder}/node_modules/nan", - "${workspaceFolder}/deps/librdkafka/src", - "${workspaceFolder}/deps/librdkafka/src-cpp", - "/usr/local/include/node", - "/usr/local/include/node/uv" - ], - "defines": [], - "macFrameworkPath": [ - "/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/System/Library/Frameworks" - ], - "compilerPath": "/usr/bin/clang", - "cStandard": "c11", - "cppStandard": "c++17", - "intelliSenseMode": "clang-x64" - } - ], - "version": 4 -} -``` - -## Debugging - -### Debugging C++ - -Use `gdb` for debugging (as shown in the following example). - -``` -node-gyp rebuild --debug - -gdb node -(gdb) set args "path/to/file.js" -(gdb) run -[output here] -``` - -You can add breakpoints and so on after that. - -## Updating librdkafka version - -The librdkafka should be periodically updated to the latest release in https://github.com/edenhill/librdkafka/releases - -Steps to update: -1. Update the `librdkafka` property in [`package.json`](https://github.com/Blizzard/node-rdkafka/blob/master/package.json) to the desired version. - -1. Update the librdkafka git submodule to that versions release commit (example below) - - ```bash - cd deps/librdkafka - git checkout 063a9ae7a65cebdf1cc128da9815c05f91a2a996 # for version 1.8.2 - ``` - -1. Update [`config.d.ts`](https://github.com/Blizzard/node-rdkafka/blob/master/config.d.ts) and [`errors.d.ts`](https://github.com/Blizzard/node-rdkafka/blob/master/errors.d.ts) TypeScript definitions by running: - ```bash - node ci/librdkafka-defs-generator.js - ``` - Note: This is ran automatically during CI flows but it's good to run it during the version upgrade pull request. - -1. Run `npm install` to build with the new version and fix any build errors that occur. - -1. Run unit tests: `npm run test` - -1. Run end to end tests: `npm run test:e2e`. This requires running kafka & zookeeper locally. - -1. Update the version numbers referenced in the [`README.md`](https://github.com/Blizzard/node-rdkafka/blob/master/README.md) file to the new version. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/LICENSE.txt b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/LICENSE.txt deleted file mode 100644 index be3db86c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2016 Blizzard Entertainment - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/Makefile b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/Makefile deleted file mode 100644 index a35103f8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/Makefile +++ /dev/null @@ -1,94 +0,0 @@ -NODE-GYP ?= node_modules/.bin/node-gyp - -# Sick of changing this. Do a check and try to use python 2 if it doesn't work -PYTHON_VERSION_FULL := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1))) -PYTHON_VERSION_MAJOR := $(word 1,${PYTHON_VERSION_FULL}) - -ifeq ($(PYTHON_VERSION_MAJOR), 2) -PYTHON = python -else -PYTHON = python2 -endif - -NODE ?= node -CPPLINT ?= cpplint.py -BUILDTYPE ?= Release -TESTS = "test/**/*.js" -E2E_TESTS = $(wildcard e2e/*.spec.js) -TEST_REPORTER = -TEST_OUTPUT = -CONFIG_OUTPUTS = \ - build/bindings.target.mk \ - build/Makefile \ - build/binding.Makefile build/config.gypi - -CPPLINT_FILES = $(wildcard src/*.cc src/*.h) -CPPLINT_FILTER = -legal/copyright -JSLINT_FILES = lib/*.js test/*.js e2e/*.js - -PACKAGE = $(shell node -pe 'require("./package.json").name.split("/")[1]') -VERSION = $(shell node -pe 'require("./package.json").version') - -GYPBUILDARGS= -ifeq ($(BUILDTYPE),Debug) -GYPBUILDARGS=--debug -endif - -.PHONY: all clean lint test lib docs e2e ghpages check - -all: lint lib test e2e - -lint: cpplint jslint - -cpplint: - @$(PYTHON) $(CPPLINT) --filter=$(CPPLINT_FILTER) $(CPPLINT_FILES) - -jslint: node_modules/.dirstamp - @./node_modules/.bin/jshint --verbose $(JSLINT_FILES) - -lib: node_modules/.dirstamp $(CONFIG_OUTPUTS) - @PYTHONHTTPSVERIFY=0 $(NODE-GYP) build $(GYPBUILDARGS) - -node_modules/.dirstamp: package.json - @npm update --loglevel warn - @touch $@ - -$(CONFIG_OUTPUTS): node_modules/.dirstamp binding.gyp - @$(NODE-GYP) configure - -test: node_modules/.dirstamp - @./node_modules/.bin/mocha $(TEST_REPORTER) $(TESTS) $(TEST_OUTPUT) - -check: node_modules/.dirstamp - @$(NODE) util/test-compile.js - -e2e: $(E2E_TESTS) - @./node_modules/.bin/mocha --exit --timeout 120000 $(TEST_REPORTER) $(E2E_TESTS) $(TEST_OUTPUT) - -define release - NEXT_VERSION=$(shell node -pe 'require("semver").inc("$(VERSION)", "$(1)")') - node -e "\ - var j = require('./package.json');\ - j.version = \"$$NEXT_VERSION\";\ - var s = JSON.stringify(j, null, 2);\ - require('fs').writeFileSync('./package.json', s);" && \ - git commit -m "release $$NEXT_VERSION" -- package.json && \ - git tag "$$NEXT_VERSION" -m "release $$NEXT_VERSION" -endef - -docs: node_modules/.dirstamp - @rm -rf docs - @./node_modules/jsdoc/jsdoc.js --destination docs \ - --recurse -R ./README.md \ - -t "./node_modules/toolkit-jsdoc/" \ - --tutorials examples ./lib - -gh-pages: node_modules/.dirstamp - @./make_docs.sh - -release-patch: - @$(call release,patch) - -clean: node_modules/.dirstamp - @rm -f deps/librdkafka/config.h - @$(NODE-GYP) clean diff --git 
a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/README.md deleted file mode 100644 index 51f66b83..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/README.md +++ /dev/null @@ -1,634 +0,0 @@ -node-rdkafka - Node.js wrapper for Kafka C/C++ library -============================================== - -Copyright (c) 2016 Blizzard Entertainment. - -[https://github.com/blizzard/node-rdkafka](https://github.com/blizzard/node-rdkafka) - -[![Build Status](https://app.travis-ci.com/Blizzard/node-rdkafka.svg?branch=master)](https://app.travis-ci.com/github/Blizzard/node-rdkafka) -[![npm version](https://badge.fury.io/js/node-rdkafka.svg)](https://badge.fury.io/js/node-rdkafka) - -# Looking for Collaborators! - -I am looking for *your* help to make this project even better! If you're interested, check [this out](https://github.com/Blizzard/node-rdkafka/issues/628) - -# Overview - -The `node-rdkafka` library is a high-performance NodeJS client for [Apache Kafka](http://kafka.apache.org/) that wraps the native [librdkafka](https://github.com/edenhill/librdkafka) library. All the complexity of balancing writes across partitions and managing (possibly ever-changing) brokers should be encapsulated in the library. - -__This library currently uses `librdkafka` version `1.8.2`.__ - -## Reference Docs - -To view the reference docs for the current version, go [here](https://blizzard.github.io/node-rdkafka/current/) - -## Contributing - -For guidelines on contributing please see [CONTRIBUTING.md](https://github.com/blizzard/node-rdkafka/blob/master/CONTRIBUTING.md) - -## Code of Conduct - -Play nice; Play fair. - -## Requirements - -* Apache Kafka >=0.9 -* Node.js >=4 -* Linux/Mac -* Windows?! See below -* OpenSSL - -### Mac OS High Sierra / Mojave - -OpenSSL has been upgraded in High Sierra and homebrew does not overwrite default system libraries. That means when building node-rdkafka, because you are using openssl, you need to tell the linker where to find it: - -```sh -export CPPFLAGS=-I/usr/local/opt/openssl/include -export LDFLAGS=-L/usr/local/opt/openssl/lib -``` - -Then you can run `npm install` on your application to get it to build correctly. - -__NOTE:__ From the `librdkafka` docs - -> WARNING: Due to a bug in Apache Kafka 0.9.0.x, the ApiVersionRequest (as sent by the client when connecting to the broker) will be silently ignored by the broker causing the request to time out after 10 seconds. This causes client-broker connections to stall for 10 seconds during connection-setup before librdkafka falls back on the `broker.version.fallback` protocol features. The workaround is to explicitly configure `api.version.request` to `false` on clients communicating with <=0.9.0.x brokers. - -### Alpine - -Using Alpine Linux? Check out the [docs](https://github.com/Blizzard/node-rdkafka/blob/master/examples/docker-alpine.md). - -### Windows - -Windows build **is not** compiled from `librdkafka` source but it is rather linked against the appropriate version of [NuGet librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) static binary that gets downloaded from `https://globalcdn.nuget.org/packages/librdkafka.redist.1.8.2.nupkg` during installation. 
This download link can be changed using the environment variable `NODE_RDKAFKA_NUGET_BASE_URL` that defaults to `https://globalcdn.nuget.org/packages/` when it's no set. - -Requirements: - * [node-gyp for Windows](https://github.com/nodejs/node-gyp#on-windows) (the easies way to get it: `npm install --global --production windows-build-tools`, if your node version is 6.x or below, pleasse use `npm install --global --production windows-build-tools@3.1.0`) - -**Note:** I _still_ do not recommend using `node-rdkafka` in production on Windows. This feature was in high demand and is provided to help develop, but we do not test against Windows, and windows support may lag behind Linux/Mac support because those platforms are the ones used to develop this library. Contributors are welcome if any Windows issues are found :) - -## Tests - -This project includes two types of unit tests in this project: -* end-to-end integration tests -* unit tests - -You can run both types of tests by using `Makefile`. Doing so calls `mocha` in your locally installed `node_modules` directory. - -* Before you run the tests, be sure to init and update the submodules: - 1. `git submodule init` - 2. `git submodule update` -* To run the unit tests, you can run `make lint` or `make test`. -* To run the integration tests, you must have a running Kafka installation available. By default, the test tries to connect to `localhost:9092`; however, you can supply the `KAFKA_HOST` environment variable to override this default behavior. Run `make e2e`. - -# Usage - -You can install the `node-rdkafka` module like any other module: - -``` -npm install node-rdkafka -``` - -To use the module, you must `require` it. - -```js -var Kafka = require('node-rdkafka'); -``` - -## Configuration - -You can pass many configuration options to `librdkafka`. A full list can be found in `librdkafka`'s [Configuration.md](https://github.com/edenhill/librdkafka/blob/v1.8.2/CONFIGURATION.md) - -Configuration keys that have the suffix `_cb` are designated as callbacks. Some -of these keys are informational and you can choose to opt-in (for example, `dr_cb`). Others are callbacks designed to -return a value, such as `partitioner_cb`. - -Not all of these options are supported. -The library will throw an error if the value you send in is invalid. - -The library currently supports the following callbacks: -* `partitioner_cb` -* `dr_cb` or `dr_msg_cb` -* `event_cb` -* `rebalance_cb` (see [Rebalancing](#rebalancing)) -* `offset_commit_cb` (see [Commits](#commits)) - -### Librdkafka Methods - -This library includes two utility functions for detecting the status of your installation. Please try to include these when making issue reports where applicable. - -You can get the features supported by your compile of `librdkafka` by reading the variable "features" on the root of the `node-rdkafka` object. - -```js -const Kafka = require('node-rdkafka'); -console.log(Kafka.features); - -// #=> [ 'gzip', 'snappy', 'ssl', 'sasl', 'regex', 'lz4' ] -``` - -You can also get the version of `librdkafka` - -```js -const Kafka = require('node-rdkafka'); -console.log(Kafka.librdkafkaVersion); - -// #=> 1.8.2 -``` - -## Sending Messages - -A `Producer` sends messages to Kafka. The `Producer` constructor takes a configuration object, as shown in the following example: - -```js -var producer = new Kafka.Producer({ - 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' -}); -``` - -A `Producer` requires only `metadata.broker.list` (the Kafka brokers) to be created. 
The values in this list are separated by commas. For other configuration options, see the [Configuration.md](https://github.com/edenhill/librdkafka/blob/v1.8.2/CONFIGURATION.md) file described previously. - -The following example illustrates a list with several `librdkafka` options set. - -```js -var producer = new Kafka.Producer({ - 'client.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'compression.codec': 'gzip', - 'retry.backoff.ms': 200, - 'message.send.max.retries': 10, - 'socket.keepalive.enable': true, - 'queue.buffering.max.messages': 100000, - 'queue.buffering.max.ms': 1000, - 'batch.num.messages': 1000000, - 'dr_cb': true -}); -``` - -#### Stream API - -You can easily use the `Producer` as a writable stream immediately after creation (as shown in the following example): - -```js -// Our producer with its Kafka brokers -// This call returns a new writable stream to our topic 'topic-name' -var stream = Kafka.Producer.createWriteStream({ - 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' -}, {}, { - topic: 'topic-name' -}); - -// Writes a message to the stream -var queuedSuccess = stream.write(Buffer.from('Awesome message')); - -if (queuedSuccess) { - console.log('We queued our message!'); -} else { - // Note that this only tells us if the stream's queue is full, - // it does NOT tell us if the message got to Kafka! See below... - console.log('Too many messages in our queue already'); -} - -// NOTE: MAKE SURE TO LISTEN TO THIS IF YOU WANT THE STREAM TO BE DURABLE -// Otherwise, any error will bubble up as an uncaught exception. -stream.on('error', function (err) { - // Here's where we'll know if something went wrong sending to Kafka - console.error('Error in our kafka stream'); - console.error(err); -}) -``` - -If you do not want your code to crash when an error happens, ensure you have an `error` listener on the stream. Most errors are not necessarily fatal, but the ones that are will immediately destroy the stream. If you use `autoClose`, the stream will close itself at the first sign of a problem. - -#### Standard API - -The Standard API is more performant, particularly when handling high volumes of messages. -However, it requires more manual setup to use. The following example illustrates its use: - -```js -var producer = new Kafka.Producer({ - 'metadata.broker.list': 'localhost:9092', - 'dr_cb': true -}); - -// Connect to the broker manually -producer.connect(); - -// Wait for the ready event before proceeding -producer.on('ready', function() { - try { - producer.produce( - // Topic to send the message to - 'topic', - // optionally we can manually specify a partition for the message - // this defaults to -1 - which will use librdkafka's default partitioner (consistent random for keyed messages, random for unkeyed messages) - null, - // Message to send. Must be a buffer - Buffer.from('Awesome message'), - // for keyed messages, we also specify the key - note that this field is optional - 'Stormwind', - // you can send a timestamp here. If your broker version supports it, - // it will get added. 
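// (broker-side message timestamps require Apache Kafka 0.10 or newer)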
Otherwise, we default to 0 - Date.now(), - // you can send an opaque token here, which gets passed along - // to your delivery reports - ); - } catch (err) { - console.error('A problem occurred when sending our message'); - console.error(err); - } -}); - -// Any errors we encounter, including connection errors -producer.on('event.error', function(err) { - console.error('Error from producer'); - console.error(err); -}) - -// We must either call .poll() manually after sending messages -// or set the producer to poll on an interval (.setPollInterval). -// Without this, we do not get delivery events and the queue -// will eventually fill up. -producer.setPollInterval(100); -``` - -To see the configuration options available to you, see the [Configuration](#configuration) section. - -##### Methods - -|Method|Description| -|-------|----------| -|`producer.connect()`| Connects to the broker.
The `connect()` method emits the `ready` event when it connects successfully. If it does not, the error will be passed through the callback. | -|`producer.disconnect()`| Disconnects from the broker.
The `disconnect()` method emits the `disconnected` event when it has disconnected. If it does not, the error will be passed through the callback. | -|`producer.poll()` | Polls the producer for delivery reports or other events to be transmitted via the emitter.
In order to get the events in `librdkafka`'s queue to emit, you must call this regularly. | -|`producer.setPollInterval(interval)` | Polls the producer on this interval, handling disconnections and reconnection. Set it to 0 to turn it off. | -|`producer.produce(topic, partition, msg, key, timestamp, opaque)`| Sends a message.
The `produce()` method throws when produce would return an error. Ordinarily, this is just if the queue is full. | -|`producer.flush(timeout, callback)`| Flush the librdkafka internal queue, sending all messages. Default timeout is 500ms | -|`producer.initTransactions(timeout, callback)`| Initializes the transactional producer. | -|`producer.beginTransaction(callback)`| Starts a new transaction. | -|`producer.sendOffsetsToTransaction(offsets, consumer, timeout, callback)`| Sends consumed topic-partition-offsets to the broker, which will get committed along with the transaction. | -|`producer.abortTransaction(timeout, callback)`| Aborts the ongoing transaction. | -|`producer.commitTransaction(timeout, callback)`| Commits the ongoing transaction. | - -##### Events - -Some configuration properties that end in `_cb` indicate that an event should be generated for that option. You can either: - -* provide a value of `true` and react to the event -* provide a callback function directly - -The following example illustrates an event: - -```js -var producer = new Kafka.Producer({ - 'client.id': 'my-client', // Specifies an identifier to use to help trace activity in Kafka - 'metadata.broker.list': 'localhost:9092', // Connect to a Kafka instance on localhost - 'dr_cb': true // Specifies that we want a delivery-report event to be generated -}); - -// Poll for events every 100 ms -producer.setPollInterval(100); - -producer.on('delivery-report', function(err, report) { - // Report of delivery statistics here: - // - console.log(report); -}); -``` - -The following table describes types of events. - -|Event|Description| -|-------|----------| -| `disconnected` | The `disconnected` event is emitted when the broker has disconnected.
This event is emitted only when `.disconnect` is called. The wrapper will always try to reconnect otherwise. | -| `ready` | The `ready` event is emitted when the `Producer` is ready to send messages. | -| `event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option). | -| `event.log` | The `event.log` event is emitted when logging events come in (if you opted into logging via the `event_cb` option).
You will need to set a value for `debug` if you want to send information. | -| `event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | -| `event.error` | The `event.error` event is emitted when `librdkafka` reports an error. | -| `event.throttle` | The `event.throttle` event is emitted when `librdkafka` reports throttling. | -| `delivery-report` | The `delivery-report` event is emitted when a delivery report has been found via polling.
To use this event, you must set `request.required.acks` to `1` or `-1` in topic configuration and `dr_cb` (or `dr_msg_cb` if you want the report to contain the message payload) to `true` in the `Producer` constructor options. | - -### Higher Level Producer - -The higher level producer is a variant of the producer which can propagate callbacks to you upon message delivery. - -```js -var producer = new Kafka.HighLevelProducer({ - 'metadata.broker.list': 'localhost:9092', -}); -``` - -This will enrich the produce call so it will have a callback to tell you when the message has been delivered. You lose the ability to specify opaque tokens. - -```js -producer.produce(topicName, null, Buffer.from('alliance4ever'), null, Date.now(), (err, offset) => { - // The offset if our acknowledgement level allows us to receive delivery offsets - console.log(offset); -}); -``` - -Additionally you can add serializers to modify the value of a produce for a key or value before it is sent over to Kafka. - -```js -producer.setValueSerializer(function(value) { - return Buffer.from(JSON.stringify(value)); -}); -``` - -Otherwise the behavior of the class should be exactly the same. - -## Kafka.KafkaConsumer - -To read messages from Kafka, you use a `KafkaConsumer`. You instantiate a `KafkaConsumer` object as follows: - -```js -var consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', -}, {}); -``` - -The first parameter is the global config, while the second parameter is the topic config that gets applied to all subscribed topics. To view a list of all supported configuration properties, see the [Configuration.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) file described previously. Look for the `C` and `*` keys. - -The `group.id` and `metadata.broker.list` properties are required for a consumer. - -### Rebalancing - -Rebalancing is managed internally by `librdkafka` by default. If you would like to override this functionality, you may provide your own logic as a rebalance callback. - -```js -var consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'rebalance_cb': function(err, assignment) { - - if (err.code === Kafka.CODES.ERRORS.ERR__ASSIGN_PARTITIONS) { - // Note: this can throw when you are disconnected. Take care and wrap it in - // a try catch if that matters to you - this.assign(assignment); - } else if (err.code == Kafka.CODES.ERRORS.ERR__REVOKE_PARTITIONS){ - // Same as above - this.unassign(); - } else { - // We had a real error - console.error(err); - } - - } -}) -``` - -`this` is bound to the `KafkaConsumer` you have created. By specifying a `rebalance_cb` you can also listen to the `rebalance` event as an emitted event. This event is not emitted when using the internal `librdkafka` rebalancer. - -### Commits - -When you commit in `node-rdkafka`, the standard way is to queue the commit request up with the next `librdkafka` request to the broker. When doing this, there isn't a way to know the result of the commit. Luckily there is another callback you can listen to to get this information - -```js -var consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'offset_commit_cb': function(err, topicPartitions) { - - if (err) { - // There was an error committing - console.error(err); - } else { - // Commit went through. 
Let's log the topic partitions - console.log(topicPartitions); - } - - } -}) -``` - -`this` is bound to the `KafkaConsumer` you have created. By specifying an `offset_commit_cb` you can also listen to the `offset.commit` event as an emitted event. It receives an error and the list of topic partitions as argument. This is not emitted unless opted in. - -### Message Structure - -Messages that are returned by the `KafkaConsumer` have the following structure. - -```js -{ - value: Buffer.from('hi'), // message contents as a Buffer - size: 2, // size of the message, in bytes - topic: 'librdtesting-01', // topic the message comes from - offset: 1337, // offset the message was read from - partition: 1, // partition the message was on - key: 'someKey', // key of the message if present - timestamp: 1510325354780 // timestamp of message creation -} -``` - -### Stream API - -The stream API is the easiest way to consume messages. The following example illustrates the use of the stream API: - -```js -// Read from the librdtesting-01 topic... note that this creates a new stream on each call! -var stream = KafkaConsumer.createReadStream(globalConfig, topicConfig, { - topics: ['librdtesting-01'] -}); - -stream.on('data', function(message) { - console.log('Got message'); - console.log(message.value.toString()); -}); -``` - -You can also get the `consumer` from the streamConsumer, for using consumer methods. The following example illustrates that: - -```js -stream.consumer.commit(); // Commits all locally stored offsets -``` - -### Standard API - -You can also use the Standard API and manage callbacks and events yourself. You can choose different modes for consuming messages: - -* *Flowing mode*. This mode flows all of the messages it can read by maintaining an infinite loop in the event loop. It only stops when it detects the consumer has issued the `unsubscribe` or `disconnect` method. -* *Non-flowing mode*. This mode reads a single message from Kafka at a time manually. - -The following example illustrates flowing mode: -```js -// Flowing mode -consumer.connect(); - -consumer - .on('ready', function() { - consumer.subscribe(['librdtesting-01']); - - // Consume from the librdtesting-01 topic. This is what determines - // the mode we are running in. By not specifying a callback (or specifying - // only a callback) we get messages as soon as they are available. - consumer.consume(); - }) - .on('data', function(data) { - // Output the actual message contents - console.log(data.value.toString()); - }); -``` -The following example illustrates non-flowing mode: -```js -// Non-flowing mode -consumer.connect(); - -consumer - .on('ready', function() { - // Subscribe to the librdtesting-01 topic - // This makes subsequent consumes read from that topic. - consumer.subscribe(['librdtesting-01']); - - // Read one message every 1000 milliseconds - setInterval(function() { - consumer.consume(1); - }, 1000); - }) - .on('data', function(data) { - console.log('Message found! Contents below.'); - console.log(data.value.toString()); - }); -``` - -The following table lists important methods for this API. - -|Method|Description| -|-------|----------| -|`consumer.connect()` | Connects to the broker.
The `connect()` method emits the `ready` event when it has successfully connected. If it does not, the error will be passed through the callback. | -|`consumer.disconnect()` | Disconnects from the broker.
The `disconnect()` method emits `disconnected` when it has disconnected. If it does not, the error will be passed through the callback. | -|`consumer.subscribe(topics)` | Subscribes to an array of topics. | -|`consumer.unsubscribe()` | Unsubscribes from the currently subscribed topics.
You cannot subscribe to different topics without calling the `unsubscribe()` method first. | -|`consumer.consume(cb)` | Gets messages from the existing subscription as quickly as possible. If `cb` is specified, invokes `cb(err, message)`.
This method keeps a background thread running to do the work. Note that the number of threads in a Node.js process is limited by `UV_THREADPOOL_SIZE` (default value is 4) and using up all of them blocks other parts of the application that need threads. If you need multiple consumers then consider increasing `UV_THREADPOOL_SIZE` or using `consumer.consume(number, cb)` instead. | -|`consumer.consume(number, cb)` | Gets `number` of messages from the existing subscription. If `cb` is specified, invokes `cb(err, message)`. | -|`consumer.commit()` | Commits all locally stored offsets | -|`consumer.commit(topicPartition)` | Commits offsets specified by the topic partition | -|`consumer.commitMessage(message)` | Commits the offsets specified by the message | - -The following table lists events for this API. - -|Event|Description| -|-------|----------| -|`data` | When using the Standard API consumed messages are emitted in this event. | -|`partition.eof` | When using the Standard API and the configuration option `enable.partition.eof` is set, `partition.eof` events are emitted in this event. The event contains `topic`, `partition` and `offset` properties. | -|`warning` | The event is emitted in case of `UNKNOWN_TOPIC_OR_PART` or `TOPIC_AUTHORIZATION_FAILED` errors when consuming in *Flowing mode*. Since the consumer will continue working if the error is still happening, the warning event should reappear after the next metadata refresh. To control the metadata refresh rate, set the `topic.metadata.refresh.interval.ms` property. Once you resolve the error, you can manually call `getMetadata` to speed up consumer recovery. | -|`disconnected` | The `disconnected` event is emitted when the broker disconnects.
This event is only emitted when `.disconnect` is called. The wrapper will always try to reconnect otherwise. | -|`ready` | The `ready` event is emitted when the `Consumer` is ready to read messages. | -|`event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option).| -|`event.log` | The `event.log` event is emitted when logging events occur (if you opted in for logging via the `event_cb` option).
You will need to set a value for `debug` if you want information to send. | -|`event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | -|`event.error` | The `event.error` event is emitted when `librdkafka` reports an error | -|`event.throttle` | The `event.throttle` event is emitted when `librdkafka` reports throttling.| - -## Reading current offsets from the broker for a topic - -Some times you find yourself in the situation where you need to know the latest (and earliest) offset for one of your topics. Connected producers and consumers both allow you to query for these through `queryWaterMarkOffsets` like follows: - -```js -var timeout = 5000, partition = 0; -consumer.queryWatermarkOffsets('my-topic', partition, timeout, function(err, offsets) { - var high = offsets.highOffset; - var low = offsets.lowOffset; -}); - -producer.queryWatermarkOffsets('my-topic', partition, timeout, function(err, offsets) { - var high = offsets.highOffset; - var low = offsets.lowOffset; -}); - -An error will be returned if the client was not connected or the request timed out within the specified interval. - -``` - -## Metadata - -Both `Kafka.Producer` and `Kafka.KafkaConsumer` include a `getMetadata` method to retrieve metadata from Kafka. - -Getting metadata on any connection returns the following data structure: - -```js -{ - orig_broker_id: 1, - orig_broker_name: "broker_name", - brokers: [ - { - id: 1, - host: 'localhost', - port: 40 - } - ], - topics: [ - { - name: 'awesome-topic', - partitions: [ - { - id: 1, - leader: 20, - replicas: [1, 2], - isrs: [1, 2] - } - ] - } - ] -} -``` - -The following example illustrates how to use the `getMetadata` method. - -When fetching metadata for a specific topic, if a topic reference does not exist, one is created using the default config. -Please see the documentation on `Client.getMetadata` if you want to set configuration parameters, e.g. `acks`, on a topic to produce messages to. - -```js -var opts = { - topic: 'librdtesting-01', - timeout: 10000 -}; - -producer.getMetadata(opts, function(err, metadata) { - if (err) { - console.error('Error getting metadata'); - console.error(err); - } else { - console.log('Got metadata'); - console.log(metadata); - } -}); -``` - -## Admin Client - -`node-rdkafka` now supports the admin client for creating, deleting, and scaling out topics. The `librdkafka` APIs also support altering configuration of topics and broker, but that is not currently implemented. - -To create an Admin client, you can do as follows: - -```js -const Kafka = require('node-rdkafka'); - -const client = Kafka.AdminClient.create({ - 'client.id': 'kafka-admin', - 'metadata.broker.list': 'broker01' -}); -``` - -This will instantiate the `AdminClient`, which will allow the calling of the admin methods. - -```js -client.createTopic({ - topic: topicName, - num_partitions: 1, - replication_factor: 1 -}, function(err) { - // Done! -}); -``` - -All of the admin api methods can have an optional timeout as their penultimate parameter. - -The following table lists important methods for this API. - -|Method|Description| -|-------|----------| -|`client.disconnect()` | Destroy the admin client, making it invalid for further use. | -|`client.createTopic(topic, timeout, cb)` | Create a topic on the broker with the given configuration. 
See JS doc for more on structure of the topic object | -|`client.deleteTopic(topicName, timeout, cb)` | Delete a topic of the given name | -|`client.createPartitions(topicName, desiredPartitions, timeout, cb)` | Create partitions until the topic has the desired number of partitions. | - -Check the tests for an example of how to use this API! diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-raw-rdkafka.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-raw-rdkafka.js deleted file mode 100644 index 82b9343b..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-raw-rdkafka.js +++ /dev/null @@ -1,76 +0,0 @@ -/* - * node-rdkafka - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016 Blizzard Entertainment - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ - -var Kafka = require('../'); -var count = 0; -var total = 0; -var store = []; -var host = process.argv[2] || 'localhost:9092'; -var topic = process.argv[3] || 'test'; - -var consumer = new Kafka.KafkaConsumer({ - 'metadata.broker.list': host, - 'group.id': 'node-rdkafka-bench-s', - 'fetch.wait.max.ms': 100, - 'fetch.message.max.bytes': 1024 * 1024, - 'enable.auto.commit': false - // paused: true, -}, { - 'auto.offset.reset': 'earliest' -}); - -var interval; - -consumer.connect() - .once('ready', function() { - consumer.subscribe([topic]); - consumer.consume(); - }) - .on('rebalance', function() { - console.log('rebalance'); - }) - .once('data', function() { - interval = setInterval(function() { - console.log('%d messages per second', count); - if (count > 0) { - store.push(count); - } - count = 0; - }, 1000); - }) - .on('data', function(message) { - count += 1; - total += 1; - }); - -function shutdown() { - clearInterval(interval); - - if (store.length > 0) { - var calc = 0; - for (var x in store) { - calc += store[x]; - } - - var mps = parseFloat(calc * 1.0/store.length); - - console.log('%d messages per second on average', mps); - - } - - var killTimer = setTimeout(function() { - process.exit(); - }, 5000); - - consumer.disconnect(function() { - clearTimeout(killTimer); - process.exit(); - }); - -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-subscribe.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-subscribe.js deleted file mode 100644 index c124cd64..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/consumer-subscribe.js +++ /dev/null @@ -1,77 +0,0 @@ -/* - * node-rdkafka - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016 Blizzard Entertainment - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. 
- */ - -var Kafka = require('../'); -var count = 0; -var total = 0; -var store = []; -var host = process.argv[2] || 'localhost:9092'; -var topic = process.argv[3] || 'test'; - -var consumer = new Kafka.KafkaConsumer({ - 'metadata.broker.list': host, - 'group.id': 'node-rdkafka-bench', - 'fetch.wait.max.ms': 100, - 'fetch.message.max.bytes': 1024 * 1024, - 'enable.auto.commit': false - // paused: true, -}, { - 'auto.offset.reset': 'earliest' -}); - -var interval; - -consumer.connect() - .once('ready', function() { - consumer.subscribe([topic]); - consumer.consume(); - }) - .once('data', function() { - interval = setInterval(function() { - console.log('%d messages per second', count); - if (count > 0) { - store.push(count); - } - count = 0; - }, 1000); - }) - .on('data', function(message) { - count += 1; - total += 1; - }); - -process.once('SIGTERM', shutdown); -process.once('SIGINT', shutdown); -process.once('SIGHUP', shutdown); - -function shutdown() { - clearInterval(interval); - - if (store.length > 0) { - var calc = 0; - for (var x in store) { - calc += store[x]; - } - - var mps = parseFloat(calc * 1.0/store.length); - - console.log('%d messages per second on average', mps); - - } - - var killTimer = setTimeout(function() { - process.exit(); - }, 5000); - - consumer.disconnect(function() { - clearTimeout(killTimer); - process.exit(); - }); - -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/kafka-consumer-stream.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/kafka-consumer-stream.js deleted file mode 100644 index 6e240ffc..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/kafka-consumer-stream.js +++ /dev/null @@ -1,100 +0,0 @@ -/* - * node-rdkafka - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016 Blizzard Entertainment - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ - -var Writable = require('stream').Writable; - -var Kafka = require('../'); -var count = 0; -var total = 0; -var store = []; -var host = process.argv[2] || 'localhost:9092'; -var topic = process.argv[3] || 'test'; - -var stream = Kafka.createReadStream({ - 'metadata.broker.list': host, - 'group.id': 'node-rdkafka-benchs', - 'fetch.wait.max.ms': 100, - 'fetch.message.max.bytes': 1024 * 1024, - 'enable.auto.commit': false - // paused: true, -}, { - 'auto.offset.reset': 'earliest' -}, { - fetchSize: 16, - topics: [topic] -}); - -// Track how many messages we see per second -var interval; - -var isShuttingDown = false; - -stream - .on('error', function(err) { - console.log('Shutting down due to error'); - console.log(err.stack); - shutdown(); - }) - .once('data', function(d) { - interval = setInterval(function() { - if (isShuttingDown) { - clearInterval(interval); - } - console.log('%d messages per second', count); - if (count > 0) { - // Don't store ones when we didn't get data i guess? 
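// (skipping zero-count intervals keeps idle seconds from dragging the average down)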
- store.push(count); - // setTimeout(shutdown, 500); - } - count = 0; - }, 1000).unref(); - }) - .on('end', function() { - // Can be called more than once without issue because of guard var - console.log('Shutting down due to stream end'); - shutdown(); - }) - .pipe(new Writable({ - objectMode: true, - write: function(message, encoding, cb) { - count += 1; - total += 1; - setImmediate(cb); - } - })); - -process.once('SIGTERM', shutdown); -process.once('SIGINT', shutdown); -process.once('SIGHUP', shutdown); - -function shutdown() { - if (isShuttingDown) { - return; - } - clearInterval(interval); - isShuttingDown = true; - if (store.length > 0) { - var calc = 0; - for (var x in store) { - calc += store[x]; - } - - var mps = parseFloat(calc * 1.0/store.length); - - console.log('%d messages per second on average', mps); - } - - // Destroy the stream - stream.destroy(); - - stream.once('end', function() { - console.log('total: %d', total); - }); - -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-raw-rdkafka.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-raw-rdkafka.js deleted file mode 100644 index d8fabccf..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-raw-rdkafka.js +++ /dev/null @@ -1,125 +0,0 @@ -/* - * node-rdkafka - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016 Blizzard Entertainment - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ - -var Kafka = require('../'); -var crypto = require('crypto'); -var count = 0; -var total = 0; -var totalComplete = 0; -var verifiedComplete = 0; -var errors = 0; -var store = []; -var started; -var done = false; -var host = process.argv[2] || '127.0.0.1:9092'; -var topicName = process.argv[3] || 'test'; -var compression = process.argv[4] || 'gzip'; -var MAX = process.argv[5] || 10000000; - -var producer = new Kafka.Producer({ - 'metadata.broker.list': host, - 'group.id': 'node-rdkafka-bench', - 'compression.codec': compression, - 'retry.backoff.ms': 200, - 'message.send.max.retries': 10, - 'socket.keepalive.enable': true, - 'queue.buffering.max.messages': 100000, - 'queue.buffering.max.ms': 1000, - 'batch.num.messages': 1000 -}); - -// Track how many messages we see per second -var interval; -var ok = true; - -function getTimer() { - if (!interval) { - interval = setTimeout(function() { - interval = false; - if (!done) { - console.log('%d messages per sent second', count); - store.push(count); - count = 0; - getTimer(); - - } else { - console.log('%d messages remaining sent in last batch <1000ms', count); - } - }, 1000); - } - - return interval; -} - -var t; - -crypto.randomBytes(4096, function(ex, buffer) { - - producer.connect() - .on('ready', function() { - getTimer(); - - started = new Date().getTime(); - - var sendMessage = function() { - try { - var errorCode = producer.produce(topicName, null, buffer, null); - verifiedComplete += 1; - } catch (e) { - console.error(e); - errors++; - } - - count += 1; - totalComplete += 1; - if (totalComplete === MAX) { - shutdown(); - } - if (total < MAX) { - total += 1; - - // This is 100% sync so we need to setImmediate to give it time - // to breathe. 
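// (setImmediate yields to the event loop between sends so libuv I/O and delivery callbacks can run)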
- setImmediate(sendMessage); - } - }; - - sendMessage(); - - }) - .on('event.error', function(err) { - console.error(err); - process.exit(1); - }) - .on('disconnected', shutdown); - -}); - -function shutdown(e) { - done = true; - - clearInterval(interval); - - var killTimer = setTimeout(function() { - process.exit(); - }, 5000); - - producer.disconnect(function() { - clearTimeout(killTimer); - var ended = new Date().getTime(); - var elapsed = ended - started; - - // console.log('Ended %s', ended); - console.log('total: %d messages over %d ms', total, elapsed); - - console.log('%d messages / second', parseInt(total / (elapsed / 1000))); - process.exit(); - }); - -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-rdkafka.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-rdkafka.js deleted file mode 100644 index 3f1aba55..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/producer-rdkafka.js +++ /dev/null @@ -1,117 +0,0 @@ -/* - * node-rdkafka - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016 Blizzard Entertainment - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ - -var Kafka = require('../'); -var crypto = require('crypto'); -var count = 0; -var total = 0; -var totalComplete = 0; -var store = []; -var host = process.argv[2] || '127.0.0.1:9092'; -var topicName = process.argv[3] || 'test'; -var compression = process.argv[4] || 'gzip'; -var MAX = process.argv[5] || 1000000; - -var stream = Kafka.Producer.createWriteStream({ - 'metadata.broker.list': host, - 'group.id': 'node-rdkafka-bench', - 'compression.codec': compression, - 'retry.backoff.ms': 200, - 'message.send.max.retries': 10, - 'socket.keepalive.enable': true, - 'queue.buffering.max.messages': 100000, - 'queue.buffering.max.ms': 1000, - 'batch.num.messages': 1000, -}, {}, { - topic: topicName, - pollInterval: 20 -}); - -stream.on('error', function(e) { - console.log(e); - process.exit(1); -}); - -// Track how many messages we see per second -var interval; -var done = false; - -function log() { - console.log('%d messages per sent second', count); - store.push(count); - count = 0; -} - -crypto.randomBytes(4096, function(ex, buffer) { - - var x = function(e) { - if (e) { - console.error(e); - } - count += 1; - totalComplete += 1; - if (totalComplete >= MAX && !done) { - done = true; - clearInterval(interval); - setTimeout(shutdown, 5000); - } - }; - - function write() { - if (!stream.write(buffer, 'base64', x)) { - return stream.once('drain', write); - } else { - total++; - } - - if (total < MAX) { - // we are not done - setImmediate(write); - } - - } - - write(); - interval = setInterval(log, 1000); - stream.on('error', function(err) { - console.log(err); - }); - // stream.on('end', shutdown); - -}); - - -process.once('SIGTERM', shutdown); -process.once('SIGINT', shutdown); -process.once('SIGHUP', shutdown); - -function shutdown() { - - if (store.length > 0) { - var calc = 0; - for (var x in store) { - calc += store[x]; - } - - var mps = parseFloat(calc * 1.0/store.length); - - console.log('%d messages per second on average', mps); - console.log('%d messages total', total); - - } - - clearInterval(interval); - - stream.end(); - - stream.on('close', function() { - 
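// By the time 'close' fires the stream has finished winding down,
// so the count logged here should be the final total.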
console.log('total: %d', total); - }); - -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/seed.sh b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/seed.sh deleted file mode 100755 index 5395aaa4..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/bench/seed.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -kafka_root=${KAFKA_ROOT:-/opt/kafka} -# Generate and insert some messages - -OS=$(uname -s) - -function initializeTopic { - topic=$1 - host=$2 - msg_size=$3 - batch_size=$4 - batch_count=$5 - - if [ $host == "localhost:9092" ]; then - ${kafka_root}/bin/kafka-topics.sh --create --zookeeper localhost:2181 \ - --replication-factor 1 --partitions 1 --topic ${topic} - fi - - echo "Generating messages (size: ${msg_size})" - : > /tmp/msgs # Truncate /tmp/msgs - for i in $(seq 1 ${batch_size}); do - if [ $OS == 'Darwin' ]; then - printf %s\\n "$(head -c${msg_size} /dev/urandom | base64)" >> /tmp/msgs - else - printf %s\\n "$(head --bytes=${msg_size} /dev/urandom | base64 --wrap=0)" >> /tmp/msgs - fi - done - - echo "Done generating messages" - - for i in $(seq 1 ${batch_count}); do - echo "Adding $(wc -l /tmp/msgs) messages to topic ${topic}" - "${kafka_root}/bin/kafka-console-producer.sh" \ - --broker-list ${host} --topic ${topic} < /tmp/msgs - done -} - -initializeTopic "librdtesting-01" "localhost:9092" "4096" "5000" "2000" diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/binding.gyp b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/binding.gyp deleted file mode 100644 index 529d5d94..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/binding.gyp +++ /dev/null @@ -1,154 +0,0 @@ -{ - "variables": { - # may be redefined in command line on configuration stage - # "BUILD_LIBRDKAFKA%": "> $(depfile) -# Add extra rules as in (2). -# We remove slashes and replace spaces with new lines; -# remove blank lines; -# delete the first line and append a colon to the remaining lines. -sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\ - grep -v '^$$' |\ - sed -e 1d -e 's|$$|:|' \ - >> $(depfile) -rm $(depfile).raw -endef - -# Command definitions: -# - cmd_foo is the actual command to run; -# - quiet_cmd_foo is the brief-output summary of the command. - -quiet_cmd_cc = CC($(TOOLSET)) $@ -cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $< - -quiet_cmd_cxx = CXX($(TOOLSET)) $@ -cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< - -quiet_cmd_touch = TOUCH $@ -cmd_touch = touch $@ - -quiet_cmd_copy = COPY $@ -# send stderr to /dev/null to ignore messages when linking directories. -cmd_copy = rm -rf "$@" && cp -af "$<" "$@" - -quiet_cmd_alink = AR($(TOOLSET)) $@ -cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) - -quiet_cmd_alink_thin = AR($(TOOLSET)) $@ -cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) - -# Due to circular dependencies between libraries :(, we wrap the -# special "figure out circular dependencies" flags around the entire -# input list during linking. 
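# (--start-group/--end-group makes ld rescan the archives in the group until
# no new undefined symbols are resolved, which tolerates circular references
# between the libraries at the cost of slower links.)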
-quiet_cmd_link = LINK($(TOOLSET)) $@ -cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) $(LIBS) -Wl,--end-group - -# We support two kinds of shared objects (.so): -# 1) shared_library, which is just bundling together many dependent libraries -# into a link line. -# 2) loadable_module, which is generating a module intended for dlopen(). -# -# They differ only slightly: -# In the former case, we want to package all dependent code into the .so. -# In the latter case, we want to package just the API exposed by the -# outermost module. -# This means shared_library uses --whole-archive, while loadable_module doesn't. -# (Note that --whole-archive is incompatible with the --start-group used in -# normal linking.) - -# Other shared-object link notes: -# - Set SONAME to the library filename so our binaries don't reference -# the local, absolute paths used on the link command-line. -quiet_cmd_solink = SOLINK($(TOOLSET)) $@ -cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) - -quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ -cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) - - -# Define an escape_quotes function to escape single quotes. -# This allows us to handle quotes properly as long as we always use -# use single quotes and escape_quotes. -escape_quotes = $(subst ','\'',$(1)) -# This comment is here just to include a ' to unconfuse syntax highlighting. -# Define an escape_vars function to escape '$' variable syntax. -# This allows us to read/write command lines with shell variables (e.g. -# $LD_LIBRARY_PATH), without triggering make substitution. -escape_vars = $(subst $$,$$$$,$(1)) -# Helper that expands to a shell command to echo a string exactly as it is in -# make. This uses printf instead of echo because printf's behaviour with respect -# to escape sequences is more portable than echo's across different shells -# (e.g., dash, bash). -exact_echo = printf '%s\n' '$(call escape_quotes,$(1))' - -# Helper to compare the command we're about to run against the command -# we logged the last time we ran the command. Produces an empty -# string (false) when the commands match. -# Tricky point: Make has no string-equality test function. -# The kernel uses the following, but it seems like it would have false -# positives, where one string reordered its arguments. -# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \ -# $(filter-out $(cmd_$@), $(cmd_$(1)))) -# We instead substitute each for the empty string into the other, and -# say they're equal if both substitutions produce the empty string. -# .d files contain ? instead of spaces, take that into account. -command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\ - $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1)))) - -# Helper that is non-empty when a prerequisite changes. -# Normally make does this implicitly, but we force rules to always run -# so we can check their command lines. -# $? -- new prerequisites -# $| -- order-only dependencies -prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?)) - -# Helper that executes all postbuilds until one fails. 
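# Each postbuild is eval'd in order; on the first nonzero exit status the
# loop stops, the partially built target is removed, and the failure is
# propagated via exit.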
-define do_postbuilds - @E=0;\ - for p in $(POSTBUILDS); do\ - eval $$p;\ - E=$$?;\ - if [ $$E -ne 0 ]; then\ - break;\ - fi;\ - done;\ - if [ $$E -ne 0 ]; then\ - rm -rf "$@";\ - exit $$E;\ - fi -endef - -# do_cmd: run a command via the above cmd_foo names, if necessary. -# Should always run for a given target to handle command-line changes. -# Second argument, if non-zero, makes it do asm/C/C++ dependency munging. -# Third argument, if non-zero, makes it do POSTBUILDS processing. -# Note: We intentionally do NOT call dirx for depfile, since it contains ? for -# spaces already and dirx strips the ? characters. -define do_cmd -$(if $(or $(command_changed),$(prereq_changed)), - @$(call exact_echo, $($(quiet)cmd_$(1))) - @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))" - $(if $(findstring flock,$(word 1,$(cmd_$1))), - @$(cmd_$(1)) - @echo " $(quiet_cmd_$(1)): Finished", - @$(cmd_$(1)) - ) - @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile) - @$(if $(2),$(fixup_dep)) - $(if $(and $(3), $(POSTBUILDS)), - $(call do_postbuilds) - ) -) -endef - -# Declare the "all" target first so it is the default, -# even though we don't have the deps yet. -.PHONY: all -all: - -# make looks for ways to re-generate included makefiles, but in our case, we -# don't have a direct way. Explicitly telling make that it has nothing to do -# for them makes it go faster. -%.d: ; - -# Use FORCE_DO_CMD to force a target to run. Should be coupled with -# do_cmd. -.PHONY: FORCE_DO_CMD -FORCE_DO_CMD: - -TOOLSET := target -# Suffix rules, putting all outputs into $(obj). -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD - @$(call do_cmd,cc,1) - -# Try building from generated source, too. 
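# Outputs of actions and rules land under $(obj), so the same suffix rules
# are repeated with $(obj)-rooted sources to cover generated files.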
-$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cxx FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.S FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.s FORCE_DO_CMD - @$(call do_cmd,cc,1) - -$(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj)/%.cxx FORCE_DO_CMD - @$(call do_cmd,cxx,1) -$(obj).$(TOOLSET)/%.o: $(obj)/%.S FORCE_DO_CMD - @$(call do_cmd,cc,1) -$(obj).$(TOOLSET)/%.o: $(obj)/%.s FORCE_DO_CMD - @$(call do_cmd,cc,1) - - -ifeq ($(strip $(foreach prefix,$(NO_LOAD),\ - $(findstring $(join ^,$(prefix)),\ - $(join ^,deps/librdkafka.target.mk)))),) - include deps/librdkafka.target.mk -endif -ifeq ($(strip $(foreach prefix,$(NO_LOAD),\ - $(findstring $(join ^,$(prefix)),\ - $(join ^,node-librdkafka.target.mk)))),) - include node-librdkafka.target.mk -endif - -quiet_cmd_regen_makefile = ACTION Regenerating $@ -cmd_regen_makefile = cd $(srcdir); /var/fc/lang/nodejs14_alinode/lib/node_modules/npm/node_modules/node-gyp/gyp/gyp_main.py -fmake --ignore-environment "--toplevel-dir=." -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/config.gypi -I/var/fc/lang/nodejs14_alinode/lib/node_modules/npm/node_modules/node-gyp/addon.gypi -I/tmp/.cache/node-gyp/14.18.1/include/node/common.gypi "--depth=." "-Goutput_dir=." "--generator-output=build" "-Dlibrary=shared_library" "-Dvisibility=default" "-Dnode_root_dir=/tmp/.cache/node-gyp/14.18.1" "-Dnode_gyp_dir=/var/fc/lang/nodejs14_alinode/lib/node_modules/npm/node_modules/node-gyp" "-Dnode_lib_file=/tmp/.cache/node-gyp/14.18.1/<(target_arch)/node.lib" "-Dmodule_root_dir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka" "-Dnode_engine=v8" binding.gyp -Makefile: $(srcdir)/../../../../../../var/fc/lang/nodejs14_alinode/lib/node_modules/npm/node_modules/node-gyp/addon.gypi $(srcdir)/deps/librdkafka.gyp $(srcdir)/build/config.gypi $(srcdir)/binding.gyp $(srcdir)/../../../../../.cache/node-gyp/14.18.1/include/node/common.gypi - $(call do_cmd,regen_makefile) - -# "all" is a concatenation of the "all" targets from all the included -# sub-makefiles. This is just here to clarify. -all: - -# Add in dependency-tracking rules. $(all_deps) is the list of every single -# target in our tree. 
Only consider the ones with .d (dependency) info: -d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d)) -ifneq ($(d_files),) - include $(d_files) -endif diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate.d deleted file mode 100644 index aae2b939..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate.d +++ /dev/null @@ -1 +0,0 @@ -cmd_11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate := LD_LIBRARY_PATH=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/Release/lib.host:/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/Release/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; cd ../deps; mkdir -p deps/librdkafka/src deps/librdkafka/src-cpp; make -C librdkafka libs install diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/node-librdkafka.node.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/node-librdkafka.node.d deleted file mode 100644 index d3c2545d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/node-librdkafka.node.d +++ /dev/null @@ -1 +0,0 @@ -cmd_Release/node-librdkafka.node := rm -rf "Release/node-librdkafka.node" && cp -af "Release/obj.target/node-librdkafka.node" "Release/node-librdkafka.node" diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/deps/librdkafka.stamp.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/deps/librdkafka.stamp.d deleted file mode 100644 index 9f8aa279..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/deps/librdkafka.stamp.d +++ /dev/null @@ -1 +0,0 @@ -cmd_Release/obj.target/deps/librdkafka.stamp := touch Release/obj.target/deps/librdkafka.stamp diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka.node.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka.node.d deleted file mode 100644 index 174bc6d2..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka.node.d +++ /dev/null @@ -1 +0,0 @@ -cmd_Release/obj.target/node-librdkafka.node := g++ -shared -pthread -rdynamic -m64 -Wl,-soname=node-librdkafka.node -o 
Release/obj.target/node-librdkafka.node -Wl,--start-group Release/obj.target/node-librdkafka/src/binding.o Release/obj.target/node-librdkafka/src/callbacks.o Release/obj.target/node-librdkafka/src/common.o Release/obj.target/node-librdkafka/src/config.o Release/obj.target/node-librdkafka/src/connection.o Release/obj.target/node-librdkafka/src/errors.o Release/obj.target/node-librdkafka/src/kafka-consumer.o Release/obj.target/node-librdkafka/src/producer.o Release/obj.target/node-librdkafka/src/topic.o Release/obj.target/node-librdkafka/src/workers.o Release/obj.target/node-librdkafka/src/admin.o -Wl,--end-group ../build/deps/librdkafka.so ../build/deps/librdkafka++.so -Wl,-rpath='$$ORIGIN/../deps' diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/admin.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/admin.o.d deleted file mode 100644 index 240d3f8e..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/admin.o.d +++ /dev/null @@ -1,86 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/admin.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/admin.o.d.raw -c -o Release/obj.target/node-librdkafka/src/admin.o ../src/admin.cc -Release/obj.target/node-librdkafka/src/admin.o: ../src/admin.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h ../../nan/nan.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - 
/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src-cpp/rdkafkacpp.h ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/admin.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: 
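# Every header also appears below as a target with no prerequisites and no
# recipe, so make treats a since-deleted header as trivially up to date
# instead of failing (the same effect as gcc's -MP, produced here by the
# fixup_dep sed pass defined in the generated Makefile).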
-../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/binding.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/binding.o.d deleted file mode 100644 index b67fa207..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/binding.o.d +++ /dev/null @@ -1,86 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/binding.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/binding.o.d.raw -c -o Release/obj.target/node-librdkafka/src/binding.o ../src/binding.cc -Release/obj.target/node-librdkafka/src/binding.o: ../src/binding.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/binding.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - 
/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/binding.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/binding.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: 
-/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/callbacks.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/callbacks.o.d deleted file mode 100644 index 974072d8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/callbacks.o.d +++ /dev/null @@ -1,78 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/callbacks.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF 
./Release/.deps/Release/obj.target/node-librdkafka/src/callbacks.o.d.raw -c -o Release/obj.target/node-librdkafka/src/callbacks.o ../src/callbacks.cc -Release/obj.target/node-librdkafka/src/callbacks.o: ../src/callbacks.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h ../../nan/nan.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h -../src/callbacks.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: 
-/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/common.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/common.o.d deleted file mode 100644 index 0eb9cf80..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/common.o.d +++ /dev/null @@ -1,70 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/common.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/common.o.d.raw -c -o Release/obj.target/node-librdkafka/src/common.o ../src/common.cc -Release/obj.target/node-librdkafka/src/common.o: ../src/common.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - 
/tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h -../src/common.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: diff --git 
a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/config.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/config.o.d deleted file mode 100644 index 6da6b929..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/config.o.d +++ /dev/null @@ -1,74 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/config.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/config.o.d.raw -c -o Release/obj.target/node-librdkafka/src/config.o ../src/config.cc -Release/obj.target/node-librdkafka/src/config.o: ../src/config.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h 
../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h -../src/config.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/connection.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/connection.o.d deleted file mode 100644 index 21706eb4..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/connection.o.d +++ /dev/null @@ -1,86 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/connection.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' 
'-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/connection.o.d.raw -c -o Release/obj.target/node-librdkafka/src/connection.o ../src/connection.cc -Release/obj.target/node-librdkafka/src/connection.o: ../src/connection.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - 
/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/connection.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/errors.o.d 
b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/errors.o.d deleted file mode 100644 index ad910ec9..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/errors.o.d +++ /dev/null @@ -1,70 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/errors.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/errors.o.d.raw -c -o Release/obj.target/node-librdkafka/src/errors.o ../src/errors.cc -Release/obj.target/node-librdkafka/src/errors.o: ../src/errors.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - 
../deps/librdkafka/src/rdkafka.h -../src/errors.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/kafka-consumer.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/kafka-consumer.o.d deleted file mode 100644 index 76687716..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/kafka-consumer.o.d +++ /dev/null @@ -1,87 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/kafka-consumer.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer 
-fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/kafka-consumer.o.d.raw -c -o Release/obj.target/node-librdkafka/src/kafka-consumer.o ../src/kafka-consumer.cc -Release/obj.target/node-librdkafka/src/kafka-consumer.o: \ - ../src/kafka-consumer.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/kafka-consumer.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: 
-/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/producer.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/producer.o.d deleted file mode 100644 index e498b146..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/producer.o.d +++ /dev/null @@ -1,86 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/producer.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' 
'-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/producer.o.d.raw -c -o Release/obj.target/node-librdkafka/src/producer.o ../src/producer.cc -Release/obj.target/node-librdkafka/src/producer.o: ../src/producer.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - 
/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/producer.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/topic.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/topic.o.d deleted file mode 100644 index cb5295b4..00000000 --- 
a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/topic.o.d +++ /dev/null @@ -1,78 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/topic.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/topic.o.d.raw -c -o Release/obj.target/node-librdkafka/src/topic.o ../src/topic.cc -Release/obj.target/node-librdkafka/src/topic.o: ../src/topic.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../../nan/nan.h /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h ../deps/librdkafka/src-cpp/rdkafkacpp.h \ - ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - 
/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h -../src/topic.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/workers.o.d b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/workers.o.d deleted file mode 100644 index 55212d3c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/Release/obj.target/node-librdkafka/src/workers.o.d +++ /dev/null @@ -1,86 +0,0 @@ -cmd_Release/obj.target/node-librdkafka/src/workers.o := g++ '-DNODE_GYP_MODULE_NAME=node-librdkafka' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' 
'-DV8_DEPRECATION_WARNINGS=1' '-DV8_DEPRECATION_WARNINGS' '-DV8_IMMINENT_DEPRECATION_WARNINGS' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-D__STDC_FORMAT_MACROS' '-DOPENSSL_NO_PINSHARED' '-DOPENSSL_THREADS' '-DBUILDING_NODE_EXTENSION' -I/tmp/.cache/node-gyp/14.18.1/include/node -I/tmp/.cache/node-gyp/14.18.1/src -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include -I/tmp/.cache/node-gyp/14.18.1/deps/zlib -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include -I../../nan -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka -I../deps/librdkafka/src -I../deps/librdkafka/src-cpp -fPIC -pthread -Wall -Wextra -Wno-unused-parameter -m64 -O3 -fno-omit-frame-pointer -fno-exceptions -std=gnu++1y -std=c++14 -MMD -MF ./Release/.deps/Release/obj.target/node-librdkafka/src/workers.o.d.raw -c -o Release/obj.target/node-librdkafka/src/workers.o ../src/workers.cc -Release/obj.target/node-librdkafka/src/workers.o: ../src/workers.cc \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h ../../nan/nan.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8config.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_version.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node.h \ - /tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h \ - ../../nan/nan_callbacks.h ../../nan/nan_callbacks_12_inl.h \ - ../../nan/nan_maybe_43_inl.h ../../nan/nan_converters.h \ - ../../nan/nan_converters_43_inl.h ../../nan/nan_new.h \ - ../../nan/nan_implementation_12_inl.h ../../nan/nan_persistent_12_inl.h \ - ../../nan/nan_weak.h ../../nan/nan_object_wrap.h ../../nan/nan_private.h \ - ../../nan/nan_typedarray_contents.h ../../nan/nan_json.h \ - ../../nan/nan_scriptorigin.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h \ - ../deps/librdkafka/src-cpp/rdkafkacpp.h ../deps/librdkafka/src/rdkafka.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h \ - 
/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h \ - /tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h -../src/workers.cc: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/workers.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/errno.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/unix.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/threadpool.h: -/tmp/.cache/node-gyp/14.18.1/include/node/uv/linux.h: -../../nan/nan.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8.h: -/tmp/.cache/node-gyp/14.18.1/include/node/cppgc/common.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-internal.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8config.h: -/tmp/.cache/node-gyp/14.18.1/include/node/v8-platform.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_version.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_buffer.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node.h: -/tmp/.cache/node-gyp/14.18.1/include/node/node_object_wrap.h: -../../nan/nan_callbacks.h: -../../nan/nan_callbacks_12_inl.h: -../../nan/nan_maybe_43_inl.h: -../../nan/nan_converters.h: -../../nan/nan_converters_43_inl.h: -../../nan/nan_new.h: -../../nan/nan_implementation_12_inl.h: -../../nan/nan_persistent_12_inl.h: -../../nan/nan_weak.h: -../../nan/nan_object_wrap.h: -../../nan/nan_private.h: -../../nan/nan_typedarray_contents.h: -../../nan/nan_json.h: -../../nan/nan_scriptorigin.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/common.h: -../deps/librdkafka/src-cpp/rdkafkacpp.h: -../deps/librdkafka/src/rdkafka.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/errors.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/producer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/connection.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/config.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/callbacks.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/topic.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/kafka-consumer.h: -/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/src/admin.h: diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/deps/librdkafka/config.h.d 
b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/deps/librdkafka/config.h.d deleted file mode 100644 index 2064af5c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/.deps/deps/librdkafka/config.h.d +++ /dev/null @@ -1 +0,0 @@ -cmd_deps/librdkafka/config.h := LD_LIBRARY_PATH=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/Release/lib.host:/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/Release/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; cd ../deps; mkdir -p librdkafka; node ../util/configure diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/node-librdkafka.node b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/node-librdkafka.node deleted file mode 100755 index adf5d823..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/node-librdkafka.node and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/deps/librdkafka.stamp b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/deps/librdkafka.stamp deleted file mode 100644 index e69de29b..00000000 diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka.node b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka.node deleted file mode 100755 index adf5d823..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka.node and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/admin.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/admin.o deleted file mode 100644 index 09f193d9..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/admin.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/binding.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/binding.o deleted file mode 100644 index f61fa36d..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/binding.o and /dev/null differ diff 
--git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/callbacks.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/callbacks.o deleted file mode 100644 index 77d96657..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/callbacks.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/common.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/common.o deleted file mode 100644 index d6ac5f13..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/common.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/config.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/config.o deleted file mode 100644 index 99f99c1c..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/config.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/connection.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/connection.o deleted file mode 100644 index de4e3a64..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/connection.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/errors.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/errors.o deleted file mode 100644 index 12204151..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/errors.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/kafka-consumer.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/kafka-consumer.o deleted file mode 100644 index bd8baeb5..00000000 Binary files 
a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/kafka-consumer.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/producer.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/producer.o deleted file mode 100644 index 2b6799bc..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/producer.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/topic.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/topic.o deleted file mode 100644 index 803af025..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/topic.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/workers.o b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/workers.o deleted file mode 100644 index 7bdb7aef..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/Release/obj.target/node-librdkafka/src/workers.o and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/binding.Makefile b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/binding.Makefile deleted file mode 100644 index 04d39643..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/binding.Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# This file is generated by gyp; do not edit. - -export builddir_name ?= ./build/. -.PHONY: all -all: - $(MAKE) node-librdkafka diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/config.gypi b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/config.gypi deleted file mode 100644 index 2e5283b5..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/config.gypi +++ /dev/null @@ -1,431 +0,0 @@ -# Do not edit. 
File was generated by node-gyp's "configure" step -{ - "target_defaults": { - "cflags": [], - "default_configuration": "Release", - "defines": [], - "include_dirs": [], - "libraries": [] - }, - "variables": { - "asan": 0, - "build_v8_with_gn": "false", - "coverage": "false", - "dcheck_always_on": 0, - "debug_nghttp2": "false", - "debug_node": "false", - "enable_lto": "false", - "enable_pgo_generate": "false", - "enable_pgo_use": "false", - "error_on_warn": "false", - "force_dynamic_crt": 0, - "gas_version": "2.28", - "host_arch": "x64", - "icu_data_in": "../../deps/icu-tmp/icudt69l.dat", - "icu_endianness": "l", - "icu_gyp_path": "tools/icu/icu-generic.gyp", - "icu_path": "deps/icu-small", - "icu_small": "false", - "icu_ver_major": "69", - "is_debug": 0, - "llvm_version": "0.0", - "napi_build_version": "8", - "node_byteorder": "little", - "node_debug_lib": "false", - "node_enable_d8": "false", - "node_install_npm": "true", - "node_library_files": [ - "lib/readline.js", - "lib/_stream_wrap.js", - "lib/worker_threads.js", - "lib/dgram.js", - "lib/constants.js", - "lib/sys.js", - "lib/dns.js", - "lib/_stream_passthrough.js", - "lib/diagnostics_channel.js", - "lib/path.js", - "lib/_http_server.js", - "lib/tls.js", - "lib/_stream_writable.js", - "lib/process.js", - "lib/querystring.js", - "lib/wasi.js", - "lib/_stream_duplex.js", - "lib/tty.js", - "lib/v8.js", - "lib/_http_incoming.js", - "lib/perf_hooks.js", - "lib/console.js", - "lib/string_decoder.js", - "lib/buffer.js", - "lib/_stream_readable.js", - "lib/_http_client.js", - "lib/stream.js", - "lib/os.js", - "lib/_tls_wrap.js", - "lib/cluster.js", - "lib/_http_outgoing.js", - "lib/punycode.js", - "lib/http.js", - "lib/events.js", - "lib/repl.js", - "lib/child_process.js", - "lib/module.js", - "lib/fs.js", - "lib/trace_events.js", - "lib/_tls_common.js", - "lib/https.js", - "lib/domain.js", - "lib/inspector.js", - "lib/_stream_transform.js", - "lib/util.js", - "lib/logger.js", - "lib/_http_common.js", - "lib/assert.js", - "lib/async_hooks.js", - "lib/url.js", - "lib/net.js", - "lib/crypto.js", - "lib/_http_agent.js", - "lib/timers.js", - "lib/vm.js", - "lib/http2.js", - "lib/zlib.js", - "lib/fs/promises.js", - "lib/strontium/relational_require_cache.js", - "lib/internal/trace_events_async_hooks.js", - "lib/internal/js_stream_socket.js", - "lib/internal/socket_list.js", - "lib/internal/dgram.js", - "lib/internal/constants.js", - "lib/internal/stream_base_commons.js", - "lib/internal/tls.js", - "lib/internal/querystring.js", - "lib/internal/inspector_async_hook.js", - "lib/internal/tty.js", - "lib/internal/idna.js", - "lib/internal/v8_prof_processor.js", - "lib/internal/buffer.js", - "lib/internal/encoding.js", - "lib/internal/histogram.js", - "lib/internal/options.js", - "lib/internal/freeze_intrinsics.js", - "lib/internal/socketaddress.js", - "lib/internal/cli_table.js", - "lib/internal/watchdog.js", - "lib/internal/http.js", - "lib/internal/repl.js", - "lib/internal/child_process.js", - "lib/internal/errors.js", - "lib/internal/freelist.js", - "lib/internal/event_target.js", - "lib/internal/error_serdes.js", - "lib/internal/util.js", - "lib/internal/blob.js", - "lib/internal/assert.js", - "lib/internal/v8_prof_polyfill.js", - "lib/internal/async_hooks.js", - "lib/internal/fixed_queue.js", - "lib/internal/url.js", - "lib/internal/net.js", - "lib/internal/worker.js", - "lib/internal/dtrace.js", - "lib/internal/validators.js", - "lib/internal/timers.js", - "lib/internal/abort_controller.js", - "lib/internal/priority_queue.js", - 
"lib/internal/heap_utils.js", - "lib/internal/blocklist.js", - "lib/internal/linkedlist.js", - "lib/internal/policy/sri.js", - "lib/internal/policy/manifest.js", - "lib/internal/fs/dir.js", - "lib/internal/fs/promises.js", - "lib/internal/fs/utils.js", - "lib/internal/fs/streams.js", - "lib/internal/fs/read_file_context.js", - "lib/internal/fs/rimraf.js", - "lib/internal/fs/watchers.js", - "lib/internal/fs/sync_write_stream.js", - "lib/internal/assert/calltracker.js", - "lib/internal/assert/assertion_error.js", - "lib/internal/vm/module.js", - "lib/internal/process/policy.js", - "lib/internal/process/promises.js", - "lib/internal/process/worker_thread_only.js", - "lib/internal/process/execution.js", - "lib/internal/process/task_queues.js", - "lib/internal/process/report.js", - "lib/internal/process/per_thread.js", - "lib/internal/process/signal.js", - "lib/internal/process/warning.js", - "lib/internal/process/esm_loader.js", - "lib/internal/dns/promises.js", - "lib/internal/dns/utils.js", - "lib/internal/streams/legacy.js", - "lib/internal/streams/state.js", - "lib/internal/streams/utils.js", - "lib/internal/streams/duplexpair.js", - "lib/internal/streams/buffer_list.js", - "lib/internal/streams/end-of-stream.js", - "lib/internal/streams/pipeline.js", - "lib/internal/streams/lazy_transform.js", - "lib/internal/streams/passthrough.js", - "lib/internal/streams/readable.js", - "lib/internal/streams/writable.js", - "lib/internal/streams/duplex.js", - "lib/internal/streams/transform.js", - "lib/internal/streams/destroy.js", - "lib/internal/streams/from.js", - "lib/internal/worker/io.js", - "lib/internal/worker/js_transferable.js", - "lib/internal/console/constructor.js", - "lib/internal/console/global.js", - "lib/internal/debugger/inspect.js", - "lib/internal/debugger/inspect_repl.js", - "lib/internal/debugger/inspect_client.js", - "lib/internal/http2/compat.js", - "lib/internal/http2/core.js", - "lib/internal/http2/util.js", - "lib/internal/main/inspect.js", - "lib/internal/main/print_help.js", - "lib/internal/main/prof_process.js", - "lib/internal/main/eval_stdin.js", - "lib/internal/main/repl.js", - "lib/internal/main/run_third_party_main.js", - "lib/internal/main/check_syntax.js", - "lib/internal/main/run_main_module.js", - "lib/internal/main/worker_thread.js", - "lib/internal/main/eval_string.js", - "lib/internal/readline/utils.js", - "lib/internal/timers/promises.js", - "lib/internal/source_map/source_map_cache.js", - "lib/internal/source_map/prepare_stack_trace.js", - "lib/internal/source_map/source_map.js", - "lib/internal/relational_require_cache/require_cache.js", - "lib/internal/relational_require_cache/loader.js", - "lib/internal/relational_require_cache/cost_watcher.js", - "lib/internal/relational_require_cache/base_object.js", - "lib/internal/relational_require_cache/recorder.js", - "lib/internal/util/iterable_weak_map.js", - "lib/internal/util/inspect.js", - "lib/internal/util/types.js", - "lib/internal/util/comparisons.js", - "lib/internal/util/inspector.js", - "lib/internal/util/debuglog.js", - "lib/internal/bootstrap/pre_execution.js", - "lib/internal/bootstrap/node.js", - "lib/internal/bootstrap/environment.js", - "lib/internal/bootstrap/loaders.js", - "lib/internal/bootstrap/switches/is_not_main_thread.js", - "lib/internal/bootstrap/switches/does_not_own_process_state.js", - "lib/internal/bootstrap/switches/does_own_process_state.js", - "lib/internal/bootstrap/switches/is_main_thread.js", - "lib/internal/modules/run_main.js", - "lib/internal/modules/package_json_reader.js", 
- "lib/internal/modules/esm/create_dynamic_module.js", - "lib/internal/modules/esm/loader.js", - "lib/internal/modules/esm/transform_source.js", - "lib/internal/modules/esm/translators.js", - "lib/internal/modules/esm/module_map.js", - "lib/internal/modules/esm/get_format.js", - "lib/internal/modules/esm/get_source.js", - "lib/internal/modules/esm/module_job.js", - "lib/internal/modules/esm/resolve.js", - "lib/internal/modules/cjs/loader.js", - "lib/internal/modules/cjs/helpers.js", - "lib/internal/crypto/sig.js", - "lib/internal/crypto/random.js", - "lib/internal/crypto/diffiehellman.js", - "lib/internal/crypto/hash.js", - "lib/internal/crypto/certificate.js", - "lib/internal/crypto/scrypt.js", - "lib/internal/crypto/keys.js", - "lib/internal/crypto/keygen.js", - "lib/internal/crypto/cipher.js", - "lib/internal/crypto/util.js", - "lib/internal/crypto/pbkdf2.js", - "lib/internal/cluster/utils.js", - "lib/internal/cluster/master.js", - "lib/internal/cluster/round_robin_handle.js", - "lib/internal/cluster/shared_handle.js", - "lib/internal/cluster/child.js", - "lib/internal/cluster/worker.js", - "lib/internal/test/binding.js", - "lib/internal/repl/history.js", - "lib/internal/repl/utils.js", - "lib/internal/repl/await.js", - "lib/internal/child_process/serialization.js", - "lib/internal/per_context/domexception.js", - "lib/internal/per_context/primordials.js", - "lib/internal/per_context/messageport.js" - ], - "node_module_version": 83, - "node_no_browser_globals": "false", - "node_prefix": "/", - "node_release_urlbase": "", - "node_section_ordering_info": "", - "node_shared": "false", - "node_shared_brotli": "false", - "node_shared_cares": "false", - "node_shared_http_parser": "false", - "node_shared_libuv": "false", - "node_shared_nghttp2": "false", - "node_shared_openssl": "false", - "node_shared_zlib": "false", - "node_tag": "", - "node_target_type": "executable", - "node_use_bundled_v8": "true", - "node_use_dtrace": "false", - "node_use_etw": "false", - "node_use_node_code_cache": "true", - "node_use_node_snapshot": "true", - "node_use_openssl": "true", - "node_use_v8_platform": "true", - "node_with_ltcg": "false", - "node_without_node_options": "false", - "openssl_fips": "", - "openssl_is_fips": "false", - "ossfuzz": "false", - "shlib_suffix": "so.83", - "target_arch": "x64", - "v8_enable_31bit_smis_on_64bit_arch": 0, - "v8_enable_gdbjit": 0, - "v8_enable_i18n_support": 1, - "v8_enable_inspector": 1, - "v8_enable_lite_mode": 0, - "v8_enable_object_print": 1, - "v8_enable_pointer_compression": 0, - "v8_no_strict_aliasing": 1, - "v8_optimized_debug": 1, - "v8_promise_internal_field_count": 1, - "v8_random_seed": 0, - "v8_trace_maps": 0, - "v8_use_siphash": 1, - "want_separate_host_toolset": 0, - "nodedir": "/tmp/.cache/node-gyp/14.18.1", - "standalone_static_library": 1, - "cache_lock_stale": "60000", - "ham_it_up": "", - "legacy_bundling": "", - "sign_git_tag": "", - "user_agent": "npm/6.14.15 node/v14.18.1 linux x64", - "always_auth": "", - "bin_links": "true", - "key": "", - "allow_same_version": "", - "description": "true", - "fetch_retries": "2", - "heading": "npm", - "if_present": "", - "init_version": "1.0.0", - "user": "", - "prefer_online": "", - "force": "", - "only": "", - "read_only": "", - "cache_min": "10", - "init_license": "ISC", - "editor": "vi", - "rollback": "true", - "tag_version_prefix": "v", - "cache_max": "Infinity", - "timing": "", - "userconfig": "/tmp/.npmrc", - "engine_strict": "", - "init_author_name": "", - "init_author_url": "", - "preid": "", - "tmp": 
"/tmp", - "depth": "Infinity", - "package_lock_only": "", - "save_dev": "", - "usage": "", - "metrics_registry": "https://registry.npm.taobao.org/", - "otp": "", - "package_lock": "true", - "progress": "true", - "https_proxy": "", - "save_prod": "", - "audit": "true", - "cidr": "", - "onload_script": "", - "sso_type": "oauth", - "rebuild_bundle": "true", - "save_bundle": "", - "shell": "bash", - "dry_run": "", - "format_package_lock": "true", - "prefix": "/var/fc/lang/nodejs14_alinode", - "scope": "", - "registry": "https://registry.npm.taobao.org/", - "browser": "", - "cache_lock_wait": "10000", - "ignore_prepublish": "", - "save_optional": "", - "searchopts": "", - "versions": "", - "cache": "/tmp/.npm", - "send_metrics": "", - "global_style": "", - "ignore_scripts": "", - "version": "", - "local_address": "", - "viewer": "man", - "node_gyp": "/var/fc/lang/nodejs14_alinode/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js", - "audit_level": "low", - "prefer_offline": "", - "color": "true", - "sign_git_commit": "", - "fetch_retry_mintimeout": "10000", - "maxsockets": "50", - "offline": "", - "sso_poll_frequency": "500", - "umask": "0022", - "fund": "true", - "fetch_retry_maxtimeout": "60000", - "logs_max": "10", - "message": "%s", - "ca": "", - "cert": "", - "global": "", - "link": "", - "access": "", - "also": "", - "save": "true", - "unicode": "true", - "production": "true", - "before": "", - "long": "", - "searchlimit": "20", - "unsafe_perm": "true", - "update_notifier": "true", - "auth_type": "legacy", - "node_version": "14.18.1", - "tag": "latest", - "git_tag_version": "true", - "commit_hooks": "true", - "script_shell": "", - "shrinkwrap": "true", - "fetch_retry_factor": "10", - "save_exact": "", - "strict_ssl": "true", - "dev": "", - "globalconfig": "/var/fc/lang/nodejs14_alinode/etc/npmrc", - "init_module": "/tmp/.npm-init.js", - "parseable": "", - "globalignorefile": "/var/fc/lang/nodejs14_alinode/etc/npmignore", - "cache_lock_retries": "10", - "searchstaleness": "900", - "node_options": "", - "save_prefix": "^", - "scripts_prepend_node_path": "warn-only", - "group": "10001", - "init_author_email": "", - "searchexclude": "", - "git": "git", - "optional": "true", - "json": "" - } -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/include/librdkafka/rdkafka.h b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/include/librdkafka/rdkafka.h deleted file mode 100755 index b85ba909..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/include/librdkafka/rdkafka.h +++ /dev/null @@ -1,7781 +0,0 @@ -/* - * librdkafka - Apache Kafka C library - * - * Copyright (c) 2012-2020 Magnus Edenhill - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file rdkafka.h - * @brief Apache Kafka C/C++ consumer and producer client library. - * - * rdkafka.h contains the public API for librdkafka. - * The API is documented in this file as comments prefixing the function, type, - * enum, define, etc. - * - * @sa For the C++ interface see rdkafkacpp.h - * - * @tableofcontents - */ - - -/* @cond NO_DOC */ -#ifndef _RDKAFKA_H_ -#define _RDKAFKA_H_ - -#include <stdio.h> -#include <inttypes.h> -#include <sys/types.h> - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* Restore indent */ -#endif -#endif - -#ifdef _WIN32 -#include <basetsd.h> -#ifndef WIN32_MEAN_AND_LEAN -#define WIN32_MEAN_AND_LEAN -#endif -#include <winsock2.h> /* for sockaddr, .. */ -#ifndef _SSIZE_T_DEFINED -#define _SSIZE_T_DEFINED -typedef SSIZE_T ssize_t; -#endif -#define RD_UNUSED -#define RD_INLINE __inline -#define RD_DEPRECATED __declspec(deprecated) -#define RD_FORMAT(...) -#undef RD_EXPORT -#ifdef LIBRDKAFKA_STATICLIB -#define RD_EXPORT -#else -#ifdef LIBRDKAFKA_EXPORTS -#define RD_EXPORT __declspec(dllexport) -#else -#define RD_EXPORT __declspec(dllimport) -#endif -#ifndef LIBRDKAFKA_TYPECHECKS -#define LIBRDKAFKA_TYPECHECKS 0 -#endif -#endif - -#else -#include <sys/socket.h> /* for sockaddr, .. */ - -#define RD_UNUSED __attribute__((unused)) -#define RD_INLINE inline -#define RD_EXPORT -#define RD_DEPRECATED __attribute__((deprecated)) - -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) -#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__))) -#else -#define RD_FORMAT(...) -#endif - -#ifndef LIBRDKAFKA_TYPECHECKS -#define LIBRDKAFKA_TYPECHECKS 1 -#endif -#endif - - -/** - * @brief Type-checking macros - * Compile-time checking that \p ARG is of type \p TYPE.
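/* Illustrative sketch of the technique behind the _LRK_TYPECHECK macros
 * defined below: a dead `if (0)` assignment inside a GNU C statement
 * expression forces a compile-time type check at zero runtime cost.
 * MY_TYPECHECK and demo_typecheck() are hypothetical, simplified analogues,
 * not part of the header itself. */
#define MY_TYPECHECK(RET, TYPE, ARG) \
        ({ if (0) { TYPE __t __attribute__((unused)) = (ARG); } RET; })

static int demo_typecheck (void) {
        const char *topic = "my_topic";
        /* Compiles cleanly; passing an int as ARG here instead would
         * trigger an incompatible-conversion diagnostic. */
        return MY_TYPECHECK(0, const char *, topic);
}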
- * @returns \p RET - */ -#if LIBRDKAFKA_TYPECHECKS -#define _LRK_TYPECHECK(RET,TYPE,ARG) \ - ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; }) - -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - } \ - RET; }) - -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - TYPE3 __t3 RD_UNUSED = (ARG3); \ - } \ - RET; }) -#else -#define _LRK_TYPECHECK(RET,TYPE,ARG) (RET) -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET) -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) (RET) -#endif - -/* @endcond */ - - -/** - * @name librdkafka version - * @{ - * - * - */ - -/** - * @brief librdkafka version - * - * Interpreted as hex \c MM.mm.rr.xx: - * - MM = Major - * - mm = minor - * - rr = revision - * - xx = pre-release id (0xff is the final release) - * - * E.g.: \c 0x000801ff = 0.8.1 - * - * @remark This value should only be used during compile time, - * for runtime checks of version use rd_kafka_version() - */ -#define RD_KAFKA_VERSION 0x010802ff - -/** - * @brief Returns the librdkafka version as integer. - * - * @returns Version integer. - * - * @sa See RD_KAFKA_VERSION for how to parse the integer format. - * @sa Use rd_kafka_version_str() to retrieve the version as a string. - */ -RD_EXPORT -int rd_kafka_version(void); - -/** - * @brief Returns the librdkafka version as string. - * - * @returns Version string - */ -RD_EXPORT -const char *rd_kafka_version_str (void); - -/**@}*/ - - -/** - * @name Constants, errors, types - * @{ - * - * - */ - - -/** - * @enum rd_kafka_type_t - * - * @brief rd_kafka_t handle type. - * - * @sa rd_kafka_new() - */ -typedef enum rd_kafka_type_t { - RD_KAFKA_PRODUCER, /**< Producer client */ - RD_KAFKA_CONSUMER /**< Consumer client */ -} rd_kafka_type_t; - - -/*! - * Timestamp types - * - * @sa rd_kafka_message_timestamp() - */ -typedef enum rd_kafka_timestamp_type_t { - RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ -} rd_kafka_timestamp_type_t; - - - -/** - * @brief Retrieve supported debug contexts for use with the \c \"debug\" - * configuration property. (runtime) - * - * @returns Comma-separated list of available debugging contexts. - */ -RD_EXPORT -const char *rd_kafka_get_debug_contexts(void); - -/** - * @brief Supported debug contexts. (compile time) - * - * @deprecated This compile time value may be outdated at runtime due to - * linking another version of the library. - * Use rd_kafka_get_debug_contexts() instead.
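/* Sketch of the runtime check the remark above recommends: compare the
 * compile-time RD_KAFKA_VERSION with rd_kafka_version().  Under the hex
 * layout MM.mm.rr.xx, 0x010802ff decodes as v1.8.2 (final release).
 * check_librdkafka_version() is a hypothetical helper. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void check_librdkafka_version (void) {
        if (rd_kafka_version() != RD_KAFKA_VERSION)
                fprintf(stderr,
                        "built against 0x%08x but running %s (0x%08x)\n",
                        RD_KAFKA_VERSION, rd_kafka_version_str(),
                        rd_kafka_version());
}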
- */ -#define RD_KAFKA_DEBUG_CONTEXTS \ - "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor,conf" - - -/* @cond NO_DOC */ -/* Private types to provide ABI compatibility */ -typedef struct rd_kafka_s rd_kafka_t; -typedef struct rd_kafka_topic_s rd_kafka_topic_t; -typedef struct rd_kafka_conf_s rd_kafka_conf_t; -typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; -typedef struct rd_kafka_queue_s rd_kafka_queue_t; -typedef struct rd_kafka_op_s rd_kafka_event_t; -typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; -typedef struct rd_kafka_consumer_group_metadata_s -rd_kafka_consumer_group_metadata_t; -typedef struct rd_kafka_error_s rd_kafka_error_t; -typedef struct rd_kafka_headers_s rd_kafka_headers_t; -typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; -/* @endcond */ - - -/** - * @enum rd_kafka_resp_err_t - * @brief Error codes. - * - * The negative error codes delimited by two underscores - * (\c RD_KAFKA_RESP_ERR__..) denote errors internal to librdkafka and are - * displayed as \c \"Local: \<error string..\>\", while the error codes - * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker - * errors and are displayed as \c \"Broker: \<error string..\>\". - * - * @sa Use rd_kafka_err2str() to translate an error code to a human readable string - */ -typedef enum { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - RD_KAFKA_RESP_ERR__BEGIN = -200, - /** Received message is incorrect */ - RD_KAFKA_RESP_ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - RD_KAFKA_RESP_ERR__DESTROY = -197, - /** Generic failure */ - RD_KAFKA_RESP_ERR__FAIL = -196, - /** Broker transport failure */ - RD_KAFKA_RESP_ERR__TRANSPORT = -195, - /** Critical system resource */ - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - RD_KAFKA_RESP_ERR__RESOLVE = -193, - /** Produced message timed out */ - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. - * This event is disabled by default, - * see the `enable.partition.eof` configuration property. */ - RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - RD_KAFKA_RESP_ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - RD_KAFKA_RESP_ERR__INVALID_ARG = -186, - /** Operation timed out */ - RD_KAFKA_RESP_ERR__TIMED_OUT = -185, - /** Queue is full */ - RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ - RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, - /** Broker node update */ - RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, - /** SSL error */ - RD_KAFKA_RESP_ERR__SSL = -181, - /** Waiting for coordinator to become available. */ - RD_KAFKA_RESP_ERR__WAIT_COORD = -180, - /** Unknown client group */ - RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ - RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish.
*/ - RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ - RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ - RD_KAFKA_RESP_ERR__CONFLICT = -173, - /** Wrong state */ - RD_KAFKA_RESP_ERR__STATE = -172, - /** Unknown protocol */ - RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ - RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, - /** No stored offset */ - RD_KAFKA_RESP_ERR__NO_OFFSET = -168, - /** Outdated */ - RD_KAFKA_RESP_ERR__OUTDATED = -167, - /** Timed out in queue */ - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, - /** Feature not supported by broker */ - RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, - /** Awaiting cache update */ - RD_KAFKA_RESP_ERR__WAIT_CACHE = -164, - /** Operation interrupted (e.g., due to yield)) */ - RD_KAFKA_RESP_ERR__INTR = -163, - /** Key serialization error */ - RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162, - /** Value serialization error */ - RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161, - /** Key deserialization error */ - RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160, - /** Value deserialization error */ - RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159, - /** Partial response */ - RD_KAFKA_RESP_ERR__PARTIAL = -158, - /** Modification attempted on read-only object */ - RD_KAFKA_RESP_ERR__READ_ONLY = -157, - /** No such entry / item not found */ - RD_KAFKA_RESP_ERR__NOENT = -156, - /** Read underflow */ - RD_KAFKA_RESP_ERR__UNDERFLOW = -155, - /** Invalid type */ - RD_KAFKA_RESP_ERR__INVALID_TYPE = -154, - /** Retry operation */ - RD_KAFKA_RESP_ERR__RETRY = -153, - /** Purged in queue */ - RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152, - /** Purged in flight */ - RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151, - /** Fatal error: see rd_kafka_fatal_error() */ - RD_KAFKA_RESP_ERR__FATAL = -150, - /** Inconsistent state */ - RD_KAFKA_RESP_ERR__INCONSISTENT = -149, - /** Gap-less ordering would not be guaranteed if proceeding */ - RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148, - /** Maximum poll interval exceeded */ - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147, - /** Unknown broker */ - RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146, - /** Functionality not configured */ - RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145, - /** Instance has been fenced */ - RD_KAFKA_RESP_ERR__FENCED = -144, - /** Application generated error */ - RD_KAFKA_RESP_ERR__APPLICATION = -143, - /** Assignment lost */ - RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142, - /** No operation performed */ - RD_KAFKA_RESP_ERR__NOOP = -141, - /** No offset to automatically reset to */ - RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, - - /** End internal error codes */ - RD_KAFKA_RESP_ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - RD_KAFKA_RESP_ERR_UNKNOWN = -1, - /** Success */ - RD_KAFKA_RESP_ERR_NO_ERROR = 0, - /** Offset out of range */ - RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - RD_KAFKA_RESP_ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - 
RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, - /** Coordinator load in progress */ - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, - /** Group coordinator load in progress */ -#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS - /** Coordinator not available */ - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15, - /** Group coordinator not available */ -#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE - /** Not coordinator */ - RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16, - /** Not coordinator for group */ -#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ - RD_KAFKA_RESP_ERR_NOT_COORDINATOR - /** Invalid topic */ - RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ - RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ - RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ - RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ - RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ - RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ - RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, - /** Unsupported version */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ -
RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, - /** Policy violation */ - RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44, - /** Broker received an out of order sequence number */ - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, - /** Broker received a duplicate sequence number */ - RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46, - /** Producer attempted an operation with an old epoch */ - RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47, - /** Producer attempted a transactional operation in an invalid state */ - RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48, - /** Producer attempted to use a producer id which is not - * currently assigned to its transactional id */ - RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49, - /** Transaction timeout is larger than the maximum - * value allowed by the broker's max.transaction.timeout.ms */ - RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50, - /** Producer attempted to update a transaction while another - * concurrent operation on the same transaction was ongoing */ - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51, - /** Indicates that the transaction coordinator sending a - * WriteTxnMarker is no longer the current coordinator for a - * given producer */ - RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52, - /** Transactional Id authorization failed */ - RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, - /** Security features are disabled */ - RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54, - /** Operation not attempted */ - RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55, - /** Disk error when trying to access log file on the disk */ - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ - RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57, - /** SASL Authentication failed */ - RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58, - /** Unknown Producer Id */ - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59, - /** Partition reassignment is in progress */ - RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60, - /** Delegation Token feature is not enabled */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, - /** Delegation Token is not found on server */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62, - /** Specified Principal is not valid Owner/Renewer */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, - /** Delegation Token requests are not allowed on this connection */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, - /** Delegation Token authorization failed */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, - /** Delegation Token is expired */ - RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66, - /** Supplied principalType is not supported */ - RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67, - /** The group is not empty */ - RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68, - /** The group id does not exist */ - RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69, - /** The fetch session ID was not found */ - RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70, - /** The fetch session epoch is invalid */ - RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71, - /** No matching listener */ - RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72, - /** Topic deletion is disabled */ - RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73, - /** Leader epoch is older than broker epoch */ - RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74, - /** Leader epoch is newer than broker epoch */ - RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75, - /** Unsupported compression type */ - 
RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, - /** Broker epoch has changed */ - RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77, - /** Leader high watermark is not caught up */ - RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78, - /** Group member needs a valid member ID */ - RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79, - /** Preferred leader was not available */ - RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, - /** Consumer group has reached maximum size */ - RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81, - /** Static consumer fenced by other consumer with same - * group.instance.id. */ - RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82, - /** Eligible partition leaders are not available */ - RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, - /** Leader election not needed for topic partition */ - RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84, - /** No partition reassignment is in progress */ - RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, - /** Deleting offsets of a topic while the consumer group is - * subscribed to it */ - RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, - /** Broker failed to validate record */ - RD_KAFKA_RESP_ERR_INVALID_RECORD = 87, - /** There are unstable offsets that need to be cleared */ - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88, - /** Throttling quota has been exceeded */ - RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89, - /** There is a newer producer with the same transactionalId - * which fences the current one */ - RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90, - /** Request illegally referred to resource that does not exist */ - RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91, - /** Request illegally referred to the same resource twice */ - RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92, - /** Requested credential would not meet criteria for acceptability */ - RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93, - /** Indicates that either the sender or recipient of a - * voter-only request is not one of the expected voters */ - RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94, - /** Invalid update version */ - RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95, - /** Unable to update finalized features due to server error */ - RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96, - /** Request principal deserialization failed during forwarding */ - RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97, - - RD_KAFKA_RESP_ERR_END_ALL, -} rd_kafka_resp_err_t; - - -/** - * @brief Error code value, name and description. - * Typically for use with language bindings to automatically expose - * the full set of librdkafka error codes. - */ -struct rd_kafka_err_desc { - rd_kafka_resp_err_t code;/**< Error code */ - const char *name; /**< Error name, same as code enum sans prefix */ - const char *desc; /**< Human readable error description. */ -}; - - -/** - * @brief Returns the full list of error codes. - */ -RD_EXPORT -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp); - - - - -/** - * @brief Returns a human readable representation of a kafka error. - * - * @param err Error code to translate - */ -RD_EXPORT -const char *rd_kafka_err2str (rd_kafka_resp_err_t err); - - - -/** - * @brief Returns the error code name (enum name). - * - * @param err Error code to translate - */ -RD_EXPORT -const char *rd_kafka_err2name (rd_kafka_resp_err_t err); - - -/** - * @brief Returns the last error code generated by a legacy API call - * in the current thread.
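/* Sketch: translating an error code both ways with the two functions just
 * declared; print_err() is a hypothetical helper. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void print_err (rd_kafka_resp_err_t err) {
        /* e.g. "UNKNOWN_TOPIC_OR_PART: Broker: Unknown topic or partition" */
        printf("%s: %s\n", rd_kafka_err2name(err), rd_kafka_err2str(err));
}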
- * - * The legacy APIs are the ones using errno to propagate error value, namely: - * - rd_kafka_topic_new() - * - rd_kafka_consume_start() - * - rd_kafka_consume_stop() - * - rd_kafka_consume() - * - rd_kafka_consume_batch() - * - rd_kafka_consume_callback() - * - rd_kafka_consume_queue() - * - rd_kafka_produce() - * - * The main use for this function is to avoid converting system \p errno - * values to rd_kafka_resp_err_t codes for legacy APIs. - * - * @remark The last error is stored per-thread, if multiple rd_kafka_t handles - * are used in the same application thread the developer needs to - * make sure rd_kafka_last_error() is called immediately after - * a failed API call. - * - * @remark errno propagation from librdkafka is not safe on Windows - * and should not be used, use rd_kafka_last_error() instead. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_last_error (void); - - -/** - * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t - * error code upon failure from the following functions: - * - rd_kafka_topic_new() - * - rd_kafka_consume_start() - * - rd_kafka_consume_stop() - * - rd_kafka_consume() - * - rd_kafka_consume_batch() - * - rd_kafka_consume_callback() - * - rd_kafka_consume_queue() - * - rd_kafka_produce() - * - * @param errnox System errno value to convert - * - * @returns Appropriate error code for \p errnox - * - * @remark A better alternative is to call rd_kafka_last_error() immediately - * after any of the above functions return -1 or NULL. - * - * @deprecated Use rd_kafka_last_error() to retrieve the last error code - * set by the legacy librdkafka APIs. - * - * @sa rd_kafka_last_error() - */ -RD_EXPORT RD_DEPRECATED -rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); - - -/** - * @brief Returns the thread-local system errno - * - * On most platforms this is the same as \p errno but in case of different - * runtimes between library and application (e.g., Windows static DLLs) - * this provides a means for exposing the errno librdkafka uses. - * - * @remark The value is local to the current calling thread. - * - * @deprecated Use rd_kafka_last_error() to retrieve the last error code - * set by the legacy librdkafka APIs. - */ -RD_EXPORT RD_DEPRECATED -int rd_kafka_errno (void); - - - - -/** - * @brief Returns the first fatal error set on this client instance, - * or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred. - * - * This function is to be used with the Idempotent Producer and \c error_cb - * to detect fatal errors. - * - * Generally all errors raised by \c error_cb are to be considered - * informational and temporary, the client will try to recover from all - * errors in a graceful fashion (by retrying, etc). - * - * However, some errors should logically be considered fatal to retain - * consistency; in particular a set of errors that may occur when using the - * Idempotent Producer and the in-order or exactly-once producer guarantees - * can't be satisfied. - * - * @param rk Client instance. - * @param errstr A human readable error string (nul-terminated) is written to - * this location that must be of at least \p errstr_size bytes. - * The \p errstr is only written to if there is a fatal error. - * @param errstr_size Writable size in \p errstr. - * - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else - * any other error code. 
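/* Sketch of the legacy-API error pattern described above: the legacy
 * rd_kafka_produce() call reports failure by returning -1, so the
 * per-thread rd_kafka_last_error() must be read immediately afterwards.
 * produce_or_report() is a hypothetical helper. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void produce_or_report (rd_kafka_topic_t *rkt,
                               void *payload, size_t len) {
        if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                             payload, len, NULL, 0, NULL) == -1)
                fprintf(stderr, "produce failed: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
}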
- */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size); - - -/** - * @brief Trigger a fatal error for testing purposes. - * - * Since there is no practical way to trigger real fatal errors in the - * idempotent producer, this method allows an application to trigger - * fabricated fatal errors in tests to check its error handling code. - * - * @param rk Client instance. - * @param err The underlying error code. - * @param reason A human readable error reason. - * Will be prefixed with "test_fatal_error: " to differentiate - * from real fatal errors. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or - * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error - * has already been triggered. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); - - -/** - * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if - * \p error is NULL. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error); - -/** - * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID", - * or an empty string if \p error is NULL. - * - * @remark The lifetime of the returned pointer is the same as the error object. - * - * @sa rd_kafka_err2name() - */ -RD_EXPORT -const char *rd_kafka_error_name (const rd_kafka_error_t *error); - -/** - * @returns a human readable error string for \p error, - * or an empty string if \p error is NULL. - * - * @remark The lifetime of the returned pointer is the same as the error object. - */ -RD_EXPORT -const char *rd_kafka_error_string (const rd_kafka_error_t *error); - - -/** - * @returns 1 if the error is a fatal error, indicating that the client - * instance is no longer usable, else 0 (also if \p error is NULL). - */ -RD_EXPORT -int rd_kafka_error_is_fatal (const rd_kafka_error_t *error); - - -/** - * @returns 1 if the operation may be retried, - * else 0 (also if \p error is NULL). - */ -RD_EXPORT -int rd_kafka_error_is_retriable (const rd_kafka_error_t *error); - - -/** - * @returns 1 if the error is an abortable transaction error in which case - * the application must call rd_kafka_abort_transaction() and - * start a new transaction with rd_kafka_begin_transaction() if it - * wishes to proceed with transactions. - * Else returns 0 (also if \p error is NULL). - * - * @remark The return value of this method is only valid for errors returned - * by the transactional API. - */ -RD_EXPORT -int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error); - -/** - * @brief Free and destroy an error object. - * - * @remark As a convenience it is permitted to pass a NULL \p error. - */ -RD_EXPORT -void rd_kafka_error_destroy (rd_kafka_error_t *error); - - -/** - * @brief Create a new error object with error \p code and optional - * human readable error string in \p fmt. - * - * This method is mainly to be used for mocking errors in application test code. - * - * The returned object must be destroyed with rd_kafka_error_destroy(). - */ -RD_EXPORT -rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); - - -/** - * @brief Topic+Partition place holder - * - * Generic place holder for a Topic+Partition and its related information - * used for multiple purposes: - * - consumer offset (see rd_kafka_commit(), et.al.)
- * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb()) - * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb()) - */ - -/** - * @brief Generic place holder for a specific Topic+Partition. - * - * @sa rd_kafka_topic_partition_list_new() - */ -typedef struct rd_kafka_topic_partition_s { - char *topic; /**< Topic name */ - int32_t partition; /**< Partition */ - int64_t offset; /**< Offset */ - void *metadata; /**< Metadata */ - size_t metadata_size; /**< Metadata size */ - void *opaque; /**< Opaque value for application use */ - rd_kafka_resp_err_t err; /**< Error code, depending on use. */ - void *_private; /**< INTERNAL USE ONLY, - * INITIALIZE TO ZERO, DO NOT TOUCH */ -} rd_kafka_topic_partition_t; - - -/** - * @brief Destroy a rd_kafka_topic_partition_t. - * @remark This must not be called for elements in a topic partition list. - */ -RD_EXPORT -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); - - -/** - * @brief A growable list of Topic+Partitions. - * - */ -typedef struct rd_kafka_topic_partition_list_s { - int cnt; /**< Current number of elements */ - int size; /**< Current allocated size */ - rd_kafka_topic_partition_t *elems; /**< Element array[] */ -} rd_kafka_topic_partition_list_t; - - -/** - * @brief Create a new list/vector Topic+Partition container. - * - * @param size Initial allocated size used when the expected number of - * elements is known or can be estimated. - * Avoids reallocation and possibly relocation of the - * elems array. - * - * @returns A newly allocated Topic+Partition list. - * - * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources - * in use by a list and the list itself. - * @sa rd_kafka_topic_partition_list_add() - */ -RD_EXPORT -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); - - -/** - * @brief Free all resources used by the list and the list itself. - */ -RD_EXPORT -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); - -/** - * @brief Add topic+partition to list - * - * @param rktparlist List to extend - * @param topic Topic name (copied) - * @param partition Partition id - * - * @returns The object which can be used to fill in additional fields. - */ -RD_EXPORT -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); - - -/** - * @brief Add range of partitions from \p start to \p stop inclusive. - * - * @param rktparlist List to extend - * @param topic Topic name (copied) - * @param start Start partition of range - * @param stop Last partition of range (inclusive) - */ -RD_EXPORT -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop); - - - -/** - * @brief Delete partition from list. - * - * @param rktparlist List to modify - * @param topic Topic name to match - * @param partition Partition to match - * - * @returns 1 if partition was found (and removed), else 0. - * - * @remark Any held indices to elems[] are unusable after this call returns 1. - */ -RD_EXPORT -int -rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); - - -/** - * @brief Delete partition from list by elems[] index. - * - * @returns 1 if partition was found (and removed), else 0.
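/* Sketch of the list-building pattern documented above; the topic name and
 * offset are placeholders, and example_offsets() is a hypothetical helper. */
#include <librdkafka/rdkafka.h>

static rd_kafka_topic_partition_list_t *example_offsets (void) {
        rd_kafka_topic_partition_list_t *offsets =
                rd_kafka_topic_partition_list_new(1);
        /* rd_kafka_topic_partition_list_add() returns the new element,
         * whose additional fields may then be filled in. */
        rd_kafka_topic_partition_list_add(offsets, "my_topic", 0)->offset = 16;
        /* Caller must eventually call
         * rd_kafka_topic_partition_list_destroy(offsets). */
        return offsets;
}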
- * - * @sa rd_kafka_topic_partition_list_del() - */ -RD_EXPORT -int -rd_kafka_topic_partition_list_del_by_idx ( - rd_kafka_topic_partition_list_t *rktparlist, - int idx); - - -/** - * @brief Make a copy of an existing list. - * - * @param src The existing list to copy. - * - * @returns A new list fully populated to be identical to \p src - */ -RD_EXPORT -rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); - - - - -/** - * @brief Set offset to \p offset for \p topic and \p partition - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or - * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found - * in the list. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset); - - - -/** - * @brief Find element by \p topic and \p partition. - * - * @returns a pointer to the first matching element, or NULL if not found. - */ -RD_EXPORT -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); - - -/** - * @brief Sort list using comparator \p cmp. - * - * If \p cmp is NULL the default comparator will be used that - * sorts by ascending topic name and partition. - * - * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp. - * - */ -RD_EXPORT void -rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *a, const void *b, - void *cmp_opaque), - void *cmp_opaque); - - -/**@}*/ - - - -/** - * @name Var-arg tag types - * @{ - * - */ - -/** - * @enum rd_kafka_vtype_t - * - * @brief Var-arg tag types - * - * @sa rd_kafka_producev() - */ -typedef enum rd_kafka_vtype_t { - RD_KAFKA_VTYPE_END, /**< va-arg sentinel */ - RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ - RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ - RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ - RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ - RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ - RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque - * value. This is the same as - * the _private field in - * rd_kafka_message_t, also known - * as the msg_opaque. */ - RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ - RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ - RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t) - * Message Header */ - RD_KAFKA_VTYPE_HEADERS, /**< (rd_kafka_headers_t *) Headers list */ -} rd_kafka_vtype_t; - - -/** - * @brief VTYPE + argument container for use with rd_kafka_produce_va() - * - * See RD_KAFKA_V_..() macros below for which union field corresponds - * to which RD_KAFKA_VTYPE_... - */ -typedef struct rd_kafka_vu_s { - rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ - /** Value union, see RD_KAFKA_V_.. macros for which field to use. */ - union { - const char *cstr; - rd_kafka_topic_t *rkt; - int i; - int32_t i32; - int64_t i64; - struct { - void *ptr; - size_t size; - } mem; - struct { - const char *name; - const void *val; - ssize_t size; - } header; - rd_kafka_headers_t *headers; - void *ptr; - char _pad[64]; /**< Padding size for future-proofness */ - } u; -} rd_kafka_vu_t; - -/** - * @brief Convenience macros for rd_kafka_vtype_t that takes the - * correct arguments for each vtype. - */ - -/*! 
- * va-arg end sentinel used to terminate the variable argument list - */ -#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END - -/*! - * Topic name (const char *) - * - * rd_kafka_vu_t field: u.cstr - */ -#define RD_KAFKA_V_TOPIC(topic) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ - (const char *)topic -/*! - * Topic object (rd_kafka_topic_t *) - * - * rd_kafka_vu_t field: u.rkt - */ -#define RD_KAFKA_V_RKT(rkt) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ - (rd_kafka_topic_t *)rkt -/*! - * Partition (int32_t) - * - * rd_kafka_vu_t field: u.i32 - */ -#define RD_KAFKA_V_PARTITION(partition) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ - (int32_t)partition -/*! - * Message value/payload pointer and length (void *, size_t) - * - * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size - */ -#define RD_KAFKA_V_VALUE(VALUE,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ - (void *)VALUE, (size_t)LEN -/*! - * Message key pointer and length (const void *, size_t) - * - * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size - */ -#define RD_KAFKA_V_KEY(KEY,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ - (void *)KEY, (size_t)LEN -/*! - * Message opaque pointer (void *) - * Same as \c msg_opaque, \c produce(.., msg_opaque), - * and \c rkmessage->_private . - * - * rd_kafka_vu_t field: u.ptr - */ -#define RD_KAFKA_V_OPAQUE(msg_opaque) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ - (void *)msg_opaque -/*! - * Message flags (int) - * @sa RD_KAFKA_MSG_F_COPY, et.al. - * - * rd_kafka_vu_t field: u.i - */ -#define RD_KAFKA_V_MSGFLAGS(msgflags) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), \ - (int)msgflags -/*! - * Timestamp in milliseconds since epoch UTC (int64_t). - * A value of 0 will use the current wall-clock time. - * - * rd_kafka_vu_t field: u.i64 - */ -#define RD_KAFKA_V_TIMESTAMP(timestamp) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ - (int64_t)timestamp -/*! - * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN). - * @sa rd_kafka_header_add() - * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed - * in the same call to producev(). - * - * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size - */ -#define RD_KAFKA_V_HEADER(NAME,VALUE,LEN) \ - _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ - const void *, VALUE, ssize_t, LEN), \ - (const char *)NAME, (const void *)VALUE, (ssize_t)LEN - -/*! - * Message Headers list (rd_kafka_headers_t *). - * The message object will assume ownership of the headers (unless producev() - * fails). - * Any existing headers will be replaced. - * @sa rd_kafka_message_set_headers() - * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed - * in the same call to producev(). - * - * rd_kafka_vu_t fields: u.headers - */ -#define RD_KAFKA_V_HEADERS(HDRS) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ - (rd_kafka_headers_t *)HDRS - - -/**@}*/ - - -/** - * @name Message headers - * @{ - * - * @brief Message headers consist of a list of (string key, binary value) pairs. - * Duplicate keys are supported and the order in which keys were - * added is retained.
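/* Sketch of the var-arg macros above combined in one rd_kafka_producev()
 * call; `rk` is assumed to be an existing producer handle, and the topic,
 * payload and header values are placeholders. */
#include <string.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t produce_example (rd_kafka_t *rk) {
        const char *msg = "hello";
        return rd_kafka_producev(
                rk,
                RD_KAFKA_V_TOPIC("my_topic"),
                RD_KAFKA_V_PARTITION(RD_KAFKA_PARTITION_UA),
                RD_KAFKA_V_VALUE((void *)msg, strlen(msg)),
                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), /* copy payload */
                RD_KAFKA_V_HEADER("source", "example", -1), /* len via strlen */
                RD_KAFKA_V_END);
}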
- * - * Header values are considered binary and may have three types of - * value: - * - proper value with size > 0 and a valid pointer - * - empty value with size = 0 and any non-NULL pointer - * - null value with size = 0 and a NULL pointer - * - * Headers require Apache Kafka broker version v0.11.0.0 or later. - * - * Header operations are O(n). - */ - - -/** - * @brief Create a new headers list. - * - * @param initial_count Preallocate space for this number of headers. - * Any number of headers may be added, updated and - * removed regardless of the initial count. - */ -RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count); - -/** - * @brief Destroy the headers list. The object and any returned value pointers - * are not usable after this call. - */ -RD_EXPORT void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs); - -/** - * @brief Make a copy of headers list \p src. - */ -RD_EXPORT rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src); - -/** - * @brief Add header with name \p name and value \p val (copied) of size - * \p size (not including null-terminator). - * - * @param hdrs Headers list. - * @param name Header name. - * @param name_size Header name size (not including the null-terminator). - * If -1 the \p name length is automatically acquired using - * strlen(). - * @param value Pointer to header value, or NULL (set size to 0 or -1). - * @param value_size Size of header value. If -1 the \p value is assumed to be a - * null-terminated string and the length is automatically - * acquired using strlen(). - * - * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, - * else RD_KAFKA_RESP_ERR_NO_ERROR. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size); - -/** - * @brief Remove all headers for the given key (if any). - * - * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, - * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, - * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); - - -/** - * @brief Find last header in list \p hdrs matching \p name. - * - * @param hdrs Headers list. - * @param name Header to find (last match). - * @param valuep (out) Set to a (null-terminated) const pointer to the value - * (may be NULL). - * @param sizep (out) Set to the value's size (not including null-terminator). - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else - * RD_KAFKA_RESP_ERR__NOENT. - * - * @remark The returned pointer in \p valuep includes a trailing null-terminator - * that is not accounted for in \p sizep. - * @remark The returned pointer is only valid as long as the headers list and - * the header item is valid. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, const void **valuep, size_t *sizep); - -/** - * @brief Iterator for headers matching \p name. - * - * Same semantics as rd_kafka_header_get_last() - * - * @param hdrs Headers to iterate. - * @param idx Iterator index, start at 0 and increment by one for each call - * as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned. - * @param name Header name to match. - * @param valuep (out) Set to a (null-terminated) const pointer to the value - * (may be NULL). - * @param sizep (out) Set to the value's size (not including null-terminator). 
- */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, const void **valuep, size_t *sizep); - - -/** - * @brief Iterator for all headers. - * - * Same semantics as rd_kafka_header_get() - * - * @sa rd_kafka_header_get() - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep); - - - -/**@}*/ - - - -/** - * @name Kafka messages - * @{ - * - */ - - - -// FIXME: This doesn't show up in docs for some reason -// "Compound rd_kafka_message_t is not documented." - -/** - * @brief A Kafka message as returned by the \c rd_kafka_consume*() family - * of functions as well as provided to the Producer \c dr_msg_cb(). - * - * For the consumer this object has two purposes: - * - provide the application with a consumed message. (\c err == 0) - * - report per-topic+partition consumer errors (\c err != 0) - * - * The application must check \c err to decide what action to take. - * - * When the application is finished with a message it must call - * rd_kafka_message_destroy() unless otherwise noted. - */ -typedef struct rd_kafka_message_s { - rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ - rd_kafka_topic_t *rkt; /**< Topic */ - int32_t partition; /**< Partition */ - void *payload; /**< Producer: original message payload. - * Consumer: Depends on the value of \c err : - * - \c err==0: Message payload. - * - \c err!=0: Error string */ - size_t len; /**< Depends on the value of \c err : - * - \c err==0: Message payload length - * - \c err!=0: Error string length */ - void *key; /**< Depends on the value of \c err : - * - \c err==0: Optional message key */ - size_t key_len; /**< Depends on the value of \c err : - * - \c err==0: Optional message key length*/ - int64_t offset; /**< Consumer: - * - Message offset (or offset for error - * if \c err!=0 if applicable). - * Producer, dr_msg_cb: - * Message offset assigned by broker. - * May be RD_KAFKA_OFFSET_INVALID - * for retried messages when - * idempotence is enabled. */ - void *_private; /**< Consumer: - * - rdkafka private pointer: DO NOT MODIFY - * Producer: - * - dr_msg_cb: - * msg_opaque from produce() call or - * RD_KAFKA_V_OPAQUE from producev(). */ -} rd_kafka_message_t; - - -/** - * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka. - */ -RD_EXPORT -void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); - - - - -/** - * @brief Returns the error string for an errored rd_kafka_message_t or NULL if - * there was no error. - * - * @remark This function MUST NOT be used with the producer. - */ -RD_EXPORT -const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage); - - -/** - * @brief Returns the message timestamp for a consumed message. - * - * The timestamp is the number of milliseconds since the epoch (UTC). - * - * \p tstype (if not NULL) is updated to indicate the type of timestamp. - * - * @returns message timestamp, or -1 if not available. - * - * @remark Message timestamps require broker version 0.10.0 or later. - */ -RD_EXPORT -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype); - - - -/** - * @brief Returns the latency for a produced message measured from - * the produce() call. - * - * @returns the latency in microseconds, or -1 if not available. 
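/* Sketch of the err-first contract of rd_kafka_message_t described above:
 * check rkmessage->err before interpreting payload/len, then hand the
 * message back.  handle_consumed() is a hypothetical helper. */
#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

static void handle_consumed (rd_kafka_message_t *rkmessage) {
        if (rkmessage->err)
                fprintf(stderr, "consume error: %s\n",
                        rd_kafka_message_errstr(rkmessage));
        else
                printf("offset %" PRId64 ": %.*s\n", rkmessage->offset,
                       (int)rkmessage->len, (const char *)rkmessage->payload);
        rd_kafka_message_destroy(rkmessage); /* hand ownership back */
}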
- */ -RD_EXPORT -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); - - -/** - * @brief Returns the broker id of the broker the message was produced to - * or fetched from. - * - * @returns a broker id if known, else -1. - */ -RD_EXPORT -int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage); - - -/** - * @brief Get the message header list. - * - * The returned pointer in \p *hdrsp is associated with the \p rkmessage and - * must not be used after destruction of the message object or the header - * list is replaced with rd_kafka_message_set_headers(). - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned, - * RD_KAFKA_RESP_ERR__NOENT if the message has no headers, - * or another error code if the headers could not be parsed. - * - * @remark Headers require broker version 0.11.0.0 or later. - * - * @remark As an optimization the raw protocol headers are parsed on - * the first call to this function. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); - -/** - * @brief Get the message header list and detach the list from the message - * making the application the owner of the headers. - * The application must eventually destroy the headers using - * rd_kafka_headers_destroy(). - * The message's headers will be set to NULL. - * - * Otherwise same semantics as rd_kafka_message_headers() - * - * @sa rd_kafka_message_headers - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); - - -/** - * @brief Replace the message's current headers with a new list. - * - * @param rkmessage The message to set headers. - * @param hdrs New header list. The message object assumes ownership of - * the list, the list will be destroyed automatically with - * the message object. - * The new headers list may be updated until the message object - * is passed or returned to librdkafka. - * - * @remark The existing headers object, if any, will be destroyed. - */ -RD_EXPORT -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs); - - -/** - * @brief Returns the number of header key/value pairs - * - * @param hdrs Headers to count - */ -RD_EXPORT size_t rd_kafka_header_cnt (const rd_kafka_headers_t *hdrs); - - -/** - * @enum rd_kafka_msg_status_t - * @brief Message persistence status can be used by the application to - * find out if a produced message was persisted in the topic log. - */ -typedef enum { - /** Message was never transmitted to the broker, or failed with - * an error indicating it was not written to the log. - * Application retry risks ordering, but not duplication. */ - RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0, - - /** Message was transmitted to broker, but no acknowledgement was - * received. - * Application retry risks ordering and duplication. */ - RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1, - - /** Message was written to the log and acknowledged by the broker. - * No reason for application to retry. - * Note: this value should only be trusted with \c acks=all. */ - RD_KAFKA_MSG_STATUS_PERSISTED = 2 -} rd_kafka_msg_status_t; - - -/** - * @brief Returns the message's persistence status in the topic log. - * - * @remark The message status is not available in on_acknowledgement - * interceptors. 
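/* Sketch: using the persistence status above, e.g. from a delivery report,
 * to decide whether an application-level retry risks duplication.
 * safe_to_retry() is a hypothetical helper. */
#include <librdkafka/rdkafka.h>

static int safe_to_retry (const rd_kafka_message_t *rkmessage) {
        /* NOT_PERSISTED: retrying risks reordering but not duplication;
         * POSSIBLY_PERSISTED would risk both. */
        return rd_kafka_message_status(rkmessage) ==
                RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
}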
- */ -RD_EXPORT rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage); - -/**@}*/ - - -/** - * @name Configuration interface - * @{ - * - * @brief Main/global configuration property interface - * - */ - -/** - * @enum rd_kafka_conf_res_t - * @brief Configuration result type - */ -typedef enum { - RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ - RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or - * property or value not supported in - * this build. */ - RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ -} rd_kafka_conf_res_t; - - -/** - * @brief Create configuration object. - * - * When providing your own configuration to the \c rd_kafka_*_new_*() calls - * the rd_kafka_conf_t object needs to be created with this function - * which will set up the defaults. - * I.e.: - * @code - * rd_kafka_conf_t *myconf; - * rd_kafka_conf_res_t res; - * - * myconf = rd_kafka_conf_new(); - * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600", - * errstr, sizeof(errstr)); - * if (res != RD_KAFKA_CONF_OK) - * die("%s\n", errstr); - * - * rk = rd_kafka_new(..., myconf); - * @endcode - * - * Please see CONFIGURATION.md for the default settings or use - * rd_kafka_conf_properties_show() to provide the information at runtime. - * - * The properties are identical to the Apache Kafka configuration properties - * whenever possible. - * - * @remark A successful call to rd_kafka_new() will assume ownership of - * the conf object and rd_kafka_conf_destroy() must not be called. - * - * @returns A new rd_kafka_conf_t object with defaults set. - * - * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy() - */ -RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_new(void); - - -/** - * @brief Destroys a conf object. - */ -RD_EXPORT -void rd_kafka_conf_destroy(rd_kafka_conf_t *conf); - - -/** - * @brief Creates a copy/duplicate of configuration object \p conf - * - * @remark Interceptors are NOT copied to the new configuration object. - * @sa rd_kafka_interceptor_f_on_conf_dup - */ -RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); - - -/** - * @brief Same as rd_kafka_conf_dup() but with an array of property name - * prefixes to filter out (ignore) when copying. - */ -RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter); - - - -/** - * @returns the configuration object used by an rd_kafka_t instance. - * For use with rd_kafka_conf_get(), et.al., to extract configuration - * properties from a running client. - * - * @remark the returned object is read-only and its lifetime is the same - * as the rd_kafka_t object. - */ -RD_EXPORT -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); - - -/** - * @brief Sets a configuration property. - * - * \p conf must have been previously created with rd_kafka_conf_new(). - * - * Fallthrough: - * Topic-level configuration properties may be set using this interface - * in which case they are applied on the \c default_topic_conf. - * If no \c default_topic_conf has been set one will be created. - * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will - * replace the current default topic configuration. - * - * @returns \c rd_kafka_conf_res_t to indicate success or failure. - * In case of failure \p errstr is updated to contain a human readable - * error string. - * - * @remark Setting properties or values that were disabled at build time due to - * missing dependencies will return RD_KAFKA_CONF_INVALID.
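/* Sketch of the conf lifecycle described above: create, set a property,
 * check the rd_kafka_conf_res_t result, then hand ownership to
 * rd_kafka_new().  The broker address is a placeholder and make_producer()
 * is a hypothetical helper. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_t *make_producer (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf); /* conf is still ours on failure */
                return NULL;
        }
        /* On success rd_kafka_new() assumes ownership of conf. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}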
- */ -RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); - - -/** - * @brief Enable event sourcing. - * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable - * for consumption by `rd_kafka_queue_poll()`. - */ -RD_EXPORT -void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); - - -/** - * @brief Generic event callback to be used with the event API to trigger - * callbacks for \c rd_kafka_event_t objects from a background - * thread serving the background queue. - * - * How to use: - * 1. First set the event callback on the configuration object with this - * function, followed by creating an rd_kafka_t instance - * with rd_kafka_new(). - * 2. Get the instance's background queue with rd_kafka_queue_get_background() - * and pass it as the reply/response queue to an API that takes an - * event queue, such as rd_kafka_CreateTopics(). - * 3. When the response event is ready and enqueued on the background queue the - * event callback will be triggered from the background thread. - * 4. Prior to destroying the client instance, lose your reference to the - * background queue by calling rd_kafka_queue_destroy(). - * - * The application must destroy the \c rkev passed to \p event_cb using - * rd_kafka_event_destroy(). - * - * The \p event_cb \c opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark This callback is a specialized alternative to the poll-based - * event API described in the Event interface section. - * - * @remark The \p event_cb will be called spontaneously from a background - * thread completely managed by librdkafka. - * Take care to perform proper locking of application objects. - * - * @warning The application MUST NOT call rd_kafka_destroy() from the - * event callback. - * - * @sa rd_kafka_queue_get_background - */ -RD_EXPORT void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)); - - -/** - * @deprecated See rd_kafka_conf_set_dr_msg_cb() - */ -RD_EXPORT -void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)); - -/** - * @brief \b Producer: Set delivery report callback in provided \p conf object. - * - * The delivery report callback will be called once for each message - * accepted by rd_kafka_produce() (et.al) with \p err set to indicate - * the result of the produce request. - * - * The callback is called when a message is successfully produced or - * if librdkafka encountered a permanent failure. - * Delivery errors occur when the retry count is exceeded, when the - * message.timeout.ms timeout is exceeded or there is a permanent error - * like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART. - * - * An application must call rd_kafka_poll() at regular intervals to - * serve queued delivery report callbacks. - * - * The broker-assigned offset can be retrieved with \c rkmessage->offset - * and the timestamp can be retrieved using rd_kafka_message_timestamp(). - * - * The \p dr_msg_cb \c opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * The per-message msg_opaque value is available in - * \c rd_kafka_message_t._private.
- * - * @remark The Idempotent Producer may return invalid timestamp - * (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), - * and offset (RD_KAFKA_OFFSET_INVALID) for retried messages - * that were previously successfully delivered but not properly - * acknowledged. - */ -RD_EXPORT -void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)); - - -/** - * @brief \b Consumer: Set consume callback for use with - * rd_kafka_consumer_poll() - * - * The \p consume_cb \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - */ -RD_EXPORT -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)); - -/** - * @brief \b Consumer: Set rebalance callback for use with - * coordinated consumer group balancing. - * - * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS - * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' - * contains the full partition set that was either assigned or revoked. - * - * Registering a \p rebalance_cb turns off librdkafka's automatic - * partition assignment/revocation and instead delegates that responsibility - * to the application's \p rebalance_cb. - * - * The rebalance callback is responsible for updating librdkafka's - * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS - * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle - * arbitrary rebalancing failures where \p err is neither of those. - * @remark In this latter case (arbitrary error), the application must - * call rd_kafka_assign(rk, NULL) to synchronize state. - * - * For eager/non-cooperative `partition.assignment.strategy` assignors, - * such as `range` and `roundrobin`, the application must use - * rd_kafka_assign() to set or clear the entire assignment. - * For the cooperative assignors, such as `cooperative-sticky`, the application - * must use rd_kafka_incremental_assign() for - * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() - * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. - * - * Without a rebalance callback this is done automatically by librdkafka - * but registering a rebalance callback gives the application flexibility - * in performing other operations along with the assigning/revocation, - * such as fetching offsets from an alternate location (on assign) - * or manually committing offsets (on revoke). - * - * rebalance_cb is always triggered exactly once when a rebalance completes - * with a new assignment, even if that assignment is empty. If an - * eager/non-cooperative assignor is configured, there will eventually be - * exactly one corresponding call to rebalance_cb to revoke these partitions - * (even if empty), whether this is due to a group rebalance or lost - * partitions. In the cooperative case, rebalance_cb will never be called if - * the set of partitions being revoked is empty (whether or not lost). - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark The \p partitions list is destroyed by librdkafka on return - * from the rebalance_cb and must not be freed or - * saved by the application. - * - * @remark Be careful when modifying the \p partitions list. - * Changing this list should only be done to change the initial - * offsets for each partition.
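To make the delivery-report flow above concrete, the following is a hedged sketch of a producer that registers a `dr_msg_cb` and lets it be served while waiting for delivery. It relies on `rd_kafka_produce()`, `rd_kafka_topic_new()` and `rd_kafka_flush()`, which are documented elsewhere in this header; the broker address and topic name are placeholders.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *m,
                      void *opaque) {
        if (m->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(m->err));
        else
                fprintf(stderr, "delivered to partition %d at offset %lld\n",
                        (int)m->partition, (long long)m->offset);
}

static void produce_one(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;

        rd_kafka_conf_set(conf, "bootstrap.servers",
                          "localhost:9092" /* placeholder */,
                          errstr, sizeof(errstr));
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        rkt = rd_kafka_topic_new(rk, "my_topic" /* placeholder */, NULL);

        rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                         (void *)"hello", 5, NULL, 0, NULL);

        rd_kafka_flush(rk, 10 * 1000); /* serves dr_msg_cb while waiting */
        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);
}
```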
- * But a function like `rd_kafka_position()` might have unexpected - * effects for instance when a consumer gets assigned a partition - * it used to consume at an earlier rebalance. In this case, the - * list of partitions will be updated with the old offset for that - * partition. It is therefore generally better to pass a copy - * of the list (see `rd_kafka_topic_partition_list_copy()`). - * The result of `rd_kafka_position()` is typically outdated in - * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. - * - * @sa rd_kafka_assign() - * @sa rd_kafka_incremental_assign() - * @sa rd_kafka_incremental_unassign() - * @sa rd_kafka_assignment_lost() - * @sa rd_kafka_rebalance_protocol() - * - * The following example shows the application's responsibilities: - * @code - * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - * rd_kafka_topic_partition_list_t *partitions, - * void *opaque) { - * - * switch (err) - * { - * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - * // application may load offsets from arbitrary external - * // storage here and update \p partitions - * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) - * rd_kafka_incremental_assign(rk, partitions); - * else // EAGER - * rd_kafka_assign(rk, partitions); - * break; - * - * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - * if (manual_commits) // Optional explicit manual commit - * rd_kafka_commit(rk, partitions, 0); // sync commit - * - * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) - * rd_kafka_incremental_unassign(rk, partitions); - * else // EAGER - * rd_kafka_assign(rk, NULL); - * break; - * - * default: - * handle_unlikely_error(err); - * rd_kafka_assign(rk, NULL); // sync state - * break; - * } - * } - * @endcode - * - * @remark The above example lacks error handling for assign calls; see - * the examples/ directory. - */ -RD_EXPORT -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)); - - - -/** - * @brief \b Consumer: Set offset commit callback for use with consumer groups. - * - * The results of automatic or manual offset commits will be scheduled - * for this callback and are served by rd_kafka_consumer_poll(). - * - * If no partitions had valid offsets to commit this callback will be called - * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered - * an error. - * - * The \p offsets list contains per-partition information: - * - \c offset: committed offset (attempted) - * - \c err: commit error - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - */ -RD_EXPORT -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)); - - -/** - * @brief Set error callback in provided conf object. - * - * The error callback is used by librdkafka to signal warnings and errors - * back to the application. - * - * These errors should generally be considered informational and non-permanent; - * the client will try to recover automatically from all types of errors. - * Given that the client and cluster configuration is correct the - * application should treat these as temporary errors.
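Following the offset-commit callback semantics just described, here is a minimal sketch of such a callback; it only logs outcomes, and assumes the includes from the earlier producer sketch.

```c
/* Sketch: log the outcome of each offset commit. Register with
 * rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); */
static void offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *offsets,
                             void *opaque) {
        int i;

        if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
                return; /* nothing had valid offsets to commit: not an error */

        for (i = 0; i < offsets->cnt; i++) {
                const rd_kafka_topic_partition_t *p = &offsets->elems[i];
                fprintf(stderr, "commit %s [%d] offset %lld: %s\n",
                        p->topic, (int)p->partition, (long long)p->offset,
                        p->err ? rd_kafka_err2str(p->err) : "ok");
        }
}
```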
- * - * \p error_cb will be triggered with \c err set to RD_KAFKA_RESP_ERR__FATAL - * if a fatal error has been raised; in this case use rd_kafka_fatal_error() to - * retrieve the fatal error code and error string, and then begin terminating - * the client instance. - * - * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set - * with rd_kafka_conf_set_events, then the errors will be logged instead. - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - */ -RD_EXPORT -void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)); - -/** - * @brief Set throttle callback. - * - * The throttle callback is used to forward broker throttle times to the - * application for Produce and Fetch (consume) requests. - * - * Callbacks are triggered whenever a non-zero throttle time is returned by - * the broker, or when the throttle time drops back to zero. - * - * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at - * regular intervals to serve queued callbacks. - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark Requires broker version 0.9.0 or later. - */ -RD_EXPORT -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)); - - -/** - * @brief Set logger callback. - * - * The default is to print to stderr, but a syslog logger is also available, - * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives. - * Alternatively the application may provide its own logger callback. - * Or pass \p func as NULL to disable logging. - * - * This is the configuration alternative to the deprecated rd_kafka_set_logger() - * - * @remark The log_cb will be called spontaneously from librdkafka's internal - * threads unless logs have been forwarded to a poll queue through - * \c rd_kafka_set_log_queue(). - * An application MUST NOT call any librdkafka APIs or do any prolonged - * work in a non-forwarded \c log_cb. - */ -RD_EXPORT -void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); - - -/** - * @brief Set statistics callback in provided conf object. - * - * The statistics callback is triggered from rd_kafka_poll() every - * \c statistics.interval.ms (needs to be configured separately). - * Function arguments: - * - \p rk - Kafka handle - * - \p json - String containing the statistics data in JSON format - * - \p json_len - Length of \p json string. - * - \p opaque - Application-provided opaque as set by - * rd_kafka_conf_set_opaque(). - * - * For more information on the format of \p json, see - * https://github.com/edenhill/librdkafka/wiki/Statistics - * - * If the application wishes to hold on to the \p json pointer and free - * it at a later time it must return 1 from the \p stats_cb. - * If the application returns 0 from the \p stats_cb then librdkafka - * will immediately free the \p json pointer. - * - * See STATISTICS.md for a full definition of the JSON object. - */ -RD_EXPORT -void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)); - -/** - * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. - * - * @param conf the configuration to mutate. 
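The ownership rule for the statistics JSON is easy to get wrong, so here is a small sketch that consumes the payload in place and lets librdkafka free it, per the return-value contract described above.

```c
/* Sketch: emit the raw statistics JSON and let librdkafka free it.
 * Register with rd_kafka_conf_set_stats_cb(conf, stats_cb) and set a
 * non-zero statistics.interval.ms. */
static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                    void *opaque) {
        fwrite(json, 1, json_len, stderr);
        fputc('\n', stderr);
        return 0; /* 0: json is freed by librdkafka immediately;
                   * return 1 to keep it and free() it later yourself */
}
```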
- * @param oauthbearer_token_refresh_cb the callback to set; callback function - * arguments:
- * \p rk - Kafka handle
- * \p oauthbearer_config - Value of configuration property - * sasl.oauthbearer.config. - * \p opaque - Application-provided opaque set via - * rd_kafka_conf_set_opaque() - * - * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() - * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, - * typically based on the configuration defined in \c sasl.oauthbearer.config. - * - * The callback should invoke rd_kafka_oauthbearer_set_token() - * or rd_kafka_oauthbearer_set_token_failure() to indicate success - * or failure, respectively. - * - * The refresh operation is eventable and may be received via - * rd_kafka_queue_poll() with an event type of - * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH. - * - * Note that before any SASL/OAUTHBEARER broker connection can succeed the - * application must call rd_kafka_oauthbearer_set_token() once -- either - * directly or, more typically, by invoking either rd_kafka_poll() or - * rd_kafka_queue_poll() -- in order to cause retrieval of an initial token to - * occur. - * - * An unsecured JWT refresh handler is provided by librdkafka for development - * and testing purposes, it is enabled by setting - * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is - * mutually exclusive to using a refresh callback. - */ -RD_EXPORT -void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( - rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)); - -/** - * @brief Set socket callback. - * - * The socket callback is responsible for opening a socket - * according to the supplied \p domain, \p type and \p protocol. - * The socket shall be created with \c CLOEXEC set in a racefree fashion, if - * possible. - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * Default: - * - on linux: racefree CLOEXEC - * - others : non-racefree CLOEXEC - * - * @remark The callback will be called from an internal librdkafka thread. - */ -RD_EXPORT -void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)); - - - -/** - * @brief Set connect callback. - * - * The connect callback is responsible for connecting socket \p sockfd - * to peer address \p addr. - * The \p id field contains the broker identifier. - * - * \p connect_cb shall return 0 on success (socket connected) or an error - * number (errno) on error. - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark The callback will be called from an internal librdkafka thread. - */ -RD_EXPORT void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)); - -/** - * @brief Set close socket callback. - * - * Close a socket (optionally opened with socket_cb()). - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark The callback will be called from an internal librdkafka thread. - */ -RD_EXPORT void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)); - - - -#ifndef _WIN32 -/** - * @brief Set open callback. - * - * The open callback is responsible for opening the file specified by - * pathname, flags and mode. - * The file shall be opened with \c CLOEXEC set in a racefree fashion, if - * possible. 
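Referring back to the OAUTHBEARER refresh callback documented above, the following sketch shows the expected success/failure reporting pattern. `fetch_token_somehow()` and the principal name are hypothetical application-side details, and the one-hour lifetime is an assumption; real code should use the token's actual expiry.

```c
#include <time.h>

/* Hypothetical application helper: returns a bearer token string or NULL. */
extern const char *fetch_token_somehow(void);

static void oauth_refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config,
                             void *opaque) {
        char errstr[512];
        const char *token = fetch_token_somehow();
        /* assumed 1 hour lifetime (absolute wall-clock time in ms) */
        int64_t lifetime_ms = ((int64_t)time(NULL) + 3600) * 1000;

        if (!token) {
                rd_kafka_oauthbearer_set_token_failure(rk,
                                                       "token fetch failed");
                return;
        }

        if (rd_kafka_oauthbearer_set_token(rk, token, lifetime_ms,
                                           "myprincipal" /* assumed */,
                                           NULL, 0,
                                           errstr, sizeof(errstr)))
                rd_kafka_oauthbearer_set_token_failure(rk, errstr);
}
```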
- * - * Default: - * - on linux: racefree CLOEXEC - * - others : non-racefree CLOEXEC - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @remark The callback will be called from an internal librdkafka thread. - */ -RD_EXPORT -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)); -#endif - - -/** - * @brief Sets the verification callback of the broker certificate - * - * The verification callback is triggered from internal librdkafka threads - * upon connecting to a broker. On each connection attempt the callback - * will be called for each certificate in the broker's certificate chain, - * starting at the root certificate, as long as the application callback - * returns 1 (valid certificate). - * \c broker_name and \c broker_id correspond to the broker the connection - * is being made to. - * The \c x509_error argument indicates if OpenSSL's verification of - * the certificate succeeded (0) or failed (an OpenSSL error code). - * The application may set the SSL context error code by returning 0 - * from the verify callback and providing a non-zero SSL context error code - * in \c x509_error. - * If the verify callback sets \c x509_error to 0, returns 1, and the - * original \c x509_error was non-zero, the error on the SSL context will - * be cleared. - * \c x509_error is always a valid pointer to an int. - * - * \c depth is the depth of the current certificate in the chain, starting - * at the root certificate. - * - * The certificate itself is passed in binary DER format in \c buf of - * size \c size. - * - * The callback must return 1 if verification succeeds, or - * 0 if verification fails and then write a human-readable error message - * to \c errstr (limited to \c errstr_size bytes, including nul-term). - * - * The callback's \p opaque argument is the opaque set with - * rd_kafka_conf_set_opaque(). - * - * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else - * RD_KAFKA_CONF_INVALID. - * - * @warning This callback will be called from internal librdkafka threads. - * - * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution - * for a list of \p x509_error codes. - */ -RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque)); - - -/** - * @enum rd_kafka_cert_type_t - * - * @brief SSL certificate type - * - * @sa rd_kafka_conf_set_ssl_cert - */ -typedef enum rd_kafka_cert_type_t { - RD_KAFKA_CERT_PUBLIC_KEY, /**< Client's public key */ - RD_KAFKA_CERT_PRIVATE_KEY, /**< Client's private key */ - RD_KAFKA_CERT_CA, /**< CA certificate */ - RD_KAFKA_CERT__CNT, -} rd_kafka_cert_type_t; - -/** - * @enum rd_kafka_cert_enc_t - * - * @brief SSL certificate encoding - * - * @sa rd_kafka_conf_set_ssl_cert - */ -typedef enum rd_kafka_cert_enc_t { - RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ - RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - RD_KAFKA_CERT_ENC_PEM, /**< PEM */ - RD_KAFKA_CERT_ENC__CNT, -} rd_kafka_cert_enc_t; - - -/** - * @brief Set certificate/key \p cert_type from the \p cert_enc encoded - * memory at \p buffer of \p size bytes. - * - * @param conf Configuration object. - * @param cert_type Certificate or key type to configure. - * @param cert_enc Buffer encoding type.
- * @param buffer Memory pointer to encoded certificate or key. - * The memory is not referenced after this function returns. - * @param size Size of memory at \p buffer. - * @param errstr Memory where a human-readable error string will be written - * on failure. - * @param errstr_size Size of \p errstr, including space for nul-terminator. - * - * @returns RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the - * memory in \p buffer is of incorrect encoding, or if librdkafka - * was not built with SSL support. - * - * @remark Calling this method multiple times with the same \p cert_type - * will replace the previous value. - * - * @remark Calling this method with \p buffer set to NULL will clear the - * configuration for \p cert_type. - * - * @remark The private key may require a password, which must be specified - * with the `ssl.key.password` configuration property prior to - * calling this function. - * - * @remark Private and public keys in PEM format may also be set with the - * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. - * - * @remark CA certificate in PEM format may also be set with the - * `ssl.ca.pem` configuration property. - */ -RD_EXPORT rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size); - - -/** - * @brief Set callback_data for OpenSSL engine. - * - * @param conf Configuration object. - * @param callback_data passed to engine callbacks, - * e.g. \c ENGINE_load_ssl_client_cert. - * - * @remark The \c ssl.engine.location configuration must be set for this - * to have effect. - * - * @remark The memory pointed to by \p callback_data must remain valid for the - * lifetime of the configuration object and any Kafka clients that - * use it. - */ -RD_EXPORT -void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, - void *callback_data); - - -/** - * @brief Sets the application's opaque pointer that will be passed to callbacks - * - * @sa rd_kafka_opaque() - */ -RD_EXPORT -void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); - -/** - * @brief Retrieves the opaque pointer previously set - * with rd_kafka_conf_set_opaque() - */ -RD_EXPORT -void *rd_kafka_opaque(const rd_kafka_t *rk); - - - -/** - * @brief Sets the default topic configuration to use for automatically - * subscribed topics (e.g., through pattern-matched topics). - * The topic config object is not usable after this call. - * - * @warning Any topic configuration settings that have been set on the - * global rd_kafka_conf_t object will be overwritten by this call - * since the implicitly created default topic config object is - * replaced by the user-supplied one. - * - * @deprecated Set default topic level configuration on the - * global rd_kafka_conf_t object instead. - */ -RD_EXPORT -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); - -/** - * @brief Gets the default topic configuration as previously set with - * rd_kafka_conf_set_default_topic_conf() or that was implicitly created - * by configuring a topic-level property on the global \p conf object. - * - * @returns the \p conf's default topic configuration (if any), or NULL. - * - * @warning The returned topic configuration object is owned by the \p conf - * object.
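As an illustration of `rd_kafka_conf_set_ssl_cert()` above, here is a minimal sketch that installs an application-loaded PEM client certificate; `cert_pem` is assumed to hold the PEM text, error handling is minimal, and the usual `<stdio.h>`/`<string.h>` includes are assumed.

```c
/* Sketch: install a PEM-encoded client certificate on a conf object. */
static int set_client_cert(rd_kafka_conf_t *conf, const char *cert_pem) {
        char errstr[512];

        /* If the private key is encrypted, set ssl.key.password first. */
        if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
                                       RD_KAFKA_CERT_ENC_PEM,
                                       cert_pem, strlen(cert_pem),
                                       errstr, sizeof(errstr))
            != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "ssl cert: %s\n", errstr);
                return -1;
        }
        return 0; /* buffer is copied: cert_pem may be freed afterwards */
}
```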
It may be modified but not destroyed and its lifetime is - * the same as the \p conf object or the next call to - * rd_kafka_conf_set_default_topic_conf(). - */ -RD_EXPORT rd_kafka_topic_conf_t * -rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf); - - -/** - * @brief Retrieve configuration value for property \p name. - * - * If \p dest is non-NULL the value will be written to \p dest with at - * most \p dest_size. - * - * \p *dest_size is updated to the full length of the value, thus if - * \p *dest_size initially is smaller than the full length the application - * may reallocate \p dest to fit the returned \p *dest_size and try again. - * - * If \p dest is NULL only the full length of the value is returned. - * - * Fallthrough: - * Topic-level configuration properties from the \c default_topic_conf - * may be retrieved using this interface. - * - * @returns \p RD_KAFKA_CONF_OK if the property name matched, else - * \p RD_KAFKA_CONF_UNKNOWN. - */ -RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); - - -/** - * @brief Retrieve topic configuration value for property \p name. - * - * @sa rd_kafka_conf_get() - */ -RD_EXPORT -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); - - -/** - * @brief Dump the configuration properties and values of \p conf to an array - * with \"key\", \"value\" pairs. - * - * The number of entries in the array is returned in \p *cntp. - * - * The dump must be freed with `rd_kafka_conf_dump_free()`. - */ -RD_EXPORT -const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); - - -/** - * @brief Dump the topic configuration properties and values of \p conf - * to an array with \"key\", \"value\" pairs. - * - * The number of entries in the array is returned in \p *cntp. - * - * The dump must be freed with `rd_kafka_conf_dump_free()`. - */ -RD_EXPORT -const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, - size_t *cntp); - -/** - * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or - * `rd_kafka_topic_conf_dump()`. - */ -RD_EXPORT -void rd_kafka_conf_dump_free(const char **arr, size_t cnt); - -/** - * @brief Prints a table to \p fp of all supported configuration properties, - * their default values as well as a description. - * - * @remark All properties and values are shown, even those - * that have been disabled at build time due to missing dependencies. - */ -RD_EXPORT -void rd_kafka_conf_properties_show(FILE *fp); - -/**@}*/ - - -/** - * @name Topic configuration - * @{ - * - * @brief Topic configuration property interface - * - */ - - -/** - * @brief Create topic configuration object - * - * @sa Same semantics as for rd_kafka_conf_new(). - */ -RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); - - -/** - * @brief Creates a copy/duplicate of topic configuration object \p conf. - */ -RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t - *conf); - -/** - * @brief Creates a copy/duplicate of \p rk 's default topic configuration - * object. - */ -RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk); - - -/** - * @brief Destroys a topic conf object. - */ -RD_EXPORT -void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); - - -/** - * @brief Sets a single rd_kafka_topic_conf_t value by property name.
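The query-then-allocate pattern described for `rd_kafka_conf_get()` above looks like this in practice; a minimal sketch returning a heap copy of the value.

```c
#include <stdlib.h>

/* Sketch: fetch a configuration value of unknown length.
 * Returns a malloc()ed copy of the value, or NULL if unknown. */
static char *conf_get_dup(const rd_kafka_conf_t *conf, const char *name) {
        size_t size = 0;
        char *buf;

        /* First call with dest == NULL only reports the full length. */
        if (rd_kafka_conf_get(conf, name, NULL, &size) != RD_KAFKA_CONF_OK)
                return NULL; /* RD_KAFKA_CONF_UNKNOWN */

        buf = malloc(size); /* size covers the full value */
        if (!buf ||
            rd_kafka_conf_get(conf, name, buf, &size) != RD_KAFKA_CONF_OK) {
                free(buf);
                return NULL;
        }
        return buf; /* caller free()s */
}
```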
- * - * \p topic_conf should have been previously set up - * with `rd_kafka_topic_conf_new()`. - * - * @returns rd_kafka_conf_res_t to indicate success or failure. - */ -RD_EXPORT -rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); - -/** - * @brief Sets the application's opaque pointer that will be passed to all topic - * callbacks as the \c rkt_opaque argument. - * - * @sa rd_kafka_topic_opaque() - */ -RD_EXPORT -void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, - void *rkt_opaque); - - -/** - * @brief \b Producer: Set partitioner callback in provided topic conf object. - * - * The partitioner may be called in any thread at any time, - * it may be called multiple times for the same message/key. - * - * The callback's \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The callback's \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * Partitioner function constraints: - * - MUST NOT call any rd_kafka_*() functions except: - * rd_kafka_topic_partition_available() - * - MUST NOT block or execute for prolonged periods of time. - * - MUST return a value between 0 and partition_cnt-1, or the - * special \c RD_KAFKA_PARTITION_UA value if partitioning - * could not be performed. - */ -RD_EXPORT -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)); - - -/** - * @brief \b Producer: Set message queueing order comparator callback. - * - * The callback may be called in any thread at any time, - * it may be called multiple times for the same message. - * - * Ordering comparator function constraints: - * - MUST be stable sort (same input gives same output). - * - MUST NOT call any rd_kafka_*() functions. - * - MUST NOT block or execute for prolonged periods of time. - * - * The comparator shall compare the two messages and return: - * - < 0 if message \p a should be inserted before message \p b. - * - >=0 if message \p a should be inserted after message \p b. - * - * @remark Insert sorting will be used to enqueue the message in the - * correct queue position, this comes at a cost of O(n). - * - * @remark If `queuing.strategy=fifo` new messages are enqueued to the - * tail of the queue regardless of msg_order_cmp, but retried messages - * are still affected by msg_order_cmp. - * - * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, - * DO NOT USE IN PRODUCTION. - */ -RD_EXPORT void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)); - - -/** - * @brief Check if partition is available (has a leader broker). - * - * @returns 1 if the partition is available, else 0. - * - * @warning This function must only be called from inside a partitioner function - */ -RD_EXPORT -int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, - int32_t partition); - - -/******************************************************************* - * * - * Partitioners provided by rdkafka * - * * - *******************************************************************/ - -/** - * @brief Random partitioner. - * - * Will try not to return unavailable partitions. 
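To illustrate the partitioner constraints listed above, here is a toy key-hash partitioner. The hash itself is illustrative only; the built-in partitioners documented below are what production code should normally use.

```c
#include <stdint.h>

/* Sketch: deterministic key-hash partitioner honoring the constraints:
 * stable for a given key, no blocking, no rd_kafka_*() calls except
 * rd_kafka_topic_partition_available(). */
static int32_t toy_partitioner(const rd_kafka_topic_t *rkt,
                               const void *keydata, size_t keylen,
                               int32_t partition_cnt,
                               void *rkt_opaque, void *msg_opaque) {
        uint32_t h = 2166136261u; /* FNV-1a style, for illustration */
        size_t i;
        int32_t p;

        for (i = 0; i < keylen; i++)
                h = (h ^ ((const unsigned char *)keydata)[i]) * 16777619u;

        p = (int32_t)(h % (uint32_t)partition_cnt);
        if (!rd_kafka_topic_partition_available(rkt, p))
                return RD_KAFKA_PARTITION_UA; /* partitioning not possible */
        return p;
}
/* registered with:
 * rd_kafka_topic_conf_set_partitioner_cb(topic_conf, toy_partitioner); */
```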
- * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a random partition between 0 and \p partition_cnt - 1. - * - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); - -/** - * @brief Consistent partitioner. - * - * Uses consistent hashing to map identical keys onto identical partitions. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on - * the CRC value of the key - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); - -/** - * @brief Consistent-Random partitioner. - * - * This is the default partitioner. - * Uses consistent hashing to map identical keys onto identical partitions, and - * messages without keys will be assigned via the random partitioner. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on - * the CRC value of the key (if provided) - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); - - -/** - * @brief Murmur2 partitioner (Java compatible). - * - * Uses consistent hashing to map identical keys onto identical partitions - * using Java-compatible Murmur2 hashing. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a partition between 0 and \p partition_cnt - 1. - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - -/** - * @brief Consistent-Random Murmur2 partitioner (Java compatible). - * - * Uses consistent hashing to map identical keys onto identical partitions - * using Java-compatible Murmur2 hashing. - * Messages without keys will be assigned via the random partitioner. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a partition between 0 and \p partition_cnt - 1. - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - - -/** - * @brief FNV-1a partitioner. - * - * Uses consistent hashing to map identical keys onto identical partitions - * using FNV-1a hashing. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a partition between 0 and \p partition_cnt - 1. 
- */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - - -/** - * @brief Consistent-Random FNV-1a partitioner. - * - * Uses consistent hashing to map identical keys onto identical partitions - * using FNV-1a hashing. - * Messages without keys will be assigned via the random partitioner. - * - * The \p rkt_opaque argument is the opaque set by - * rd_kafka_topic_conf_set_opaque(). - * The \p msg_opaque argument is the per-message opaque - * passed to produce(). - * - * @returns a partition between 0 and \p partition_cnt - 1. - */ -RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - - -/**@}*/ - - - -/** - * @name Main Kafka and Topic object handles - * @{ - * - * - */ - - - - -/** - * @brief Creates a new Kafka handle and starts its operation according to the - * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). - * - * \p conf is an optional struct created with `rd_kafka_conf_new()` that will - * be used instead of the default configuration. - * The \p conf object is freed by this function on success and must not be used - * or destroyed by the application subsequently. - * See `rd_kafka_conf_set()` et.al for more information. - * - * \p errstr must be a pointer to memory of at least size \p errstr_size where - * `rd_kafka_new()` may write a human readable error message in case the - * creation of a new handle fails, in which case the function returns NULL. - * - * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER - * rd_kafka_t handle is created it may either operate in the - * legacy simple consumer mode using the rd_kafka_consume_start() - * interface, or the High-level KafkaConsumer API. - * @remark An application must only use one of these groups of APIs on a given - * rd_kafka_t RD_KAFKA_CONSUMER handle. - - * - * @returns The Kafka handle on success or NULL on error (see \p errstr) - * - * @sa To destroy the Kafka handle, use rd_kafka_destroy(). - */ -RD_EXPORT -rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, - char *errstr, size_t errstr_size); - - -/** - * @brief Destroy Kafka handle. - * - * @remark This is a blocking operation. - * @remark rd_kafka_consumer_close() will be called from this function - * if the instance type is RD_KAFKA_CONSUMER, a \c group.id was - * configured, and the rd_kafka_consumer_close() was not - * explicitly called by the application. This in turn may - * trigger consumer callbacks, such as rebalance_cb. - * Use rd_kafka_destroy_flags() with - * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour. - * - * @sa rd_kafka_destroy_flags() - */ -RD_EXPORT -void rd_kafka_destroy(rd_kafka_t *rk); - - -/** - * @brief Destroy Kafka handle according to specified destroy flags - * - */ -RD_EXPORT -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags); - -/** - * @brief Flags for rd_kafka_destroy_flags() - */ - -/*! - * Don't call consumer_close() to leave group and commit final offsets. - * - * This also disables consumer callbacks to be called from rd_kafka_destroy*(), - * such as rebalance_cb. - * - * The consumer group handler is still closed internally, but from an - * application perspective none of the functionality from consumer_close() - * is performed.
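The handle lifecycle described above, including the conf ownership rule, looks roughly like this; a sketch assuming the conf is only consumed by `rd_kafka_new()` on success.

```c
/* Sketch: create and tear down a producer handle. On success
 * rd_kafka_new() takes ownership of conf; on failure the application
 * still owns it and must destroy it. */
static rd_kafka_t *make_producer(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* ...rd_kafka_conf_set() calls go here... */

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }
        return rk; /* caller eventually calls rd_kafka_destroy(rk) */
}
```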
- */ -#define RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE 0x8 - - - -/** - * @brief Returns Kafka handle name. - */ -RD_EXPORT -const char *rd_kafka_name(const rd_kafka_t *rk); - - -/** - * @brief Returns Kafka handle type. - */ -RD_EXPORT -rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); - - -/** - * @brief Returns this client's broker-assigned group member id. - * - * @remark This currently requires the high-level KafkaConsumer - * - * @returns An allocated string containing the current broker-assigned group - * member id, or NULL if not available. - * The application must free the string with \p free() or - * rd_kafka_mem_free() - */ -RD_EXPORT -char *rd_kafka_memberid (const rd_kafka_t *rk); - - - -/** - * @brief Returns the ClusterId as reported in broker metadata. - * - * @param rk Client instance. - * @param timeout_ms If there is no cached value from metadata retrieval - * then this specifies the maximum amount of time - * (in milliseconds) the call will block waiting - * for metadata to be retrieved. - * Use 0 for non-blocking calls. - - * @remark Requires broker version >=0.10.0 and api.version.request=true. - * - * @remark The application must free the returned pointer - * using rd_kafka_mem_free(). - * - * @returns a newly allocated string containing the ClusterId, or NULL - * if no ClusterId could be retrieved in the allotted timespan. - */ -RD_EXPORT -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); - - -/** - * @brief Returns the current ControllerId as reported in broker metadata. - * - * @param rk Client instance. - * @param timeout_ms If there is no cached value from metadata retrieval - * then this specifies the maximum amount of time - * (in milliseconds) the call will block waiting - * for metadata to be retrieved. - * Use 0 for non-blocking calls. - - * @remark Requires broker version >=0.10.0 and api.version.request=true. - * - * @returns the controller broker id (>= 0), or -1 if no ControllerId could be - * retrieved in the allotted timespan. - */ -RD_EXPORT -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); - - -/** - * @brief Creates a new topic handle for topic named \p topic. - * - * \p conf is an optional configuration for the topic created with - * `rd_kafka_topic_conf_new()` that will be used instead of the default - * topic configuration. - * The \p conf object is freed by this function and must not be used or - * destroyed by the application subsequently. - * See `rd_kafka_topic_conf_set()` et.al for more information. - * - * Topic handles are refcounted internally and calling rd_kafka_topic_new() - * again with the same topic name will return the previous topic handle - * without updating the original handle's configuration. - * Applications must eventually call rd_kafka_topic_destroy() for each - * successful call to rd_kafka_topic_new() to clear up resources. - * - * @returns the new topic handle or NULL on error (use rd_kafka_errno2err() - * to convert system \p errno to an rd_kafka_resp_err_t error code). - * - * @sa rd_kafka_topic_destroy() - */ -RD_EXPORT -rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf); - - - -/** - * @brief Lose the application's topic handle refcount as previously created - * with `rd_kafka_topic_new()`. - * - * @remark Since topic objects are refcounted (both internally and for the app) - * the topic object might not actually be destroyed by this call, - * but the application must consider the object destroyed.
- */ -RD_EXPORT -void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt); - - -/** - * @brief Returns the topic name. - */ -RD_EXPORT -const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); - - -/** - * @brief Get the \p rkt_opaque pointer that was set in the topic configuration - * with rd_kafka_topic_conf_set_opaque(). - */ -RD_EXPORT -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); - - -/** - * @brief Unassigned partition. - * - * The unassigned partition is used by the producer API for messages - * that should be partitioned using the configured or default partitioner. - */ -#define RD_KAFKA_PARTITION_UA ((int32_t)-1) - - -/** - * @brief Polls the provided kafka handle for events. - * - * Events will cause application provided callbacks to be called. - * - * The \p timeout_ms argument specifies the maximum amount of time - * (in milliseconds) that the call will block waiting for events. - * For non-blocking calls, provide 0 as \p timeout_ms. - * To wait indefinitely for an event, provide -1. - * - * @remark An application should make sure to call poll() at regular - * intervals to serve any queued callbacks waiting to be called. - * @remark If your producer doesn't have any callback set (in particular - * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) - * you might choose not to call poll(), though this is not - * recommended. - * - * Events: - * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] - * - error callbacks (rd_kafka_conf_set_error_cb()) [all] - * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] - * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] - * - OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] - * - * @returns the number of events served. - */ -RD_EXPORT -int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); - - -/** - * @brief Cancels the current callback dispatcher (rd_kafka_poll(), - * rd_kafka_consume_callback(), etc). - * - * A callback may use this to force an immediate return to the calling - * code (caller of e.g. rd_kafka_poll()) without processing any further - * events. - * - * @remark This function MUST ONLY be called from within a librdkafka callback. - */ -RD_EXPORT -void rd_kafka_yield (rd_kafka_t *rk); - - - - -/** - * @brief Pause producing or consumption for the provided list of partitions. - * - * Success or error is returned per-partition \p err in the \p partitions list. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - - - -/** - * @brief Resume producing or consumption for the provided list of partitions. - * - * Success or error is returned per-partition \p err in the \p partitions list. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - - - - -/** - * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets - * for partition. - * - * Offsets are returned in \p *low and \p *high respectively. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high, int timeout_ms); - - -/** - * @brief Get last known low (oldest/beginning) and high (newest/end) offsets - * for partition.
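As a quick illustration of the watermark query above, here is a minimal sketch that prints a partition's current offset span; topic name and timeout are placeholders.

```c
/* Sketch: ask the broker for the current offset span of a partition. */
static void show_span(rd_kafka_t *rk, const char *topic, int32_t partition) {
        int64_t lo = 0, hi = 0;
        rd_kafka_resp_err_t err =
                rd_kafka_query_watermark_offsets(rk, topic, partition,
                                                 &lo, &hi, 5000 /* ms */);

        if (err)
                fprintf(stderr, "query failed: %s\n", rd_kafka_err2str(err));
        else
                printf("%s [%d] offsets [%lld, %lld)\n",
                       topic, (int)partition, (long long)lo, (long long)hi);
}
```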
- * - * The low offset is updated periodically (if statistics.interval.ms is set) - * while the high offset is updated on each fetched message set from the broker. - * - * If there is no cached offset (either low or high, or both) then - * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset. - * - * Offsets are returned in \p *low and \p *high respectively. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. - * - * @remark Shall only be used with an active consumer instance. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high); - - - -/** - * @brief Look up the offsets for the given partitions by timestamp. - * - * The returned offset for each partition is the earliest offset whose - * timestamp is greater than or equal to the given timestamp in the - * corresponding partition. - * - * The timestamps to query are represented as \c offset in \p offsets - * on input, and \c offset will contain the offset on output. - * - * The function will block for at most \p timeout_ms milliseconds. - * - * @remark Duplicate Topic+Partitions are not supported. - * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were queried (do note - * that per-partition errors might be set), - * RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched - * within \p timeout_ms, - * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p offsets list is empty, - * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown, - * RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders - * for the given partitions. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms); - - - -/** - * @brief Allocate and zero memory using the same allocator librdkafka uses. - * - * This is typically an abstraction for the calloc(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * allocating pointers that are used by librdkafka. - * - * \p rk can be set to return memory allocated by a specific \c rk instance - * otherwise pass NULL for \p rk. - * - * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using - * rd_kafka_mem_free() - */ -RD_EXPORT -void *rd_kafka_mem_calloc (rd_kafka_t *rk, size_t num, size_t size); - - - -/** - * @brief Allocate memory using the same allocator librdkafka uses. - * - * This is typically an abstraction for the malloc(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * allocating pointers that are used by librdkafka. - * - * \p rk can be set to return memory allocated by a specific \c rk instance - * otherwise pass NULL for \p rk. - * - * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using - * rd_kafka_mem_free() - */ -RD_EXPORT -void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size); - - - -/** - * @brief Free pointer returned by librdkafka - * - * This is typically an abstraction for the free(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * freeing pointers returned by librdkafka. - * - * In standard setups it is usually not necessary to use this interface - * rather than the free(3) function.
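The in/out use of the `.offset` field in `rd_kafka_offsets_for_times()` is worth spelling out; a sketch for a single partition, using the `rd_kafka_topic_partition_list_*()` helpers declared elsewhere in this header.

```c
/* Sketch: find the first offset at or after a wall-clock timestamp. */
static int64_t offset_for_ts(rd_kafka_t *rk, const char *topic,
                             int32_t partition, int64_t ts_ms) {
        int64_t result = RD_KAFKA_OFFSET_INVALID;
        rd_kafka_topic_partition_list_t *offs =
                rd_kafka_topic_partition_list_new(1);

        /* On input .offset carries the timestamp (ms) to look up. */
        rd_kafka_topic_partition_list_add(offs, topic, partition)->offset =
                ts_ms;

        if (!rd_kafka_offsets_for_times(rk, offs, 5000 /* ms */) &&
            !offs->elems[0].err)
                result = offs->elems[0].offset; /* output: the offset */

        rd_kafka_topic_partition_list_destroy(offs);
        return result;
}
```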
- * - * \p rk must be set for memory returned by APIs that take an \c rk argument, - * for other APIs pass NULL for \p rk. - * - * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs - * that explicitly mention using this function for freeing. - */ -RD_EXPORT -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); - - -/**@}*/ - - - - - -/** - * @name Queue API - * @{ - * - * Message queues allow the application to re-route consumed messages - * from multiple topic+partitions into one single queue point. - * This queue point containing messages from a number of topic+partitions - * may then be served by a single rd_kafka_consume*_queue() call, - * rather than one call per topic+partition combination. - */ - - -/** - * @brief Create a new message queue. - * - * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al. - */ -RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk); - -/** - * Destroy a queue, purging all of its enqueued messages. - */ -RD_EXPORT -void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); - - -/** - * @returns a reference to the main librdkafka event queue. - * This is the queue served by rd_kafka_poll(). - * - * Use rd_kafka_queue_destroy() to lose the reference. - */ -RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); - - -/** - * @returns a reference to the librdkafka consumer queue. - * This is the queue served by rd_kafka_consumer_poll(). - * - * Use rd_kafka_queue_destroy() to lose the reference. - * - * @remark rd_kafka_queue_destroy() MUST be called on this queue - * prior to calling rd_kafka_consumer_close(). - */ -RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); - -/** - * @returns a reference to the partition's queue, or NULL if - * partition is invalid. - * - * Use rd_kafka_queue_destroy() to lose the reference. - * - * @remark rd_kafka_queue_destroy() MUST be called on this queue - * - * @remark This function only works on consumers. - */ -RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition); - -/** - * @returns a reference to the background thread queue, or NULL if the - * background queue is not enabled. - * - * To enable the background thread queue set a generic event handler callback - * with rd_kafka_conf_set_background_event_cb() on the client instance - * configuration object (rd_kafka_conf_t). - * - * The background queue is polled and served by librdkafka and MUST NOT be - * polled, forwarded, or otherwise managed by the application, it may only - * be used as the destination queue passed to queue-enabled APIs, such as - * the Admin API. - * - * The background thread queue provides the application with an automatically - * polled queue that triggers the event callback in a background thread, - * this background thread is completely managed by librdkafka. - * - * Use rd_kafka_queue_destroy() to lose the reference. - * - * @warning The background queue MUST NOT be read from (polled, consumed, etc), - * or forwarded from. - */ -RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); - - -/** - * @brief Forward/re-route queue \p src to \p dst. - * If \p dst is \c NULL the forwarding is removed. - * - * The internal refcounts for both queues are increased. - * - * @remark Regardless of whether \p dst is NULL or not, after calling this - * function, \p src will not forward its fetch queue to the consumer - * queue.
- */ -RD_EXPORT -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); - -/** - * @brief Forward librdkafka logs (and debug) to the specified queue - * for serving with one of the ..poll() calls. - * - * This allows an application to serve log callbacks (\c log_cb) - * in its thread of choice. - * - * @param rk Client instance. - * @param rkqu Queue to forward logs to. If the value is NULL the logs - * are forwarded to the main queue. - * - * @remark The configuration property \c log.queue MUST also be set to true. - * - * @remark librdkafka maintains its own reference to the provided queue. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu); - - -/** - * @returns the current number of elements in queue. - */ -RD_EXPORT -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); - - -/** - * @brief Enable IO event triggering for queue. - * - * To ease integration with IO based polling loops this API - * allows an application to create a separate file-descriptor - * that librdkafka will write \p payload (of size \p size) to - * whenever a new element is enqueued on a previously empty queue. - * - * To remove event triggering call with \p fd = -1. - * - * librdkafka will maintain a copy of the \p payload. - * - * @remark IO and callback event triggering are mutually exclusive. - * @remark When using forwarded queues the IO event must only be enabled - * on the final forwarded-to (destination) queue. - * @remark The file-descriptor/socket must be set to non-blocking. - */ -RD_EXPORT -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size); - -/** - * @brief Enable callback event triggering for queue. - * - * The callback will be called from an internal librdkafka thread - * when a new element is enqueued on a previously empty queue. - * - * To remove event triggering call with \p event_cb = NULL. - * - * The \p qev_opaque is passed to the callback's \p qev_opaque argument. - * - * @remark IO and callback event triggering are mutually exclusive. - * @remark Since the callback may be triggered from internal librdkafka - * threads, the application must not perform any pro-longed work in - * the callback, or call any librdkafka APIs (for the same rd_kafka_t - * handle). - */ -RD_EXPORT -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *qev_opaque), - void *qev_opaque); - - -/** - * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu. - * - * An application may use this from another thread to force - * an immediate return to the calling code (caller of rd_kafka_queue_poll()). - * Must not be used from signal handlers since that may cause deadlocks. 
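The IO event mechanism above is meant for integrating librdkafka into an existing poll(2)/epoll loop. The following POSIX sketch wires a pipe into such a loop; `serve_queue()` is a hypothetical application function that drains the queue, and the wakeup-byte draining strategy is an assumption of this sketch.

```c
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Hypothetical application function that serves events on rkqu. */
extern void serve_queue(rd_kafka_queue_t *rkqu);

/* Sketch: wake an existing poll(2) loop when rkqu becomes non-empty. */
static void run_io_loop(rd_kafka_queue_t *rkqu, volatile int *run) {
        int fds[2];
        struct pollfd pfd;

        if (pipe(fds) == -1)
                return;
        /* The fd librdkafka writes to must be non-blocking. */
        fcntl(fds[1], F_SETFL, O_NONBLOCK);

        /* One wakeup payload per empty->non-empty transition. */
        rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);

        pfd.fd = fds[0];
        pfd.events = POLLIN;
        while (*run) {
                if (poll(&pfd, 1, 1000) > 0) {
                        char buf[8];
                        (void)read(fds[0], buf, sizeof(buf)); /* drain */
                        serve_queue(rkqu);
                }
        }

        rd_kafka_queue_io_event_enable(rkqu, -1, NULL, 0); /* disable */
        close(fds[0]);
        close(fds[1]);
}
```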
- */ -RD_EXPORT -void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); - - -/**@}*/ - -/** - * - * @name Simple Consumer API (legacy) - * @{ - * - */ - - -#define RD_KAFKA_OFFSET_BEGINNING -2 /**< Start consuming from beginning of - * kafka partition queue: oldest msg */ -#define RD_KAFKA_OFFSET_END -1 /**< Start consuming from end of kafka - * partition queue: next msg */ -#define RD_KAFKA_OFFSET_STORED -1000 /**< Start consuming from offset retrieved - * from offset store */ -#define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ - - -/** @cond NO_DOC */ -#define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */ -/** @endcond */ - -/** - * @brief Start consuming \p CNT messages from topic's current end offset. - * - * That is, if current end offset is 12345 and \p CNT is 200, it will start - * consuming from offset \c 12345-200 = \c 12145. */ -#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) - -/** - * @brief Start consuming messages for topic \p rkt and \p partition - * at offset \p offset which may either be an absolute \c (0..N) - * or one of the logical offsets: - * - RD_KAFKA_OFFSET_BEGINNING - * - RD_KAFKA_OFFSET_END - * - RD_KAFKA_OFFSET_STORED - * - RD_KAFKA_OFFSET_TAIL - * - * rdkafka will attempt to keep \c queued.min.messages (config property) - * messages in the local queue by repeatedly fetching batches of messages - * from the broker until the threshold is reached. - * - * The application shall use one of the `rd_kafka_consume*()` functions - * to consume messages from the local queue, each kafka message being - * represented as a `rd_kafka_message_t *` object. - * - * `rd_kafka_consume_start()` must not be called multiple times for the same - * topic and partition without stopping consumption first with - * `rd_kafka_consume_stop()`. - * - * @returns 0 on success or -1 on error in which case errno is set accordingly: - * - EBUSY - Conflicts with an existing or previous subscription - * (RD_KAFKA_RESP_ERR__CONFLICT) - * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id) - * (RD_KAFKA_RESP_ERR__INVALID_ARG) - * - ESRCH - requested \p partition is invalid. - * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - * - ENOENT - topic is unknown in the Kafka cluster. - * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - * - * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t` - */ -RD_EXPORT -int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset); - -/** - * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to - * the provided queue \p rkqu (which must have been previously allocated - * with `rd_kafka_queue_new()`). - * - * The application must use one of the `rd_kafka_consume_*_queue()` functions - * to receive fetched messages. - * - * `rd_kafka_consume_start_queue()` must not be called multiple times for the - * same topic and partition without stopping consumption first with - * `rd_kafka_consume_stop()`. - * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not - * be combined for the same topic and partition. - */ -RD_EXPORT -int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu); - -/** - * @brief Stop consuming messages for topic \p rkt and \p partition, purging - * all messages currently in the local queue. - * - * NOTE: To enforce synchronisation this call will block until the internal - * fetcher has terminated and offsets are committed to configured - * storage method.
- * - * The application needs to stop all consumers before calling - * `rd_kafka_destroy()` on the main object handle. - * - * @returns 0 on success or -1 on error (see `errno`). - */ -RD_EXPORT -int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); - - - -/** - * @brief Seek consumer for topic+partition to \p offset which is either an - * absolute or logical offset. - * - * If \p timeout_ms is not 0 the call will wait this long for the - * seek to be performed. If the timeout is reached the internal state - * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. - * If \p timeout_ms is 0 it will initiate the seek but return - * immediately without any error reporting (e.g., async). - * - * This call will purge all pre-fetched messages for the given partition, which - * may be up to \c queued.max.message.kbytes in size. Repeated use of seek - * may thus lead to increased network usage as messages are re-fetched from - * the broker. - * - * @remark Seek must only be performed for already assigned/consumed partitions, - * use rd_kafka_assign() (et.al) to set the initial starting offset - * for a new assignment. - * - * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code. - * - * @deprecated Use rd_kafka_seek_partitions(). - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, - int32_t partition, - int64_t offset, - int timeout_ms); - - - -/** - * @brief Seek consumer for partitions in \p partitions to the per-partition - * offset in the \c .offset field of \p partitions. - * - * The offset may be either absolute (>= 0) or a logical offset. - * - * If \p timeout_ms is not 0 the call will wait this long for the - * seeks to be performed. If the timeout is reached the internal state - * will be unknown for the remaining partitions to seek and this function - * will return an error with the error code set to - * `RD_KAFKA_RESP_ERR__TIMED_OUT`. - * - * If \p timeout_ms is 0 it will initiate the seek but return - * immediately without any error reporting (e.g., async). - * - * This call will purge all pre-fetched messages for the given partition, which - * may be up to \c queued.max.message.kbytes in size. Repeated use of seek - * may thus lead to increased network usage as messages are re-fetched from - * the broker. - * - * Individual partition errors are reported in the per-partition \c .err field - * of \p partitions. - * - * @remark Seek must only be performed for already assigned/consumed partitions, - * use rd_kafka_assign() (et.al) to set the initial starting offset - * for a new assignment. - * - * @returns NULL on success or an error object on failure. - */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_seek_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); - - -/** - * @brief Consume a single message from topic \p rkt and \p partition - * - * \p timeout_ms is maximum amount of time to wait for a message to be received. - * Consumer must have been previously started with `rd_kafka_consume_start()`. - * - * @returns a message object on success or \c NULL on error. - * The message object must be destroyed with `rd_kafka_message_destroy()` - * when the application is done with it. - * - * Errors (when returning NULL): - * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched. - * - ENOENT - \p rkt + \p partition is unknown. - * (no prior `rd_kafka_consume_start()` call) - * - * NOTE: The returned message's \c ..->err must be checked for errors.
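As a hedged sketch of `rd_kafka_seek_partitions()` documented above (the handle `rk`, the topic name "HelloTopic" borrowed from this repo's samples, and the two-partition layout are assumptions):

```c
/* Minimal sketch: rewind two partitions of "HelloTopic" to the beginning.
 * Assumes `rk` is a consumer with an active assignment covering them. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void rewind_partitions (rd_kafka_t *rk) {
        rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(2);
        rd_kafka_topic_partition_list_add(parts, "HelloTopic", 0)->offset =
                RD_KAFKA_OFFSET_BEGINNING;
        rd_kafka_topic_partition_list_add(parts, "HelloTopic", 1)->offset =
                RD_KAFKA_OFFSET_BEGINNING;

        rd_kafka_error_t *error = rd_kafka_seek_partitions(rk, parts, 5000);
        if (error) {
                fprintf(stderr, "seek failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
        }
        /* Per-partition errors, if any, are left in each element's .err. */
        rd_kafka_topic_partition_list_destroy(parts);
}
```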
- * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the - * end of the partition has been reached, which should typically not be - * considered an error. The application should handle this case - * (e.g., ignore). - * - * @remark on_consume() interceptors may be called from this function prior to - * passing message to application. - */ -RD_EXPORT -rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms); - - - -/** - * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition - * putting a pointer to each message in the application provided - * array \p rkmessages (of size \p rkmessages_size entries). - * - * `rd_kafka_consume_batch()` provides higher throughput performance - * than `rd_kafka_consume()`. - * - * \p timeout_ms is the maximum amount of time to wait for all of - * \p rkmessages_size messages to be put into \p rkmessages. - * If no messages were available within the timeout period this function - * returns 0 and \p rkmessages remains untouched. - * This differs somewhat from `rd_kafka_consume()`. - * - * The message objects must be destroyed with `rd_kafka_message_destroy()` - * when the application is done with them. - * - * @returns the number of rkmessages added in \p rkmessages, - * or -1 on error (same error codes as for `rd_kafka_consume()`). - * - * @sa rd_kafka_consume() - * - * @remark on_consume() interceptors may be called from this function prior to - * passing message to application. - */ -RD_EXPORT -ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); - - - -/** - * @brief Consumes messages from topic \p rkt and \p partition, calling - * the provided callback for each consumed message. - * - * `rd_kafka_consume_callback()` provides higher throughput performance - * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`. - * - * \p timeout_ms is the maximum amount of time to wait for one or more messages - * to arrive. - * - * The provided \p consume_cb function is called for each message, - * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the - * provided \p rkmessage. - * - * The \p commit_opaque argument is passed to the \p consume_cb - * as \p commit_opaque. - * - * @returns the number of messages processed or -1 on error. - * - * @sa rd_kafka_consume() - * - * @remark on_consume() interceptors may be called from this function prior to - * passing message to application. - * - * @remark This function will return early if a transaction control message is - * received, these messages are not exposed to the application but - * still enqueued on the consumer queue to make sure their - * offsets are stored. - * - * @deprecated This API is deprecated and subject for future removal. - * There is no new callback-based consume interface, use the - * poll/queue based alternatives. - */ -RD_EXPORT -int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), - void *commit_opaque); - - -/** - * @name Simple Consumer API (legacy): Queue consumers - * @{ - * - * The following `..._queue()` functions are analogous to the functions above - * but read messages from the provided queue \p rkqu instead.
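A hedged sketch of the batch variant documented above (the batch size of 64 and prior `rd_kafka_consume_start()` call are assumptions):

```c
/* Minimal sketch of rd_kafka_consume_batch(): fetch up to 64 messages in
 * one call, then process and destroy each. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void drain_batch (rd_kafka_topic_t *rkt, int32_t partition) {
        rd_kafka_message_t *rkmessages[64];
        ssize_t cnt = rd_kafka_consume_batch(rkt, partition, 1000,
                                             rkmessages, 64);
        if (cnt == -1) {
                fprintf(stderr, "consume_batch: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
                return;
        }
        for (ssize_t i = 0; i < cnt; i++) {
                if (!rkmessages[i]->err) {
                        /* application processing of payload goes here */
                }
                rd_kafka_message_destroy(rkmessages[i]);
        }
}
```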
- * \p rkqu must have been previously created with `rd_kafka_queue_new()` - * and the topic consumer must have been started with - * `rd_kafka_consume_start_queue()` utilising the same queue. - */ - -/** - * @brief Consume from queue - * - * @sa rd_kafka_consume() - */ -RD_EXPORT -rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, - int timeout_ms); - -/** - * @brief Consume batch of messages from queue - * - * @sa rd_kafka_consume_batch() - */ -RD_EXPORT -ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); - -/** - * @brief Consume multiple messages from queue with callback - * - * @sa rd_kafka_consume_callback() - * - * @deprecated This API is deprecated and subject for future removal. - * There is no new callback-based consume interface, use the - * poll/queue based alternatives. - */ -RD_EXPORT -int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), - void *commit_opaque); - - -/**@}*/ - - - - -/** - * @name Simple Consumer API (legacy): Topic+partition offset store. - * @{ - * - * If \c auto.commit.enable is true the offset is stored automatically prior to - * returning of the message(s) in each of the rd_kafka_consume*() functions - * above. - */ - - -/** - * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition. - * - * The \c offset + 1 will be committed (written) to broker (or file) according - * to \c `auto.commit.interval.ms` or manual offset-less commit() - * - * @remark \c `enable.auto.offset.store` must be set to "false" when using - * this API. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); - - -/** - * @brief Store offsets for next auto-commit for one or more partitions. - * - * The offset will be committed (written) to the offset store according - * to \c `auto.commit.interval.ms` or manual offset-less commit(). - * - * Per-partition success/error status propagated through each partition's - * \c .err field. - * - * @remark The \c .offset field is stored as is, it will NOT be + 1. - * - * @remark \c `enable.auto.offset.store` must be set to "false" when using - * this API. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or - * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if none of the - * offsets could be stored, or - * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store - * is true. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_store (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets); -/**@}*/ - - - - -/** - * @name KafkaConsumer (C) - * @{ - * @brief High-level KafkaConsumer C API - * - * - * - */ - -/** - * @brief Subscribe to topic set using balanced consumer groups. - * - * Wildcard (regex) topics are supported: - * any topic name in the \p topics list that is prefixed with \c \"^\" will - * be regex-matched to the full list of topics in the cluster and matching - * topics will be added to the subscription list. - * - * The full topic list is retrieved every \c topic.metadata.refresh.interval.ms - * to pick up new or deleted topics that match the subscription. - * If there is any change to the matched topics the consumer will - * immediately rejoin the group with the updated set of subscribed topics.
- * - * Regex and full topic names can be mixed in \p topics. - * - * @remark Only the \c .topic field is used in the supplied \p topics list, - * all other fields are ignored. - * - * @remark subscribe() is an asynchronous method which returns immediately: - * background threads will (re)join the group, wait for group rebalance, - * issue any registered rebalance_cb, assign() the assigned partitions, - * and then start fetching messages. This cycle may take up to - * \c session.timeout.ms * 2 or more to complete. - * - * @remark A consumer error will be raised for each unavailable topic in the - * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART - * for non-existent topics, and - * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. - * The consumer error will be raised through rd_kafka_consumer_poll() - * (et.al.) with the \c rd_kafka_message_t.err field set to one of the - * error codes mentioned above. - * The subscribe function itself is asynchronous and will not return - * an error on unavailable topics. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or - * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid - * topics or regexes or duplicate entries, - * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics); - - -/** - * @brief Unsubscribe from the current subscription set. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); - - -/** - * @brief Returns the current topic subscription - * - * @returns An error code on failure, otherwise \p topic is updated - * to point to a newly allocated topic list (possibly empty). - * - * @remark The application is responsible for calling - * rd_kafka_topic_partition_list_destroy on the returned list. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics); - - - -/** - * @brief Poll the consumer for messages or events. - * - * Will block for at most \p timeout_ms milliseconds. - * - * @remark An application should make sure to call consumer_poll() at regular - * intervals, even if no messages are expected, to serve any - * queued callbacks waiting to be called. This is especially - * important when a rebalance_cb has been registered as it needs - * to be called and handled properly to synchronize internal - * consumer state. - * - * @returns A message object which is a proper message if \p ->err is - * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other - * value. - * - * @remark on_consume() interceptors may be called from this function prior to - * passing message to application. - * - * @remark When subscribing to topics the application must call poll at - * least every \c max.poll.interval.ms to remain a member of the - * consumer group. - * - * Noteworthy errors returned in \c ->err: - * - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call - * poll within `max.poll.interval.ms`. - * - * @sa rd_kafka_message_t - */ -RD_EXPORT -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); - -/** - * @brief Close down the KafkaConsumer. - * - * @remark This call will block until the consumer has revoked its assignment, - * calling the \c rebalance_cb if it is configured, committed offsets - * to broker, and left the consumer group. - * The maximum blocking time is roughly limited to session.timeout.ms. 
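The chunk above documents the high-level subscribe/poll/close cycle. As a hedged, minimal sketch of that flow (the topic name "HelloTopic" is borrowed from this repo's samples; the `running` flag is an assumption):

```c
/* Minimal high-level consumer flow: subscribe, poll, close.
 * Assumes `rk` was created with a group.id configured. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void run_consumer (rd_kafka_t *rk, volatile int *running) {
        rd_kafka_poll_set_consumer(rk); /* serve everything via consumer_poll */

        rd_kafka_topic_partition_list_t *topics =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, "HelloTopic",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        while (*running) {
                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
                if (!rkm)
                        continue;       /* timeout: poll again */
                if (rkm->err)
                        fprintf(stderr, "consumer: %s\n",
                                rd_kafka_message_errstr(rkm));
                else {
                        /* application processing of rkm->payload here */
                }
                rd_kafka_message_destroy(rkm);
        }
        rd_kafka_consumer_close(rk); /* blocks until group membership ends */
}
```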
- * - * @returns An error code indicating if the consumer close was successful - * or not. - * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised - * a fatal error. - * - * @remark The application still needs to call rd_kafka_destroy() after - * this call finishes to clean up the underlying handle resources. - * - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); - - -/** - * @brief Incrementally add \p partitions to the current assignment. - * - * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, - * this method should be used in a rebalance callback to adjust the current - * assignment appropriately in the case where the rebalance type is - * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the - * partition list passed to the callback (or a copy of it), even if the - * list is empty. \p partitions must not be NULL. This method may also be - * used outside the context of a rebalance callback. - * - * @returns NULL on success, or an error object if the operation was - * unsuccessful. - * - * @remark The returned error object (if not NULL) must be destroyed with - * rd_kafka_error_destroy(). - */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); - - -/** - * @brief Incrementally remove \p partitions from the current assignment. - * - * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, - * this method should be used in a rebalance callback to adjust the current - * assignment appropriately in the case where the rebalance type is - * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the - * partition list passed to the callback (or a copy of it), even if the - * list is empty. \p partitions must not be NULL. This method may also be - * used outside the context of a rebalance callback. - * - * @returns NULL on success, or an error object if the operation was - * unsuccessful. - * - * @remark The returned error object (if not NULL) must be destroyed with - * rd_kafka_error_destroy(). - */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_unassign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); - - -/** - * @brief The rebalance protocol currently in use. This will be - * "NONE" if the consumer has not (yet) joined a group, else it will - * match the rebalance protocol ("EAGER", "COOPERATIVE") of the - * configured and selected assignor(s). All configured - * assignors must have the same protocol type, meaning - * online migration of a consumer group from using one - * protocol to another (in particular upgrading from EAGER - * to COOPERATIVE) without a restart is not currently - * supported. - * - * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. - */ -RD_EXPORT -const char *rd_kafka_rebalance_protocol (rd_kafka_t *rk); - - -/** - * @brief Atomic assignment of partitions to consume. - * - * The new \p partitions will replace the existing assignment. - * - * A zero-length \p partitions will treat the partitions as a valid, - * albeit empty assignment, and maintain internal state, while a \c NULL - * value for \p partitions will reset and clear the internal state. - * - * When used from a rebalance callback, the application should pass the - * partition list passed to the callback (or a copy of it) even if the list - * is empty (i.e. should not pass NULL in this case) so as to maintain - * internal join state.
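Tying the incremental assign/unassign calls and `rd_kafka_rebalance_protocol()` together, here is a hedged sketch of a rebalance callback that works under both protocols (registration via `rd_kafka_conf_set_rebalance_cb()` and the callback name are assumptions):

```c
/* Minimal rebalance callback handling both EAGER and COOPERATIVE
 * protocols; set on the conf with rd_kafka_conf_set_rebalance_cb(). */
#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *partitions,
                          void *opaque) {
        const char *proto = rd_kafka_rebalance_protocol(rk);
        int cooperative = proto && !strcmp(proto, "COOPERATIVE");
        rd_kafka_error_t *error = NULL;

        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
                if (cooperative)
                        error = rd_kafka_incremental_assign(rk, partitions);
                else
                        rd_kafka_assign(rk, partitions);
        } else { /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
                if (cooperative)
                        error = rd_kafka_incremental_unassign(rk, partitions);
                else
                        rd_kafka_assign(rk, NULL);
        }
        if (error) {
                fprintf(stderr, "incremental rebalance failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error); /* caller must free it */
        }
}
```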
This is not strictly required - the application - * may adjust the assignment provided by the group. However, this is rarely - * useful in practice. - * - * @returns An error code indicating if the new assignment was applied or not. - * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised - * a fatal error. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *partitions); - -/** - * @brief Returns the current partition assignment as set by rd_kafka_assign() - * or rd_kafka_incremental_assign(). - * - * @returns An error code on failure, otherwise \p partitions is updated - * to point to a newly allocated partition list (possibly empty). - * - * @remark The application is responsible for calling - * rd_kafka_topic_partition_list_destroy on the returned list. - * - * @remark This assignment represents the partitions assigned through the - * assign functions and not the partitions assigned to this consumer - * instance by the consumer group leader. - * They are usually the same following a rebalance but not necessarily - * since an application is free to assign any partitions. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions); - - -/** - * @brief Check whether the consumer considers the current assignment to - * have been lost involuntarily. This method is only applicable for - * use with a high level subscribing consumer. Assignments are revoked - * immediately when determined to have been lost, so this method - * is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event - * or from within a rebalance_cb. Partitions that have been lost may - * already be owned by other members in the group and therefore - * committing offsets, for example, may fail. - * - * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or - * rd_kafka_incremental_unassign() resets this flag. - * - * @returns Returns 1 if the current partition assignment is considered - * lost, 0 otherwise. - */ -RD_EXPORT int -rd_kafka_assignment_lost (rd_kafka_t *rk); - - -/** - * @brief Commit offsets on broker for the provided list of partitions. - * - * \p offsets should contain \c topic, \c partition, \c offset and possibly - * \c metadata. The \c offset should be the offset where consumption will - * resume, i.e., the last processed offset + 1. - * If \p offsets is NULL the current partition assignment will be used instead. - * - * If \p async is false this operation will block until the broker offset commit - * is done, returning the resulting success or error code. - * - * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been - * configured the callback will be enqueued for a future call to - * rd_kafka_poll(), rd_kafka_consumer_poll() or similar. - * - * @returns An error code indicating if the commit was successful, - * or successfully scheduled if asynchronous, or failed. - * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised - * a fatal error. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - int async); - - -/** - * @brief Commit message's offset on broker for the message's partition. - * The committed offset is the message's offset + 1.
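A hedged, minimal sketch of the synchronous commit path documented above (the helper name is hypothetical):

```c
/* Minimal sketch: synchronously commit the consumer's current assignment.
 * offsets == NULL means "use the current assignment"; async == 0 blocks
 * until the broker acknowledges the commit. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void commit_assignment_sync (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err = rd_kafka_commit(rk, NULL, 0);
        if (err)
                fprintf(stderr, "commit failed: %s\n",
                        rd_kafka_err2str(err));
}
```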
- * - * @sa rd_kafka_commit - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async); - - -/** - * @brief Commit offsets on broker for the provided list of partitions. - * - * See rd_kafka_commit for \p offsets semantics. - * - * The result of the offset commit will be posted on the provided \p rkqu queue. - * - * If the application uses one of the poll APIs (rd_kafka_poll(), - * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue - * the \p cb callback is required. - * - * The \p commit_opaque argument is passed to the callback as \p commit_opaque, - * or if using the event API the callback is ignored and the offset commit - * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the - * \p commit_opaque value will be available with rd_kafka_event_opaque(). - * - * If \p rkqu is NULL a temporary queue will be created and the callback will - * be served by this call. - * - * @sa rd_kafka_commit() - * @sa rd_kafka_conf_set_offset_commit_cb() - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *commit_opaque), - void *commit_opaque); - - -/** - * @brief Retrieve committed offsets for topics+partitions. - * - * The \p offset field of each requested partition will either be set to the - * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored - * offset for that partition. - * - * Committed offsets will be returned according to the `isolation.level` - * configuration property, if set to `read_committed` (default) then only - * stable offsets for fully committed transactions will be returned, while - * `read_uncommitted` may return offsets for not yet committed transactions. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the - * \p offset or \p err field of each \p partitions' element is filled - * in with the stored offset, or a partition specific error. - * Else returns an error code. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); - - - -/** - * @brief Retrieve current positions (offsets) for topics+partitions. - * - * The \p offset field of each requested partition will be set to the offset - * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was - * no previous message. - * - * @remark In this context the last consumed message is the offset consumed - * by the current librdkafka instance and, in case of rebalancing, not - * necessarily the last message fetched from the partition. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the - * \p offset or \p err field of each \p partitions' element is filled - * in with the stored offset, or a partition specific error. - * Else returns an error code. - */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - - - - -/** - * @returns the current consumer group metadata associated with this consumer, - * or NULL if \p rk is not a consumer configured with a \c group.id. - * This metadata object should be passed to the transactional - * producer's rd_kafka_send_offsets_to_transaction() API. - * - * @remark The returned pointer must be freed by the application using - * rd_kafka_consumer_group_metadata_destroy().
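A hedged sketch combining `rd_kafka_assignment()` and `rd_kafka_committed()` from the chunk above to print committed offsets for the current assignment (function name and the 5-second timeout are assumptions):

```c
/* Minimal sketch: query and print committed offsets for the consumer's
 * current assignment. */
#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

static void print_committed (rd_kafka_t *rk) {
        rd_kafka_topic_partition_list_t *parts = NULL;
        if (rd_kafka_assignment(rk, &parts) != RD_KAFKA_RESP_ERR_NO_ERROR)
                return;

        if (rd_kafka_committed(rk, parts, 5000) ==
            RD_KAFKA_RESP_ERR_NO_ERROR) {
                for (int i = 0; i < parts->cnt; i++)
                        printf("%s [%"PRId32"] committed offset %"PRId64"\n",
                               parts->elems[i].topic,
                               parts->elems[i].partition,
                               parts->elems[i].offset);
        }
        rd_kafka_topic_partition_list_destroy(parts);
}
```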
- * - * @sa rd_kafka_send_offsets_to_transaction() - */ -RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata (rd_kafka_t *rk); - - -/** - * @brief Create a new consumer group metadata object. - * This is typically only used for writing tests. - * - * @param group_id The group id. - * - * @remark The returned pointer must be freed by the application using - * rd_kafka_consumer_group_metadata_destroy(). - */ -RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new (const char *group_id); - - -/** - * @brief Create a new consumer group metadata object. - * This is typically only used for writing tests. - * - * @param group_id The group id. - * @param generation_id The group generation id. - * @param member_id The group member id. - * @param group_instance_id The group instance id (may be NULL). - * - * @remark The returned pointer must be freed by the application using - * rd_kafka_consumer_group_metadata_destroy(). - */ -RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, - int32_t generation_id, - const char *member_id, - const char - *group_instance_id); - - -/** - * @brief Frees the consumer group metadata object as returned by - * rd_kafka_consumer_group_metadata(). - */ -RD_EXPORT void -rd_kafka_consumer_group_metadata_destroy (rd_kafka_consumer_group_metadata_t *); - - -/** - * @brief Serialize the consumer group metadata to a binary format. - * This is mainly for client binding use and not for application use. - * - * @remark The serialized metadata format is private and is not compatible - * across different versions or even builds of librdkafka. - * It should only be used in the same process runtime and must only - * be passed to rd_kafka_consumer_group_metadata_read(). - * - * @param cgmd Metadata to be serialized. - * @param bufferp On success this pointer will be updated to point to an - * allocated buffer containing the serialized metadata. - * The buffer must be freed with rd_kafka_mem_free(). - * @param sizep The pointed to size will be updated with the size of - * the serialized buffer. - * - * @returns NULL on success or an error object on failure. - * - * @sa rd_kafka_consumer_group_metadata_read() - */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_write ( - const rd_kafka_consumer_group_metadata_t *cgmd, - void **bufferp, size_t *sizep); - -/** - * @brief Reads serialized consumer group metadata and returns a - * consumer group metadata object. - * This is mainly for client binding use and not for application use. - * - * @remark The serialized metadata format is private and is not compatible - * across different versions or even builds of librdkafka. - * It should only be used in the same process runtime and must only - * be passed to rd_kafka_consumer_group_metadata_read(). - * - * @param cgmdp On success this pointer will be updated to point to a new - * consumer group metadata object which must be freed with - * rd_kafka_consumer_group_metadata_destroy(). - * @param buffer Pointer to the serialized data. - * @param size Size of the serialized data. - * - * @returns NULL on success or an error object on failure.
- * - * @sa rd_kafka_consumer_group_metadata_write() - */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_read ( - rd_kafka_consumer_group_metadata_t **cgmdp, - const void *buffer, size_t size); - -/**@}*/ - - - -/** - * @name Producer API - * @{ - * - * - */ - - -/** - * @brief Producer message flags - */ -#define RD_KAFKA_MSG_F_FREE 0x1 /**< Delegate freeing of payload to rdkafka. */ -#define RD_KAFKA_MSG_F_COPY 0x2 /**< rdkafka will make a copy of the payload. */ -#define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full. - * WARNING: If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinitely blocking on - * the produce() call when the - * message queue is full. */ -#define RD_KAFKA_MSG_F_PARTITION 0x8 /**< produce_batch() will honor - * per-message partition. */ - - - -/** - * @brief Produce and send a single message to broker. - * - * \p rkt is the target topic which must have been previously created with - * `rd_kafka_topic_new()`. - * - * `rd_kafka_produce()` is an asynchronous non-blocking API. - * See `rd_kafka_conf_set_dr_msg_cb` on how to set up a callback to be called - * once the delivery status (success or failure) is known. The delivery report - * is triggered by the application calling `rd_kafka_poll()` (at regular - * intervals) or `rd_kafka_flush()` (at termination). - * - * Since producing is asynchronous, you should call `rd_kafka_flush()` before - * you destroy the producer. Otherwise, any outstanding messages will be - * silently discarded. - * - * When temporary errors occur, librdkafka automatically retries to produce the - * messages. Retries are triggered after retry.backoff.ms and when the - * leader broker for the given partition is available. Otherwise, librdkafka - * falls back to polling the topic metadata to monitor when a new leader is - * elected (see the topic.metadata.refresh.fast.interval.ms and - * topic.metadata.refresh.interval.ms configurations) and then performs a - * retry. A delivery error will occur if the message could not be produced - * within message.timeout.ms. - * - * See the "Message reliability" chapter in INTRODUCTION.md for more - * information. - * - * \p partition is the target partition, either: - * - RD_KAFKA_PARTITION_UA (unassigned) for - * automatic partitioning using the topic's partitioner function, or - * - a fixed partition (0..N) - * - * \p msgflags is zero or more of the following flags OR:ed together: - * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if - * \p queue.buffering.max.messages or - * \p queue.buffering.max.kbytes are exceeded. - * Messages are considered in-queue from the point they - * are accepted by produce() until their corresponding - * delivery report callback/event returns. - * It is thus a requirement to call - * rd_kafka_poll() (or equiv.) from a separate - * thread when F_BLOCK is used. - * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. - * - * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done - * with it. - * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the - * \p payload pointer will not be used by rdkafka - * after the call returns. - * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message - * partition, either set manually or by the - * configured partitioner. - * - * .._F_FREE and .._F_COPY are mutually exclusive.
If neither of these are - * set, the caller must ensure that the memory backing \p payload remains - * valid and is not modified or reused until the delivery callback is - * invoked. Other buffers passed to `rd_kafka_produce()` don't have this - * restriction on reuse, i.e. the memory backing the key or the topic name - * may be reused as soon as `rd_kafka_produce()` returns. - * - * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then - * the memory associated with the payload is still the caller's - * responsibility. - * - * \p payload is the message payload of size \p len bytes. - * - * \p key is an optional message key of size \p keylen bytes, if non-NULL it - * will be passed to the topic partitioner as well as be sent with the - * message to the broker and passed on to the consumer. - * - * \p msg_opaque is an optional application-provided per-message opaque - * pointer that will be provided in the message's delivery report callback - * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field. - * - * @remark on_send() and on_acknowledgement() interceptors may be called - * from this function. on_acknowledgement() will only be called if the - * message fails partitioning. - * - * @remark If the producer is transactional (\c transactional.id is configured) - * producing is only allowed during an on-going transaction, namely - * after rd_kafka_begin_transaction() has been called. - * - * @returns 0 on success or -1 on error in which case errno is set accordingly: - * - ENOBUFS - maximum number of outstanding messages has been reached: - * "queue.buffering.max.messages" - * (RD_KAFKA_RESP_ERR__QUEUE_FULL) - * - EMSGSIZE - message is larger than configured max size: - * "message.max.bytes". - * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) - * - ESRCH - requested \p partition is unknown in the Kafka cluster. - * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - * - ENOENT - topic is unknown in the Kafka cluster. - * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - * - ECANCELED - fatal error has been raised on producer, see - * rd_kafka_fatal_error(), - * (RD_KAFKA_RESP_ERR__FATAL). - * - ENOEXEC - transactional state forbids producing - * (RD_KAFKA_RESP_ERR__STATE) - * - * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. - */ -RD_EXPORT -int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque); - - -/** - * @brief Produce and send a single message to broker. - * - * The message is defined by a va-arg list using \c rd_kafka_vtype_t - * tag tuples which must be terminated with a single \c RD_KAFKA_V_END. - * - * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as - * described in rd_kafka_produce(). - * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and - * _V_HEADERS are mixed. - * - * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); - - -/** - * @brief Produce and send a single message to broker. - * - * The message is defined by an array of \c rd_kafka_vu_t of - * count \p cnt. - * - * @returns an error object on failure or NULL on success. - * See rd_kafka_producev() for specific error codes. - * - * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END - */ -RD_EXPORT -rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, - const rd_kafka_vu_t *vus, - size_t cnt); - - -/** - * @brief Produce multiple messages.
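A hedged sketch of the va-arg produce path documented above (the topic name "HelloTopic" is borrowed from this repo's samples; the helper name is hypothetical):

```c
/* Minimal sketch: produce one message with rd_kafka_producev() and serve
 * any pending delivery reports. Assumes `rk` is a producer handle. */
#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

static void produce_one (rd_kafka_t *rk, const char *value) {
        rd_kafka_resp_err_t err = rd_kafka_producev(
                rk,
                RD_KAFKA_V_TOPIC("HelloTopic"),
                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), /* copy payload */
                RD_KAFKA_V_VALUE((void *)value, strlen(value)),
                RD_KAFKA_V_OPAQUE(NULL),
                RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "producev failed: %s\n",
                        rd_kafka_err2str(err));
        rd_kafka_poll(rk, 0);   /* serve delivery report callbacks */
}
```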
- * - * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will - * be run for each message (slower), otherwise the messages will be enqueued - * to the specified partition directly (faster). - * - * The messages are provided in the array \p rkmessages of count \p message_cnt - * elements. - * The \p partition and \p msgflags are used for all provided messages. - * - * Honoured \p rkmessages[] fields are: - * - payload,len Message payload and length - * - key,key_len Optional message key - * - _private Message opaque pointer (msg_opaque) - * - err Will be set according to success or failure, see - * rd_kafka_produce() for possible error codes. - * Application only needs to check for errors if - * return value != \p message_cnt. - * - * @remark If \c RD_KAFKA_MSG_F_PARTITION is set in \p msgflags, the - * \c .partition field of the \p rkmessages is used instead of - * \p partition. - * - * @returns the number of messages successfully enqueued for producing. - * - * @remark This interface does NOT support setting message headers on - * the provided \p rkmessages. - */ -RD_EXPORT -int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt); - - - - -/** - * @brief Wait until all outstanding produce requests, et.al, are completed. - * This should typically be done prior to destroying a producer instance - * to make sure all queued and in-flight produce requests are completed - * before terminating. - * - * @remark This function will call rd_kafka_poll() and thus trigger callbacks. - * - * @remark The \c linger.ms time will be ignored for the duration of the call, - * queued messages will be sent to the broker as soon as possible. - * - * @remark If RD_KAFKA_EVENT_DR has been enabled - * (through rd_kafka_conf_set_events()) this function will not call - * rd_kafka_poll() but instead wait for the librdkafka-handled - * message count to reach zero. This requires the application to - * serve the event queue in a separate thread. - * In this mode only messages are counted, not other types of - * queued events. - * - * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all - * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR - * - * @sa rd_kafka_outq_len() - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); - - - -/** - * @brief Purge messages currently handled by the producer instance. - * - * @param rk Client instance. - * @param purge_flags Tells which messages to purge and how. - * - * The application will need to call rd_kafka_poll() or rd_kafka_flush() - * afterwards to serve the delivery report callbacks of the purged messages. - * - * Messages purged from internal queues fail with the delivery report - * error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that - * are in-flight to or from the broker will fail with the error code set to - * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT. - * - * @warning Purging messages that are in-flight to or from the broker - * will ignore any subsequent acknowledgement for these messages - * received from the broker, effectively making it impossible - * for the application to know if the messages were successfully - * produced or not. This may result in duplicate messages if the - * application retries these messages at a later time. - * - * @remark This call may block for a short time while background thread - * queues are purged.
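A hedged sketch of the producer termination sequence implied by the `rd_kafka_flush()` documentation above (the 10-second timeout and helper name are assumptions):

```c
/* Minimal producer shutdown: flush outstanding messages, report any that
 * could not be delivered in time, then destroy the handle. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void shutdown_producer (rd_kafka_t *rk) {
        if (rd_kafka_flush(rk, 10 * 1000) == RD_KAFKA_RESP_ERR__TIMED_OUT)
                fprintf(stderr, "%d message(s) were not delivered\n",
                        rd_kafka_outq_len(rk));
        rd_kafka_destroy(rk);
}
```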
- * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, - * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p purge flags are invalid - * or unknown, - * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer - * client instance. - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); - - -/** - * @brief Flags for rd_kafka_purge() - */ - -/*! - * Purge messages in internal queues. - */ -#define RD_KAFKA_PURGE_F_QUEUE 0x1 - -/*! - * Purge messages in-flight to or from the broker. - * Purging these messages will void any future acknowledgements from the - * broker, making it impossible for the application to know if these - * messages were successfully delivered or not. - * Retrying these messages may lead to duplicates. - */ -#define RD_KAFKA_PURGE_F_INFLIGHT 0x2 - - -/*! - * Don't wait for background thread queue purging to finish. - */ -#define RD_KAFKA_PURGE_F_NON_BLOCKING 0x4 - - -/**@}*/ - - -/** -* @name Metadata API -* @{ -* -* -*/ - - -/** - * @brief Broker information - */ -typedef struct rd_kafka_metadata_broker { - int32_t id; /**< Broker Id */ - char *host; /**< Broker hostname */ - int port; /**< Broker listening port */ -} rd_kafka_metadata_broker_t; - -/** - * @brief Partition information - */ -typedef struct rd_kafka_metadata_partition { - int32_t id; /**< Partition Id */ - rd_kafka_resp_err_t err; /**< Partition error reported by broker */ - int32_t leader; /**< Leader broker */ - int replica_cnt; /**< Number of brokers in \p replicas */ - int32_t *replicas; /**< Replica brokers */ - int isr_cnt; /**< Number of ISR brokers in \p isrs */ - int32_t *isrs; /**< In-Sync-Replica brokers */ -} rd_kafka_metadata_partition_t; - -/** - * @brief Topic information - */ -typedef struct rd_kafka_metadata_topic { - char *topic; /**< Topic name */ - int partition_cnt; /**< Number of partitions in \p partitions*/ - struct rd_kafka_metadata_partition *partitions; /**< Partitions */ - rd_kafka_resp_err_t err; /**< Topic error reported by broker */ -} rd_kafka_metadata_topic_t; - - -/** - * @brief Metadata container - */ -typedef struct rd_kafka_metadata { - int broker_cnt; /**< Number of brokers in \p brokers */ - struct rd_kafka_metadata_broker *brokers; /**< Brokers */ - - int topic_cnt; /**< Number of topics in \p topics */ - struct rd_kafka_metadata_topic *topics; /**< Topics */ - - int32_t orig_broker_id; /**< Broker originating this metadata */ - char *orig_broker_name; /**< Name of originating broker */ -} rd_kafka_metadata_t; - - -/** - * @brief Request Metadata from broker. - * - * Parameters: - * - \p all_topics if non-zero: request info about all topics in cluster, - * if zero: only request info about locally known topics. - * - \p only_rkt only request info about this topic - * - \p metadatap pointer to hold metadata result. - * The \p *metadatap pointer must be released - * with rd_kafka_metadata_destroy(). - * - \p timeout_ms maximum response time before failing. - * - * @remark Consumer: If \p all_topics is non-zero the Metadata response - * information may trigger a re-join if any subscribed topics - * have changed partition count or existence state. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap - * will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or - * other error code on error.
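A hedged sketch of the metadata request documented above, listing all topics in the cluster (function name and 5-second timeout are assumptions):

```c
/* Minimal sketch: request cluster metadata for all topics and print the
 * topic names and partition counts. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void list_topics (rd_kafka_t *rk) {
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err =
                rd_kafka_metadata(rk, 1 /* all topics */, NULL, &md, 5000);
        if (err) {
                fprintf(stderr, "metadata failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }
        for (int i = 0; i < md->topic_cnt; i++)
                printf("%s (%d partition(s))\n",
                       md->topics[i].topic, md->topics[i].partition_cnt);
        rd_kafka_metadata_destroy(md);
}
```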
- */ -RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms); - -/** - * @brief Release metadata memory. - */ -RD_EXPORT -void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); - - -/**@}*/ - - - -/** -* @name Client group information -* @{ -* -* -*/ - - -/** - * @brief Group member information - * - * For more information on \p member_metadata format, see - * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI - * - */ -struct rd_kafka_group_member_info { - char *member_id; /**< Member id (generated by broker) */ - char *client_id; /**< Client's \p client.id */ - char *client_host; /**< Client's hostname */ - void *member_metadata; /**< Member metadata (binary), - * format depends on \p protocol_type. */ - int member_metadata_size; /**< Member metadata size in bytes */ - void *member_assignment; /**< Member assignment (binary), - * format depends on \p protocol_type. */ - int member_assignment_size; /**< Member assignment size in bytes */ -}; - -/** - * @brief Group information - */ -struct rd_kafka_group_info { - struct rd_kafka_metadata_broker broker; /**< Originating broker info */ - char *group; /**< Group name */ - rd_kafka_resp_err_t err; /**< Broker-originated error */ - char *state; /**< Group state */ - char *protocol_type; /**< Group protocol type */ - char *protocol; /**< Group protocol */ - struct rd_kafka_group_member_info *members; /**< Group members */ - int member_cnt; /**< Group member count */ -}; - -/** - * @brief List of groups - * - * @sa rd_kafka_group_list_destroy() to release list memory. - */ -struct rd_kafka_group_list { - struct rd_kafka_group_info *groups; /**< Groups */ - int group_cnt; /**< Group count */ -}; - - -/** - * @brief List and describe client groups in cluster. - * - * \p group is an optional group name to describe, otherwise (\p NULL) all - * groups are returned. - * - * \p timeout_ms is the (approximate) maximum time to wait for response - * from brokers and must be a positive value. - * - * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success and \p grplistp is - * updated to point to a newly allocated list of groups. - * \c RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded - * in time but at least one group is returned in \p grplistp. - * \c RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the - * given timeframe but not all brokers have yet responded, or - * if the list of brokers in the cluster could not be obtained within - * the given timeframe. - * \c RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found. - * Other error codes may also be returned from the request layer. - * - * The \p grplistp remains untouched if any error code is returned, - * with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves - * as RD_KAFKA_RESP_ERR_NO_ERROR (success) but with an incomplete - * group list. - * - * @sa Use rd_kafka_group_list_destroy() to release list memory.
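A hedged sketch of the group listing documented above, accepting the partial-result case the docs call out (function name and timeout are assumptions):

```c
/* Minimal sketch: list all consumer groups known to the cluster. A
 * __PARTIAL result still carries a usable (incomplete) list. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void show_groups (rd_kafka_t *rk) {
        const struct rd_kafka_group_list *grplist;
        rd_kafka_resp_err_t err =
                rd_kafka_list_groups(rk, NULL /* all */, &grplist, 5000);
        if (err != RD_KAFKA_RESP_ERR_NO_ERROR &&
            err != RD_KAFKA_RESP_ERR__PARTIAL) {
                fprintf(stderr, "list_groups failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }
        for (int i = 0; i < grplist->group_cnt; i++)
                printf("%s: state %s, %d member(s)\n",
                       grplist->groups[i].group,
                       grplist->groups[i].state,
                       grplist->groups[i].member_cnt);
        rd_kafka_group_list_destroy(grplist);
}
```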
- */ -RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms); - -/** - * @brief Release list memory - */ -RD_EXPORT -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); - - -/**@}*/ - - - -/** - * @name Miscellaneous APIs - * @{ - * - */ - - -/** - * @brief Adds one or more brokers to the kafka handle's list of initial - * bootstrap brokers. - * - * Additional brokers will be discovered automatically as soon as rdkafka - * connects to a broker by querying the broker metadata. - * - * If a broker name resolves to multiple addresses (and possibly - * address families) all will be used for connection attempts in - * round-robin fashion. - * - * \p brokerlist is a ,-separated list of brokers in the format: - * \c \<broker1\>,\<broker2\>,.. - * Where each broker is in either the host or URL based format: - * \c \<host\>[:\<port\>] - * \c \<proto\>://\<host\>[:port] - * \c \<proto\> is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT - * The two formats can be mixed but ultimately the value of the - * `security.protocol` config property decides what brokers are allowed. - * - * Example: - * brokerlist = "broker1:10000,broker2" - * brokerlist = "SSL://broker3:9000,ssl://broker2" - * - * @returns the number of brokers successfully added. - * - * @remark Brokers may also be defined with the \c metadata.broker.list or - * \c bootstrap.servers configuration property (preferred method). - * - * @deprecated Set bootstrap servers with the \c bootstrap.servers - * configuration property. - */ -RD_EXPORT -int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); - - - - -/** - * @brief Set logger function. - * - * The default is to print to stderr, but a syslog logger is also available, - * see rd_kafka_log_(print|syslog) for the builtin alternatives. - * Alternatively the application may provide its own logger callback. - * Or pass 'func' as NULL to disable logging. - * - * @deprecated Use rd_kafka_conf_set_log_cb() - * - * @remark \p rk may be passed as NULL in the callback. - */ -RD_EXPORT RD_DEPRECATED -void rd_kafka_set_logger(rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); - - -/** - * @brief Specifies the maximum logging level emitted by - * internal kafka logging and debugging. - * - * @deprecated Set the \c "log_level" configuration property instead. - * - * @remark If the \p \"debug\" configuration property is set the log level is - * automatically adjusted to \c LOG_DEBUG (7). - */ -RD_EXPORT -void rd_kafka_set_log_level(rd_kafka_t *rk, int level); - - -/** - * @brief Builtin (default) log sink: print to stderr - */ -RD_EXPORT -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); - - -/** - * @brief Builtin log sink: print to syslog. - * @remark This logger is only available if librdkafka was built - * with syslog support. - */ -RD_EXPORT -void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); - - -/** - * @brief Returns the current out queue length. - * - * The out queue length is the sum of: - * - number of messages waiting to be sent to, or acknowledged by, - * the broker. - * - number of delivery reports (e.g., dr_msg_cb) waiting to be served - * by rd_kafka_poll() or rd_kafka_flush(). - * - number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be - * served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
- * - number of events waiting to be served by background_event_cb() in - * the background queue (see rd_kafka_conf_set_background_event_cb). - * - * An application should wait for the return value of this function to reach - * zero before terminating to make sure outstanding messages, - * requests (such as offset commits), callbacks and events are fully processed. - * See rd_kafka_flush(). - * - * @returns number of messages and events waiting in queues. - * - * @sa rd_kafka_flush() - */ -RD_EXPORT -int rd_kafka_outq_len(rd_kafka_t *rk); - - - -/** - * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp - * - * This is only useful for debugging rdkafka, showing state and statistics - * for brokers, topics, partitions, etc. - */ -RD_EXPORT -void rd_kafka_dump(FILE *fp, rd_kafka_t *rk); - - - -/** - * @brief Retrieve the current number of threads in use by librdkafka. - * - * Used by regression tests. - */ -RD_EXPORT -int rd_kafka_thread_cnt(void); - - -/** - * @enum rd_kafka_thread_type_t - * - * @brief librdkafka internal thread type. - * - * @sa rd_kafka_interceptor_add_on_thread_start() - */ -typedef enum rd_kafka_thread_type_t { - RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */ - RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */ - RD_KAFKA_THREAD_BROKER /**< Per-broker thread */ -} rd_kafka_thread_type_t; - - -/** - * @brief Wait for all rd_kafka_t objects to be destroyed. - * - * Returns 0 if all kafka objects are now destroyed, or -1 if the - * timeout was reached. - * - * @remark This function is deprecated. - */ -RD_EXPORT -int rd_kafka_wait_destroyed(int timeout_ms); - - -/** - * @brief Run librdkafka's built-in unit-tests. - * - * @returns the number of failures, or 0 if all tests passed. - */ -RD_EXPORT -int rd_kafka_unittest (void); - - -/**@}*/ - - - - -/** - * @name Experimental APIs - * @{ - */ - -/** - * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's - * queue (rd_kafka_consumer_poll()). - * - * @warning It is not permitted to call rd_kafka_poll() after directing the - * main queue with rd_kafka_poll_set_consumer(). - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); - - -/**@}*/ - -/** - * @name Event interface - * - * @brief The event API provides an alternative pollable non-callback interface - * to librdkafka's message and event queues. 
- * - * @{ - */ - - -/** - * @brief Event types - */ -typedef int rd_kafka_event_type_t; -#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ -#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ -#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ -#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ -#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ -#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ -#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ -#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ -#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ -#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ -#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT 102 /**< CreatePartitions_result_t */ -#define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */ -#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT 104 /**< DescribeConfigs_result_t */ -#define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */ -#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ -/** DeleteConsumerGroupOffsets_result_t */ -#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107 -#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 /**< SASL/OAUTHBEARER - token needs to be - refreshed */ - - -/** - * @returns the event type for the given event. - * - * @remark As a convenience it is okay to pass \p rkev as NULL in which case - * RD_KAFKA_EVENT_NONE is returned. - */ -RD_EXPORT -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); - -/** - * @returns the event type's name for the given event. - * - * @remark As a convenience it is okay to pass \p rkev as NULL in which case - * the name for RD_KAFKA_EVENT_NONE is returned. - */ -RD_EXPORT -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); - - -/** - * @brief Destroy an event. - * - * @remark Any references to this event, such as extracted messages, - * will not be usable after this call. - * - * @remark As a convenience it is okay to pass \p rkev as NULL in which case - * no action is performed. - */ -RD_EXPORT -void rd_kafka_event_destroy (rd_kafka_event_t *rkev); - - -/** - * @returns the next message from an event. - * - * Call repeatedly until it returns NULL. - * - * Event types: - * - RD_KAFKA_EVENT_FETCH (1 message) - * - RD_KAFKA_EVENT_DR (>=1 message(s)) - * - * @remark The returned message(s) MUST NOT be - * freed with rd_kafka_message_destroy(). - * - * @remark on_consume() interceptor may be called - * from this function prior to passing message to application. - */ -RD_EXPORT -const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); - - -/** - * @brief Extracts \p size message(s) from the event into the - * pre-allocated array \p rkmessages. - * - * Event types: - * - RD_KAFKA_EVENT_FETCH (1 message) - * - RD_KAFKA_EVENT_DR (>=1 message(s)) - * - * @returns the number of messages extracted. - * - * @remark on_consume() interceptor may be called - * from this function prior to passing message to application. - */ -RD_EXPORT -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, - size_t size); - - -/** - * @returns the number of remaining messages in the event.
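A hedged sketch of the event loop described above (it assumes RD_KAFKA_EVENT_DR and RD_KAFKA_EVENT_ERROR were enabled with `rd_kafka_conf_set_events()` before `rd_kafka_new()`; the function name is hypothetical):

```c
/* Minimal event-API loop: poll the main queue, dispatch on event type,
 * destroy each event when done. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void serve_events (rd_kafka_t *rk) {
        rd_kafka_queue_t *rkqu = rd_kafka_queue_get_main(rk);
        rd_kafka_event_t *rkev;

        while ((rkev = rd_kafka_queue_poll(rkqu, 1000))) {
                switch (rd_kafka_event_type(rkev)) {
                case RD_KAFKA_EVENT_DR: {
                        const rd_kafka_message_t *rkm;
                        /* One DR event may carry a batch of reports. */
                        while ((rkm = rd_kafka_event_message_next(rkev)))
                                if (rkm->err)
                                        fprintf(stderr,
                                                "delivery failed: %s\n",
                                                rd_kafka_message_errstr(rkm));
                        break;
                }
                case RD_KAFKA_EVENT_ERROR:
                        fprintf(stderr, "error: %s\n",
                                rd_kafka_event_error_string(rkev));
                        break;
                default:
                        break;
                }
                rd_kafka_event_destroy(rkev); /* invalidates extracted msgs */
        }
        rd_kafka_queue_destroy(rkqu);
}
```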
- * - * Event types: - * - RD_KAFKA_EVENT_FETCH (1 message) - * - RD_KAFKA_EVENT_DR (>=1 message(s)) - */ -RD_EXPORT -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); - - -/** - * @returns the associated configuration string for the event, or NULL - * if the configuration property is not set or if - * not applicable for the given event type. - * - * The returned memory is read-only and its lifetime is the same as the - * event object. - * - * Event types: - * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config - */ -RD_EXPORT -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); - - -/** - * @returns the error code for the event. - * - * Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error. - * - * Event types: - * - all - */ -RD_EXPORT -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); - - -/** - * @returns the error string (if any). - * An application should check that rd_kafka_event_error() returns - * non-zero before calling this function. - * - * Event types: - * - all - */ -RD_EXPORT -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); - - -/** - * @returns 1 if the error is a fatal error, else 0. - * - * Event types: - * - RD_KAFKA_EVENT_ERROR - * - * @sa rd_kafka_fatal_error() - */ -RD_EXPORT -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); - - -/** - * @returns the event opaque (if any) as passed to rd_kafka_commit() (et.al) or - * rd_kafka_AdminOptions_set_opaque(), depending on event type. - * - * Event types: - * - RD_KAFKA_EVENT_OFFSET_COMMIT - * - RD_KAFKA_EVENT_CREATETOPICS_RESULT - * - RD_KAFKA_EVENT_DELETETOPICS_RESULT - * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT - * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT - * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT - * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT - * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT - * - RD_KAFKA_EVENT_DELETERECORDS_RESULT - */ -RD_EXPORT -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); - - -/** - * @brief Extract log message from the event. - * - * Event types: - * - RD_KAFKA_EVENT_LOG - * - * @returns 0 on success or -1 if unsupported event type. - */ -RD_EXPORT -int rd_kafka_event_log (rd_kafka_event_t *rkev, - const char **fac, const char **str, int *level); - - -/** - * @brief Extract log debug context from event. - * - * Event types: - * - RD_KAFKA_EVENT_LOG - * - * @param rkev the event to extract data from. - * @param dst destination string for comma separated list. - * @param dstsize size of provided dst buffer. - * @returns 0 on success or -1 if unsupported event type. - */ -RD_EXPORT -int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, - char *dst, size_t dstsize); - - -/** - * @brief Extract stats from the event. - * - * Event types: - * - RD_KAFKA_EVENT_STATS - * - * @returns stats json string. - * - * @remark the returned string will be freed automatically along with the event object - * - */ -RD_EXPORT -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev); - - -/** - * @returns the topic partition list from the event. - * - * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() - * - * Event types: - * - RD_KAFKA_EVENT_REBALANCE - * - RD_KAFKA_EVENT_OFFSET_COMMIT - */ -RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); - - -/** - * @returns a newly allocated topic_partition container, if applicable for the event type, - * else NULL. 
-
-
-/**
- * @returns a newly allocated topic_partition container, if applicable for
- *          the event type, else NULL.
- *
- * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy().
- *
- * Event types:
- *   RD_KAFKA_EVENT_ERROR (for partition level errors)
- */
-RD_EXPORT rd_kafka_topic_partition_t *
-rd_kafka_event_topic_partition (rd_kafka_event_t *rkev);
-
-
-/*! CreateTopics result type */
-typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
-/*! DeleteTopics result type */
-typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
-/*! CreatePartitions result type */
-typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
-/*! AlterConfigs result type */
-typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
-/*! DescribeConfigs result type */
-typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
-/*! DeleteRecords result type */
-typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
-/*! DeleteGroups result type */
-typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
-/*! DeleteConsumerGroupOffsets result type */
-typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
-
-/**
- * @brief Get CreateTopics result.
- *
- * @returns the result of a CreateTopics request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_CREATETOPICS_RESULT
- */
-RD_EXPORT const rd_kafka_CreateTopics_result_t *
-rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteTopics result.
- *
- * @returns the result of a DeleteTopics request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_DELETETOPICS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteTopics_result_t *
-rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get CreatePartitions result.
- *
- * @returns the result of a CreatePartitions request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
- */
-RD_EXPORT const rd_kafka_CreatePartitions_result_t *
-rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get AlterConfigs result.
- *
- * @returns the result of an AlterConfigs request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
- */
-RD_EXPORT const rd_kafka_AlterConfigs_result_t *
-rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DescribeConfigs result.
- *
- * @returns the result of a DescribeConfigs request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
- */
-RD_EXPORT const rd_kafka_DescribeConfigs_result_t *
-rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteRecords result.
- *
- * @returns the result of a DeleteRecords request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_DELETERECORDS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteRecords_result_t *
-rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteGroups result.
- *
- * @returns the result of a DeleteGroups request, or NULL if event is of
- *          different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_DELETEGROUPS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteGroups_result_t *
-rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteConsumerGroupOffsets result.
- *
- * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if
- *          event is of different type.
- *
- * Event types:
- *   RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t *
-rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev);
-
-/**
- * @brief Poll a queue for an event for max \p timeout_ms.
- *
- * @returns an event, or NULL.
- *
- * @remark Use rd_kafka_event_destroy() to free the event.
- *
- * @sa rd_kafka_conf_set_background_event_cb()
- */
-RD_EXPORT
-rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms);
-
-/**
- * @brief Poll a queue for events served through callbacks for max \p timeout_ms.
- *
- * @returns the number of events served.
- *
- * @remark This API must only be used for queues with callbacks registered
- *         for all expected event types. E.g., not a message queue.
- *
- * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering
- *         event callbacks from a librdkafka-managed background thread.
- *
- * @sa rd_kafka_conf_set_background_event_cb()
- */
-RD_EXPORT
-int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms);
-
-
-/**@}*/
-
-
-/**
- * @name Plugin interface
- *
- * @brief A plugin interface that allows external runtime-loaded libraries
- *        to integrate with a client instance without modifications to
- *        the application code.
- *
- *        Plugins are loaded when referenced through the `plugin.library.paths`
- *        configuration property and operate on the \c rd_kafka_conf_t
- *        object prior to \c rd_kafka_t instance creation.
- *
- * @warning Plugins require the application to link librdkafka dynamically
- *          and not statically. Failure to do so will lead to missing symbols
- *          or finding symbols in another librdkafka library than the
- *          application was linked with.
- *
- * @{
- */
-
-
-/**
- * @brief Plugin's configuration initializer method called each time the
- *        library is referenced from configuration (even if previously loaded by
- *        another client instance).
- *
- * @remark This method MUST be implemented by plugins and have the symbol name
- *         \c conf_init
- *
- * @param conf Configuration set up to this point.
- * @param plug_opaquep Plugin can set this pointer to a per-configuration
- *                     opaque pointer.
- * @param errstr String buffer of size \p errstr_size where plugin must write
- *               a human readable error string in the case the initializer
- *               fails (returns non-zero).
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @remark A plugin may add an on_conf_destroy() interceptor to clean up
- *         plugin-specific resources created in the plugin's conf_init() method.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_plugin_f_conf_init_t) (rd_kafka_conf_t *conf,
-                                 void **plug_opaquep,
-                                 char *errstr, size_t errstr_size);
-
-/**@}*/
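A minimal sketch of the `conf_init` entry point a plugin library might export, assuming the signature above. Everything here except the typedef'd signature is invented for illustration (`my_plugin_state` is a hypothetical per-configuration state struct; a real plugin would typically also register interceptors, as shown in the interceptor section below):

```c
#include <stdio.h>
#include <stdlib.h>
#include <librdkafka/rdkafka.h>

struct my_plugin_state { int dummy; };

/* Symbol name MUST be conf_init, per the contract above. */
rd_kafka_resp_err_t
conf_init (rd_kafka_conf_t *conf, void **plug_opaquep,
           char *errstr, size_t errstr_size) {
        struct my_plugin_state *state = calloc(1, sizeof(*state));

        if (!state) {
                snprintf(errstr, errstr_size, "out of memory");
                return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
        }

        /* Hand the per-configuration state back to librdkafka. */
        *plug_opaquep = state;
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
```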
-
-
-/**
- * @name Interceptors
- *
- * @{
- *
- * @brief A callback interface that allows message interception for both
- *        producer and consumer data pipelines.
- *
- * Except for the on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy()
- * interceptors, interceptors are added to the
- * newly created rd_kafka_t client instance. These interceptors MUST only
- * be added from on_new() and MUST NOT be added after rd_kafka_new() returns.
- *
- * The on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() interceptors
- * are added to the configuration object which is later passed to
- * rd_kafka_new() where on_new() is called to allow addition of
- * other interceptors.
- *
- * Each interceptor reference consists of a display name (ic_name),
- * a callback function, and an application-specified opaque value that is
- * passed as-is to the callback.
- * The ic_name must be unique for the interceptor implementation and is used
- * to reject duplicate interceptor methods.
- *
- * Any number of interceptors can be added and they are called in the order
- * they were added, unless otherwise noted.
- * The list of registered interceptor methods is referred to as
- * an interceptor chain.
- *
- * @remark Contrary to the Java client the librdkafka interceptor interface
- *         does not support message key and value modification.
- *         Message mutability is discouraged in the Java client and the
- *         combination of serializers and headers cover most use-cases.
- *
- * @remark Interceptors are NOT copied to the new configuration on
- *         rd_kafka_conf_dup() since it would be hard for interceptors to
- *         track usage of the interceptor's opaque value.
- *         An interceptor should rely on the plugin, which will be copied
- *         in rd_kafka_conf_dup(), to set up the initial interceptors.
- *         An interceptor should implement the on_conf_dup() method
- *         to manually set up its internal configuration on the newly created
- *         configuration object that is being copied-to based on the
- *         interceptor-specific configuration properties.
- *         conf_dup() should thus be treated the same as conf_init().
- *
- * @remark Interceptors are keyed by the interceptor type (on_..()), the
- *         interceptor name (ic_name) and the interceptor method function.
- *         Duplicates are not allowed and the .._add_on_..() method will
- *         return RD_KAFKA_RESP_ERR__CONFLICT if attempting to add a duplicate
- *         method.
- *         The only exception is on_conf_destroy() which may be added multiple
- *         times by the same interceptor to allow proper cleanup of
- *         interceptor configuration state.
- */
-
-
-/**
- * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order
- *        the interceptors were added.
- *
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- * @param name The configuration property to set.
- * @param val The configuration value to set, or NULL for reverting to default
- *            in which case the previous value should be freed.
- * @param errstr A human readable error string in case the interceptor fails.
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @returns RD_KAFKA_CONF_OK if the property was known and successfully
- *          handled by the interceptor, RD_KAFKA_CONF_INVALID if the
- *          property was handled by the interceptor but the value was invalid,
- *          or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle
- *          this property, in which case the property is passed on to the
- *          next interceptor in the chain, finally ending up at the built-in
- *          configuration handler.
- */
-typedef rd_kafka_conf_res_t
-(rd_kafka_interceptor_f_on_conf_set_t) (rd_kafka_conf_t *conf,
-                                        const char *name, const char *val,
-                                        char *errstr, size_t errstr_size,
-                                        void *ic_opaque);
-
-
-/**
- * @brief on_conf_dup() is called from rd_kafka_conf_dup() in the
- *        order the interceptors were added and is used to let
- *        an interceptor re-register its conf interceptors with a new
- *        opaque value.
- *        The on_conf_dup() method is called prior to the configuration from
- *        \p old_conf being copied to \p new_conf.
- *
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code
- *          on failure (which is logged but otherwise ignored).
- *
- * @remark No on_conf_* interceptors are copied to the new configuration
- *         object on rd_kafka_conf_dup().
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_conf_dup_t) (rd_kafka_conf_t *new_conf,
-                                        const rd_kafka_conf_t *old_conf,
-                                        size_t filter_cnt,
-                                        const char **filter,
-                                        void *ic_opaque);
-
-
-/**
- * @brief on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the
- *        order the interceptors were added.
- *
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_conf_destroy_t) (void *ic_opaque);
-
-
-/**
- * @brief on_new() is called from rd_kafka_new() prior to returning
- *        the newly created client instance to the application.
- *
- * @param rk The client instance.
- * @param conf The client instance's final configuration.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- * @param errstr A human readable error string in case the interceptor fails.
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- *
- * @warning The \p rk client instance will not be fully set up when this
- *          interceptor is called and the interceptor MUST NOT call any
- *          other rk-specific APIs than rd_kafka_interceptor_add..().
- *
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_new_t) (rd_kafka_t *rk, const rd_kafka_conf_t *conf,
-                                   void *ic_opaque,
-                                   char *errstr, size_t errstr_size);
-
-
-/**
- * @brief on_destroy() is called from rd_kafka_destroy(), or from
- *        rd_kafka_new() if rd_kafka_new() fails during initialization.
- *
- * @param rk The client instance.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_destroy_t) (rd_kafka_t *rk, void *ic_opaque);
-
-
-
-
-/**
- * @brief on_send() is called from rd_kafka_produce*() (et.al) prior to
- *        the partitioner being called.
- *
- * @param rk The client instance.
- * @param rkmessage The message being produced. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by producer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- *         by the interceptor.
- *
- * @remark If the partitioner fails or an unknown partition was specified,
- *         the on_acknowledgement() interceptor chain will be called from
- *         within the rd_kafka_produce*() call to maintain send-acknowledgement
- *         symmetry.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_send_t) (rd_kafka_t *rk,
-                                    rd_kafka_message_t *rkmessage,
-                                    void *ic_opaque);
-
-/**
- * @brief on_acknowledgement() is called to inform interceptors that a message
- *        was successfully delivered or permanently failed delivery.
- *        The interceptor chain is called from internal librdkafka background
- *        threads, or rd_kafka_produce*() if the partitioner failed.
- *
- * @param rk The client instance.
- * @param rkmessage The message being produced. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by producer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- *         by the interceptor.
- *
- * @warning The on_acknowledgement() method may be called from internal
- *          librdkafka threads. An on_acknowledgement() interceptor MUST NOT
- *          call any librdkafka API's associated with the \p rk, or perform
- *          any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_acknowledgement_t) (rd_kafka_t *rk,
-                                               rd_kafka_message_t *rkmessage,
-                                               void *ic_opaque);
-
-
-/**
- * @brief on_consume() is called just prior to passing the message to the
- *        application in rd_kafka_consumer_poll(), rd_kafka_consume*(),
- *        the event interface, etc.
- *
- * @param rk The client instance.
- * @param rkmessage The message being consumed. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by consumer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- *         by the interceptor.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_consume_t) (rd_kafka_t *rk,
-                                       rd_kafka_message_t *rkmessage,
-                                       void *ic_opaque);
-
-/**
- * @brief on_commit() is called on completed or failed offset commit.
- *        It is called from internal librdkafka threads.
- *
- * @param rk The client instance.
- * @param offsets List of topic+partition+offset+error that were committed.
- *                The error code of each partition should be checked for
- *                per-partition errors.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by consumer instances.
- *
- * @warning The on_commit() interceptor is called from internal
- *          librdkafka threads. An on_commit() interceptor MUST NOT
- *          call any librdkafka API's associated with the \p rk, or perform
- *          any blocking or prolonged work.
- *
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_commit_t) (
-        rd_kafka_t *rk,
-        const rd_kafka_topic_partition_list_t *offsets,
-        rd_kafka_resp_err_t err, void *ic_opaque);
-
-
-/**
- * @brief on_request_sent() is called when a request has been fully written
- *        to a broker TCP connection's socket.
- *
- * @param rk The client instance.
- * @param sockfd Socket file descriptor.
- * @param brokername Name of broker the request is being sent to.
- * @param brokerid Id of broker the request is being sent to.
- * @param ApiKey Kafka protocol request type.
- * @param ApiVersion Kafka protocol request type version.
- * @param CorrId Kafka protocol request correlation id.
- * @param size Size of request.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_request_sent() interceptor is called from internal
- *          librdkafka broker threads. An on_request_sent() interceptor MUST NOT
- *          call any librdkafka API's associated with the \p rk, or perform
- *          any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_request_sent_t) (
-        rd_kafka_t *rk,
-        int sockfd,
-        const char *brokername,
-        int32_t brokerid,
-        int16_t ApiKey,
-        int16_t ApiVersion,
-        int32_t CorrId,
-        size_t size,
-        void *ic_opaque);
-
-
-/**
- * @brief on_response_received() is called when a protocol response has been
- *        fully received from a broker TCP connection socket but before the
- *        response payload is parsed.
- *
- * @param rk The client instance.
- * @param sockfd Socket file descriptor (always -1).
- * @param brokername Broker response was received from, possibly empty string
- *                   on error.
- * @param brokerid Broker response was received from.
- * @param ApiKey Kafka protocol request type or -1 on error.
- * @param ApiVersion Kafka protocol request type version or -1 on error.
- * @param CorrId Kafka protocol request correlation id, possibly -1 on error.
- * @param size Size of response, possibly 0 on error.
- * @param rtt Request round-trip-time in microseconds, possibly -1 on error.
- * @param err Receive error.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_response_received() interceptor is called from internal
- *          librdkafka broker threads. An on_response_received() interceptor
- *          MUST NOT call any librdkafka API's associated with the \p rk, or
- *          perform any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_response_received_t) (
-        rd_kafka_t *rk,
-        int sockfd,
-        const char *brokername,
-        int32_t brokerid,
-        int16_t ApiKey,
-        int16_t ApiVersion,
-        int32_t CorrId,
-        size_t size,
-        int64_t rtt,
-        rd_kafka_resp_err_t err,
-        void *ic_opaque);
-
-
-/**
- * @brief on_thread_start() is called from a newly created librdkafka-managed
- *        thread.
- *
- * @param rk The client instance.
- * @param thread_type Thread type.
- * @param thread_name Human-readable thread name, may not be unique.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_thread_start() interceptor is called from internal
- *          librdkafka threads. An on_thread_start() interceptor MUST NOT
- *          call any librdkafka API's associated with the \p rk, or perform
- *          any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_thread_start_t) (
-        rd_kafka_t *rk,
-        rd_kafka_thread_type_t thread_type,
-        const char *thread_name,
-        void *ic_opaque);
-
-
-/**
- * @brief on_thread_exit() is called just prior to a librdkafka-managed
- *        thread exiting from the exiting thread itself.
- *
- * @param rk The client instance.
- * @param thread_type Thread type.
- * @param thread_name Human-readable thread name, may not be unique.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark Depending on the thread type, librdkafka may execute additional
- *         code on the thread after on_thread_exit() returns.
- *
- * @warning The on_thread_exit() interceptor is called from internal
- *          librdkafka threads. An on_thread_exit() interceptor MUST NOT
- *          call any librdkafka API's associated with the \p rk, or perform
- *          any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t
-(rd_kafka_interceptor_f_on_thread_exit_t) (
-        rd_kafka_t *rk,
-        rd_kafka_thread_type_t thread_type,
-        const char *thread_name,
-        void *ic_opaque);
-
-
-
-/**
- * @brief Append an on_conf_set() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_set Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_set (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_conf_dup() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_dup Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_dup (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
-        void *ic_opaque);
-
-/**
- * @brief Append an on_conf_destroy() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_destroy Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- *
- * @remark Multiple on_conf_destroy() interceptors are allowed to be added
- *         to the same configuration object.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_destroy (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_new() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_new Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @remark Since the on_new() interceptor is added to the configuration object
- *         it may be copied by rd_kafka_conf_dup().
- *         An interceptor implementation must thus be able to handle
- *         the same (interceptor, ic_opaque) tuple being used by multiple
- *         client instances.
- *
- * @remark An interceptor plugin should check the return value to make sure it
- *         has not already been added.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_new (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_new_t *on_new,
-        void *ic_opaque);
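A hedged sketch of the canonical registration chain described in the section intro: a plugin's `conf_init()` registers `on_new()` on the configuration object, and `on_new()` then attaches the instance-level interceptors (using `rd_kafka_interceptor_add_on_send()`, declared further below). The names `my_on_send`, `my_on_new` and the `"my_plugin"` ic_name are invented for this example:

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t
my_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
        /* Observe only; the message MUST NOT be modified. */
        fprintf(stderr, "producing to %s\n",
                rd_kafka_topic_name(rkmessage->rkt));
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

static rd_kafka_resp_err_t
my_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf,
           void *ic_opaque, char *errstr, size_t errstr_size) {
        /* Instance interceptors MUST be added here, never after
         * rd_kafka_new() has returned. */
        return rd_kafka_interceptor_add_on_send(rk, "my_plugin",
                                                my_on_send, ic_opaque);
}

rd_kafka_resp_err_t
conf_init (rd_kafka_conf_t *conf, void **plug_opaquep,
           char *errstr, size_t errstr_size) {
        /* The conf-level hook that bootstraps everything else. */
        return rd_kafka_conf_interceptor_add_on_new(conf, "my_plugin",
                                                    my_on_new, NULL);
}
```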
-
-
-/**
- * @brief Append an on_destroy() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_destroy Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_destroy (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_destroy_t *on_destroy,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_send() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_send Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_send (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_send_t *on_send,
-        void *ic_opaque);
-
-/**
- * @brief Append an on_acknowledgement() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_acknowledgement Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_acknowledgement (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_consume() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_consume Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_consume (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_consume_t *on_consume,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_commit() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_commit Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_commit (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_commit_t *on_commit,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_request_sent() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_request_sent Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_request_sent (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_response_received() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_response_received Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_response_received (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_response_received_t *on_response_received,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_thread_start() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_thread_start Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_thread_start (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
-        void *ic_opaque);
-
-
-/**
- * @brief Append an on_thread_exit() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_thread_exit Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- *          has already been added to \p rk.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_thread_exit (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
-        void *ic_opaque);
-
-
-
-/**@}*/
-
-
-
-/**
- * @name Auxiliary types
- *
- * @{
- */
-
-
-
-/**
- * @brief Topic result provides per-topic operation result information.
- *
- */
-
-/**
- * @returns the error code for the given topic result.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres);
-
-/**
- * @returns the human readable error string for the given topic result,
- *          or NULL if there was no error.
- *
- * @remark lifetime of the returned string is the same as the \p topicres.
- */
-RD_EXPORT const char *
-rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres);
-
-/**
- * @returns the name of the topic for the given topic result.
- * @remark lifetime of the returned string is the same as the \p topicres.
- *
- */
-RD_EXPORT const char *
-rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres);
-
-/**
- * @brief Group result provides per-group operation result information.
- *
- */
-
-/**
- * @returns the error for the given group result, or NULL on success.
- * @remark lifetime of the returned error is the same as the \p groupres.
- */
-RD_EXPORT const rd_kafka_error_t *
-rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres);
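A brief sketch of iterating these per-topic results, as returned by the Admin API result accessors further below (the helper name `print_topic_results` is invented; the accessors themselves are declared above):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Report each topic's outcome from an admin result, e.g. the array
 * obtained from rd_kafka_CreateTopics_result_topics(). */
static void print_topic_results (const rd_kafka_topic_result_t **topics,
                                 size_t cnt) {
        size_t i;
        for (i = 0; i < cnt; i++) {
                rd_kafka_resp_err_t err =
                        rd_kafka_topic_result_error(topics[i]);
                fprintf(stderr, "topic %s: %s\n",
                        rd_kafka_topic_result_name(topics[i]),
                        err ? rd_kafka_topic_result_error_string(topics[i])
                            : "ok");
        }
}
```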
-
-/**
- * @returns the name of the group for the given group result.
- * @remark lifetime of the returned string is the same as the \p groupres.
- *
- */
-RD_EXPORT const char *
-rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres);
-
-/**
- * @returns the partitions/offsets for the given group result, if applicable
- *          to the request type, else NULL.
- * @remark lifetime of the returned list is the same as the \p groupres.
- */
-RD_EXPORT const rd_kafka_topic_partition_list_t *
-rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres);
-
-
-/**@}*/
-
-
-/**
- * @name Admin API
- * @{
- *
- * @brief The Admin API enables applications to perform administrative
- *        Apache Kafka tasks, such as creating and deleting topics,
- *        altering and reading broker configuration, etc.
- *
- * The Admin API is asynchronous and makes use of librdkafka's standard
- * \c rd_kafka_queue_t queues to propagate the result of an admin operation
- * back to the application.
- * The supplied queue may be any queue, such as a temporary single-call queue,
- * a shared queue used for multiple requests, or even the main queue or
- * consumer queues.
- *
- * Use \c rd_kafka_queue_poll() to collect the result of an admin operation
- * from the queue of your choice, then extract the admin API-specific result
- * type by using the corresponding \c rd_kafka_event_CreateTopics_result,
- * \c rd_kafka_event_DescribeConfigs_result, etc, methods.
- * Use the getter methods on the \c .._result_t type to extract response
- * information and finally destroy the result and event by calling
- * \c rd_kafka_event_destroy().
- *
- * Use rd_kafka_event_error() and rd_kafka_event_error_string() to acquire
- * the request-level error/success for an Admin API request.
- * Even if the returned value is \c RD_KAFKA_RESP_ERR_NO_ERROR there
- * may be individual objects (topics, resources, etc) that have failed.
- * Extract per-object error information with the corresponding
- * \c rd_kafka_..._result_topics|resources|..() to check per-object errors.
- *
- * Locally triggered errors:
- *  - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not
- *    become available in the time allowed by
- *    rd_kafka_AdminOptions_set_request_timeout().
- */
-
-
-/**
- * @enum rd_kafka_admin_op_t
- *
- * @brief Admin operation enum name for use with rd_kafka_AdminOptions_new()
- *
- * @sa rd_kafka_AdminOptions_new()
- */
-typedef enum rd_kafka_admin_op_t {
-        RD_KAFKA_ADMIN_OP_ANY = 0, /**< Default value */
-        RD_KAFKA_ADMIN_OP_CREATETOPICS, /**< CreateTopics */
-        RD_KAFKA_ADMIN_OP_DELETETOPICS, /**< DeleteTopics */
-        RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */
-        RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */
-        RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */
-        RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */
-        RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */
-        /** DeleteConsumerGroupOffsets */
-        RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
-        RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
-} rd_kafka_admin_op_t;
-
-/**
- * @brief AdminOptions provides a generic mechanism for setting optional
- *        parameters for the Admin API requests.
- *
- * @remark Since AdminOptions is decoupled from the actual request type
- *         there is no enforcement to prevent setting unrelated properties,
- *         e.g. setting validate_only on a DescribeConfigs request is allowed
- *         but is silently ignored by DescribeConfigs.
- *         Future versions may introduce such enforcement.
- */
-
-
-typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
-
-/**
- * @brief Create a new AdminOptions object.
- *
- *        The options object is not modified by the Admin API request APIs
- *        (e.g. CreateTopics) and may be reused for multiple calls.
- *
- * @param rk Client instance.
- * @param for_api Specifies what Admin API this AdminOptions object will be used
- *                for, which will enforce what AdminOptions_set_..() calls may
- *                be used based on the API, causing unsupported set..() calls
- *                to fail.
- *                Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement
- *                allowing any option to be set, even if the option
- *                is not used in a future call to an Admin API method.
- *
- * @returns a new AdminOptions object (which must be freed with
- *          rd_kafka_AdminOptions_destroy()), or NULL if \p for_api was set to
- *          an unknown API op type.
- */
-RD_EXPORT rd_kafka_AdminOptions_t *
-rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
-
-
-/**
- * @brief Destroy an AdminOptions object.
- */
-RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options);
-
-
-/**
- * @brief Sets the overall request timeout, including broker lookup,
- *        request transmission, operation time on broker, and response.
- *
- * @param options Admin options.
- * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout.
- *                   Defaults to `socket.timeout.ms`.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
- *          RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
- *          case an error string will be written to \p errstr.
- *
- * @remark This option is valid for all Admin API requests.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options,
-                                           int timeout_ms,
-                                           char *errstr, size_t errstr_size);
-
-
-/**
- * @brief Sets the broker's operation timeout, such as the timeout for
- *        CreateTopics to complete the creation of topics on the controller
- *        before returning a result to the application.
- *
- * CreateTopics: values <= 0 will return immediately after triggering topic
- * creation, while > 0 will wait this long for topic creation to propagate
- * in cluster. Default: 60 seconds.
- *
- * DeleteTopics: same semantics as CreateTopics.
- * CreatePartitions: same semantics as CreateTopics.
- *
- * @param options Admin options.
- * @param timeout_ms Timeout in milliseconds.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
- *          RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
- *          case an error string will be written to \p errstr.
- *
- * @remark This option is valid for CreateTopics, DeleteTopics,
- *         CreatePartitions, and DeleteRecords.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options,
-                                             int timeout_ms,
-                                             char *errstr, size_t errstr_size);
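A minimal sketch of setting these two timeouts on a fresh AdminOptions object (the helper name `make_create_opts` and the 10s/30s values are invented; all library calls are as declared above):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_AdminOptions_t *make_create_opts (rd_kafka_t *rk) {
        char errstr[256];
        rd_kafka_AdminOptions_t *options =
                rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

        /* Overall request timeout, then on-broker operation timeout. */
        if (rd_kafka_AdminOptions_set_request_timeout(
                    options, 10 * 1000, errstr, sizeof(errstr)) ||
            rd_kafka_AdminOptions_set_operation_timeout(
                    options, 30 * 1000, errstr, sizeof(errstr))) {
                fprintf(stderr, "AdminOptions failed: %s\n", errstr);
                rd_kafka_AdminOptions_destroy(options);
                return NULL;
        }
        return options;
}
```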
-
-
-/**
- * @brief Tell broker to only validate the request, without performing
- *        the requested operation (create topics, etc).
- *
- * @param options Admin options.
- * @param true_or_false Defaults to false.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
- *          error code on failure in which case an error string will
- *          be written to \p errstr.
- *
- * @remark This option is valid for CreateTopics,
- *         CreatePartitions, AlterConfigs.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options,
-                                         int true_or_false,
-                                         char *errstr, size_t errstr_size);
-
-
-/**
- * @brief Override what broker the Admin request will be sent to.
- *
- * By default, Admin requests are sent to the controller broker, with
- * the following exceptions:
- *   - AlterConfigs with a BROKER resource are sent to the broker id set
- *     as the resource name.
- *   - DescribeConfigs with a BROKER resource are sent to the broker id set
- *     as the resource name.
- *
- * @param options Admin Options.
- * @param broker_id The broker to send the request to.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
- *          error code on failure in which case an error string will
- *          be written to \p errstr.
- *
- * @remark This API should typically not be used, but serves as a workaround
- *         if new resource types are added to the broker that the client
- *         does not know where to send.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options,
-                                  int32_t broker_id,
-                                  char *errstr, size_t errstr_size);
-
-
-
-/**
- * @brief Set application opaque value that can be extracted from the
- *        result event using rd_kafka_event_opaque()
- */
-RD_EXPORT void
-rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options,
-                                  void *ev_opaque);
-
-
-
-
-
-
-/*
- * CreateTopics - create topics in cluster.
- *
- */
-
-
-/*! Defines a new topic to be created. */
-typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
-
-/**
- * @brief Create a new NewTopic object. This object is later passed to
- *        rd_kafka_CreateTopics().
- *
- * @param topic Topic name to create.
- * @param num_partitions Number of partitions in topic, or -1 to use the
- *                       broker's default partition count (>= 2.4.0).
- * @param replication_factor Default replication factor for the topic's
- *                           partitions, or -1 to use the broker's default
- *                           replication factor (>= 2.4.0) or if
- *                           set_replica_assignment() will be used.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- *
- * @returns a newly allocated NewTopic object, or NULL if the input parameters
- *          are invalid.
- *          Use rd_kafka_NewTopic_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_NewTopic_t *
-rd_kafka_NewTopic_new (const char *topic, int num_partitions,
-                       int replication_factor,
-                       char *errstr, size_t errstr_size);
-
-/**
- * @brief Destroy and free a NewTopic object previously created with
- *        rd_kafka_NewTopic_new()
- */
-RD_EXPORT void
-rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic);
-
-
-/**
- * @brief Helper function to destroy all NewTopic objects in the \p new_topics
- *        array (of \p new_topic_cnt elements).
- *        The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics,
-                                 size_t new_topic_cnt);
-
-
-/**
- * @brief Set the replica (broker) assignment for \p partition to the
- *        replica set in \p broker_ids (of \p broker_id_cnt elements).
- *
- * @remark When this method is used, rd_kafka_NewTopic_new() must have
- *         been called with a \c replication_factor of -1.
- *
- * @remark An application must either set the replica assignment for
- *         all new partitions, or none.
- *
- * @remark If called, this function must be called consecutively for each
- *         partition, starting at 0.
- *
- * @remark Use rd_kafka_metadata() to retrieve the list of brokers
- *         in the cluster.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- *          if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic,
-                                          int32_t partition,
-                                          int32_t *broker_ids,
-                                          size_t broker_id_cnt,
-                                          char *errstr, size_t errstr_size);
-
-/**
- * @brief Set (broker-side) topic configuration name/value pair.
- *
- * @remark The name and value are not validated by the client, the validation
- *         takes place on the broker.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- *          if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- * @sa http://kafka.apache.org/documentation.html#topicconfigs
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic,
-                              const char *name, const char *value);
-
-
-/**
- * @brief Create topics in cluster as specified by the \p new_topics
- *        array of size \p new_topic_cnt elements.
- *
- * @param rk Client instance.
- * @param new_topics Array of new topics to create.
- * @param new_topic_cnt Number of elements in \p new_topics array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- *  - rd_kafka_AdminOptions_set_validate_only() - default false
- *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
- *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
- *
- * @remark The result event type emitted on the supplied queue is of type
- *         \c RD_KAFKA_EVENT_CREATETOPICS_RESULT
- */
-RD_EXPORT void
-rd_kafka_CreateTopics (rd_kafka_t *rk,
-                       rd_kafka_NewTopic_t **new_topics,
-                       size_t new_topic_cnt,
-                       const rd_kafka_AdminOptions_t *options,
-                       rd_kafka_queue_t *rkqu);
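A hedged end-to-end sketch of the asynchronous CreateTopics flow described in the Admin API intro, made synchronous with a temporary queue. The helper name `create_one_topic`, the partition/replication counts, and the 30-second wait are invented; error handling is abbreviated, and the result accessor used at the end is declared just below:

```c
#include <stddef.h>
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void create_one_topic (rd_kafka_t *rk, const char *name) {
        char errstr[256];
        rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
        rd_kafka_NewTopic_t *newt =
                rd_kafka_NewTopic_new(name, 3 /*partitions*/,
                                      1 /*replication*/,
                                      errstr, sizeof(errstr));
        rd_kafka_event_t *rkev;

        rd_kafka_CreateTopics(rk, &newt, 1, NULL /*default options*/, rkqu);

        /* Block (up to 30s) for the RD_KAFKA_EVENT_CREATETOPICS_RESULT. */
        rkev = rd_kafka_queue_poll(rkqu, 30 * 1000);
        if (rkev) {
                const rd_kafka_CreateTopics_result_t *res =
                        rd_kafka_event_CreateTopics_result(rkev);
                size_t cnt;
                const rd_kafka_topic_result_t **topics =
                        rd_kafka_CreateTopics_result_topics(res, &cnt);

                if (cnt == 1)
                        fprintf(stderr, "create %s: %s\n", name,
                                rd_kafka_err2str(
                                        rd_kafka_topic_result_error(topics[0])));
                rd_kafka_event_destroy(rkev);
        }

        rd_kafka_NewTopic_destroy(newt);
        rd_kafka_queue_destroy(rkqu);
}
```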
-
-
-/*
- * CreateTopics result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a CreateTopics result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topics from.
- * @param cntp Updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_CreateTopics_result_topics (
-        const rd_kafka_CreateTopics_result_t *result,
-        size_t *cntp);
-
-
-
-
-
-/*
- * DeleteTopics - delete topics from cluster
- *
- */
-
-/*! Represents a topic to be deleted. */
-typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
-
-/**
- * @brief Create a new DeleteTopic object. This object is later passed to
- *        rd_kafka_DeleteTopics().
- *
- * @param topic Topic name to delete.
- *
- * @returns a newly allocated DeleteTopic object.
- *          Use rd_kafka_DeleteTopic_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_DeleteTopic_t *
-rd_kafka_DeleteTopic_new (const char *topic);
-
-/**
- * @brief Destroy and free a DeleteTopic object previously created with
- *        rd_kafka_DeleteTopic_new()
- */
-RD_EXPORT void
-rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic);
-
-/**
- * @brief Helper function to destroy all DeleteTopic objects in
- *        the \p del_topics array (of \p del_topic_cnt elements).
- *        The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics,
-                                    size_t del_topic_cnt);
-
-/**
- * @brief Delete topics from cluster as specified by the \p del_topics
- *        array of size \p del_topic_cnt elements.
- *
- * @param rk Client instance.
- * @param del_topics Array of topics to delete.
- * @param del_topic_cnt Number of elements in \p del_topics array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- *         \c RD_KAFKA_EVENT_DELETETOPICS_RESULT
- */
-RD_EXPORT
-void rd_kafka_DeleteTopics (rd_kafka_t *rk,
-                            rd_kafka_DeleteTopic_t **del_topics,
-                            size_t del_topic_cnt,
-                            const rd_kafka_AdminOptions_t *options,
-                            rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * DeleteTopics result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a DeleteTopics result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topic results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_DeleteTopics_result_topics (
-        const rd_kafka_DeleteTopics_result_t *result,
-        size_t *cntp);
-
-
-
-
-
-
-/*
- * CreatePartitions - add partitions to topic.
- *
- */
-
-/*! Defines a new partition to be created. */
-typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
-
-/**
- * @brief Create a new NewPartitions object. This object is later passed to
- *        rd_kafka_CreatePartitions() to increase the number of partitions
- *        to \p new_total_cnt for an existing topic.
- *
- * @param topic Topic name to create more partitions for.
- * @param new_total_cnt Increase the topic's partition count to this value.
- * @param errstr A human readable error string (nul-terminated) is written to
- *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns a newly allocated NewPartitions object, or NULL if the
- *          input parameters are invalid.
- *          Use rd_kafka_NewPartitions_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_NewPartitions_t *
-rd_kafka_NewPartitions_new (const char *topic, size_t new_total_cnt,
-                            char *errstr, size_t errstr_size);
-
-/**
- * @brief Destroy and free a NewPartitions object previously created with
- *        rd_kafka_NewPartitions_new()
- */
-RD_EXPORT void
-rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts);
-
-/**
- * @brief Helper function to destroy all NewPartitions objects in the
- *        \p new_parts array (of \p new_parts_cnt elements).
- *        The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts,
-                                      size_t new_parts_cnt);
-
-/**
- * @brief Set the replica (broker id) assignment for \p new_partition_idx to the
- *        replica set in \p broker_ids (of \p broker_id_cnt elements).
- *
- * @remark An application must either set the replica assignment for
- *         all new partitions, or none.
- *
- * @remark If called, this function must be called consecutively for each
- *         new partition being created,
- *         where \p new_partition_idx 0 is the first new partition,
- *         1 is the second, and so on.
- *
- * @remark \p broker_id_cnt should match the topic's replication factor.
- *
- * @remark Use rd_kafka_metadata() to retrieve the list of brokers
- *         in the cluster.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- *          if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_parts,
-                                               int32_t new_partition_idx,
-                                               int32_t *broker_ids,
-                                               size_t broker_id_cnt,
-                                               char *errstr,
-                                               size_t errstr_size);
-
-
-/**
- * @brief Create additional partitions for the given topics, as specified
- *        by the \p new_parts array of size \p new_parts_cnt elements.
- *
- * @param rk Client instance.
- * @param new_parts Array of topics for which new partitions are to be created.
- * @param new_parts_cnt Number of elements in \p new_parts array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- *  - rd_kafka_AdminOptions_set_validate_only() - default false
- *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
- *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
- *
- * @remark The result event type emitted on the supplied queue is of type
- *         \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
- */
-RD_EXPORT void
-rd_kafka_CreatePartitions (rd_kafka_t *rk,
-                           rd_kafka_NewPartitions_t **new_parts,
-                           size_t new_parts_cnt,
-                           const rd_kafka_AdminOptions_t *options,
-                           rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * CreatePartitions result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a CreatePartitions result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topic results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_CreatePartitions_result_topics (
-        const rd_kafka_CreatePartitions_result_t *result,
-        size_t *cntp);
-
-
-
-
-
-/*
- * Cluster, broker, topic configuration entries, sources, etc.
- *
- */
-
-/**
- * @enum rd_kafka_ConfigSource_t
- *
- * @brief Apache Kafka config sources.
- *
- * @remark These entities relate to the cluster, not the local client.
- *
- * @sa rd_kafka_conf_set(), et.al. for local client configuration.
- */
-typedef enum rd_kafka_ConfigSource_t {
-        /** Source unknown, e.g., in the ConfigEntry used for alter requests
-         *  where source is not set */
-        RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
-        /** Dynamic topic config that is configured for a specific topic */
-        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
-        /** Dynamic broker config that is configured for a specific broker */
-        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
-        /** Dynamic broker config that is configured as default for all
-         *  brokers in the cluster */
-        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
-        /** Static broker config provided as broker properties at startup
-         *  (e.g. from server.properties file) */
-        RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
-        /** Built-in default configuration for configs that have a
-         *  default value */
-        RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
-
-        /** Number of source types defined */
-        RD_KAFKA_CONFIG_SOURCE__CNT,
-} rd_kafka_ConfigSource_t;
-
-
-/**
- * @returns a string representation of the \p confsource.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource);
-
-
-/*! Apache Kafka configuration entry. */
-typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
-
-/**
- * @returns the configuration property name
- */
-RD_EXPORT const char *
-rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns the configuration value, may be NULL for sensitive or unset
- *          properties.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns the config source.
- */
-RD_EXPORT rd_kafka_ConfigSource_t
-rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property is read-only on the broker, else 0.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property is set to its default value on the broker,
- *          else 0.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property contains sensitive information (such as
- *          security configuration), else 0.
- * @remark An application should take care not to include the value of
- *         sensitive configuration entries in its output.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if this entry is a synonym, else 0.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry);
-
-
-/**
- * @returns the synonym config entry array.
- *
- * @param entry Entry to get synonyms for.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned entries is the same as \p entry.
- * @remark Shall only be used on a DescribeConfigs result,
- *         otherwise returns NULL.
- */
-RD_EXPORT const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry,
-                               size_t *cntp);
-
-
-
-
-/*! Apache Kafka resource types */
-typedef enum rd_kafka_ResourceType_t {
-        RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */
-        RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */
-        RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */
-        RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */
-        RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */
-        RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */
-} rd_kafka_ResourceType_t;
-
-/**
- * @returns a string representation of the \p restype
- */
-RD_EXPORT const char *
-rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype);
-
-/*! Apache Kafka configuration resource. */
-typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
-
-
-/**
- * @brief Create new ConfigResource object.
- *
- * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
- * @param resname The resource name (e.g., the topic name)
- *
- * @returns a newly allocated object
- */
-RD_EXPORT rd_kafka_ConfigResource_t *
-rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype,
-                             const char *resname);
-
-/**
- * @brief Destroy and free a ConfigResource object previously created with
- *        rd_kafka_ConfigResource_new()
- */
-RD_EXPORT void
-rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config);
-
-
-/**
- * @brief Helper function to destroy all ConfigResource objects in
- *        the \p configs array (of \p config_cnt elements).
- *        The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config,
-                                       size_t config_cnt);
-
-
-/**
- * @brief Set configuration name/value pair.
- *
- * @param config ConfigResource to set config property on.
- * @param name Configuration name, depends on resource type.
- * @param value Configuration value, depends on resource type and \p name.
- *              Set to \c NULL to revert configuration value to default.
- *
- * This will overwrite the current value.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource,
- *          or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config,
-                                    const char *name, const char *value);
-
-
-/**
- * @brief Get an array of config entries from a ConfigResource object.
- *
- * The returned object life-times are the same as the \p config object.
- *
- * @param config ConfigResource to get configs from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config,
-                                 size_t *cntp);
-
-
-
-/**
- * @returns the ResourceType for \p config
- */
-RD_EXPORT rd_kafka_ResourceType_t
-rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the name for \p config
- */
-RD_EXPORT const char *
-rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the error for this resource from an AlterConfigs request
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the error string for this resource from an AlterConfigs
- *          request, or NULL if no error.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config);
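A small sketch of preparing a ConfigResource for the AlterConfigs API declared in the next section (the helper name `make_retention_update` and the `retention.ms` value are invented; the client performs no validation of the pair, per the remark above):

```c
#include <librdkafka/rdkafka.h>

static rd_kafka_ConfigResource_t *
make_retention_update (const char *topic) {
        rd_kafka_ConfigResource_t *config =
                rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topic);

        /* Validation happens broker-side, not in the client. */
        if (rd_kafka_ConfigResource_set_config(config, "retention.ms",
                                               "86400000")) {
                rd_kafka_ConfigResource_destroy(config);
                return NULL;
        }
        return config;
}
```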
- * Updates are not transactional, so they may succeed for a subset - * of the provided resources while the others fail. - * The configuration for a particular resource is updated atomically, - * replacing values using the provided ConfigEntry objects and reverting - * unspecified ConfigEntry objects to their default values. - * - * @remark Requires broker version >=0.11.0.0 - * - * @warning AlterConfigs will replace all existing configuration for - * the provided resources with the new configuration given, - * reverting all other configuration to their default values. - * - * @remark Multiple resources and resource types may be set, but at most one - * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call - * since these resource requests must be sent to the broker specified - * in the resource. - * - */ -RD_EXPORT -void rd_kafka_AlterConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - - -/* - * AlterConfigs result type and methods - */ - -/** - * @brief Get an array of resource results from an AlterConfigs result. - * - * Use \c rd_kafka_ConfigResource_error() and - * \c rd_kafka_ConfigResource_error_string() to extract per-resource error - * results on the returned array elements. - * - * The returned object life-times are the same as the \p result object. - * - * @param result Result object to get resource results from. - * @param cntp is updated to the number of elements in the array. - * - * @returns an array of ConfigResource elements, or NULL if not available. - */ -RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_AlterConfigs_result_resources ( - const rd_kafka_AlterConfigs_result_t *result, - size_t *cntp); - - - - - - -/* - * DescribeConfigs - retrieve cluster configuration. - * - */ - - -/** - * @brief Get configuration for the specified resources in \p configs. - * - * The returned configuration includes default values and the - * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() - * methods may be used to distinguish them from user supplied values. - * - * The value of config entries where rd_kafka_ConfigEntry_is_sensitive() - * is true will always be NULL to avoid disclosing sensitive - * information, such as security settings. - * - * Configuration entries where rd_kafka_ConfigEntry_is_read_only() - * is true can't be updated (with rd_kafka_AlterConfigs()). - * - * Synonym configuration entries are returned if the broker supports - * them (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms(). - * - * @remark Requires broker version >=0.11.0.0 - * - * @remark Multiple resources and resource types may be requested, but at most - * one resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call - * since these resource requests must be sent to the broker specified - * in the resource. - */ -RD_EXPORT -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - - - - -/* - * DescribeConfigs result type and methods - */ - -/** - * @brief Get an array of resource results from a DescribeConfigs result. - * - * The returned \p resources life-time is the same as the \p result object. - * - * @param result Result object to get resource results from. - * @param cntp is updated to the number of elements in the array.
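 *
 * Example (an illustrative sketch, not part of the original header; it
 * assumes a configured \c rd_kafka_t *rk handle and reuses this sample's
 * topic name "HelloTopic"):
 * @code
 * rd_kafka_ConfigResource_t *res =
 *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "HelloTopic");
 * rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
 * rd_kafka_DescribeConfigs(rk, &res, 1, NULL, rkqu);
 * // Await the RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT event on rkqu, then
 * // walk the entries via rd_kafka_DescribeConfigs_result_resources() and
 * // rd_kafka_ConfigResource_configs().
 * rd_kafka_ConfigResource_destroy(res);
 * rd_kafka_queue_destroy(rkqu);
 * @endcode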
- */ -RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp); - - -/* - * DeleteRecords - delete records (messages) from partitions - * - * - */ - -/*! Represents records to be deleted */ -typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t; - -/** - * @brief Create a new DeleteRecords object. This object is later passed to - * rd_kafka_DeleteRecords(). - * - * \p before_offsets must contain \c topic, \c partition, and - * \c offset is the offset before which the messages will - * be deleted (exclusive). - * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to - * delete all data in the partition. - * - * @param before_offsets For each partition delete all messages up to but not - * including the specified offset. - * - * @returns a newly allocated DeleteRecords object. - * Use rd_kafka_DeleteRecords_destroy() to free object when done. - */ -RD_EXPORT rd_kafka_DeleteRecords_t * -rd_kafka_DeleteRecords_new (const rd_kafka_topic_partition_list_t * - before_offsets); - -/** - * @brief Destroy and free a DeleteRecords object previously created with - * rd_kafka_DeleteRecords_new() - */ -RD_EXPORT void -rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records); - -/** - * @brief Helper function to destroy all DeleteRecords objects in - * the \p del_records array (of \p del_record_cnt elements). - * The array itself is not freed. - */ -RD_EXPORT void -rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt); - -/** - * @brief Delete records (messages) in topic partitions older than the - * offsets provided. - * - * @param rk Client instance. - * @param del_records The offsets to delete (up to). - * Currently only one DeleteRecords_t (but containing - * multiple offsets) is supported. - * @param del_record_cnt The number of elements in del_records, must be 1. - * @param options Optional admin options, or NULL for defaults. - * @param rkqu Queue to emit result on. - * - * Supported admin options: - * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds. - * Controls how long the brokers will wait for records to be deleted. - * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms. - * Controls how long \c rdkafka will wait for the request to complete. - * - * @remark The result event type emitted on the supplied queue is of type - * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT - */ -RD_EXPORT void -rd_kafka_DeleteRecords (rd_kafka_t *rk, - rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - - -/* - * DeleteRecords result type and methods - */ - -/** - * @brief Get a list of topic and partition results from a DeleteRecords result. - * The returned objects will contain \c topic, \c partition, \c offset - * and \c err. \c offset will be set to the post-deletion low-watermark - * (smallest available offset of all live replicas). \c err will be set - * per-partition if deletion failed. - * - * The returned object's life-time is the same as the \p result object. - */ -RD_EXPORT const rd_kafka_topic_partition_list_t * -rd_kafka_DeleteRecords_result_offsets ( - const rd_kafka_DeleteRecords_result_t *result); - -/* - * DeleteGroups - delete groups from cluster - * - * - */ - -/*! Represents a group to be deleted.
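 *
 * Usage outline (an illustrative sketch, not part of the original header;
 * \c rk, \c rkqu and the group name "my-group" are assumed):
 * @code
 * rd_kafka_DeleteGroup_t *dg = rd_kafka_DeleteGroup_new("my-group");
 * rd_kafka_DeleteGroups(rk, &dg, 1, NULL, rkqu);
 * // Await RD_KAFKA_EVENT_DELETEGROUPS_RESULT on rkqu, then read the
 * // per-group outcomes with rd_kafka_DeleteGroups_result_groups().
 * rd_kafka_DeleteGroup_destroy(dg);
 * @endcode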
*/ -typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; - -/** - * @brief Create a new DeleteGroup object. This object is later passed to - * rd_kafka_DeleteGroups(). - * - * @param group Name of group to delete. - * - * @returns a newly allocated DeleteGroup object. - * Use rd_kafka_DeleteGroup_destroy() to free object when done. - */ -RD_EXPORT rd_kafka_DeleteGroup_t * -rd_kafka_DeleteGroup_new (const char *group); - -/** - * @brief Destroy and free a DeleteGroup object previously created with - * rd_kafka_DeleteGroup_new() - */ -RD_EXPORT void -rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group); - -/** - * @brief Helper function to destroy all DeleteGroup objects in - * the \p del_groups array (of \p del_group_cnt elements). - * The array itself is not freed. - */ -RD_EXPORT void -rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt); - -/** - * @brief Delete groups from cluster as specified by the \p del_groups - * array of size \p del_group_cnt elements. - * - * @param rk Client instance. - * @param del_groups Array of groups to delete. - * @param del_group_cnt Number of elements in \p del_groups array. - * @param options Optional admin options, or NULL for defaults. - * @param rkqu Queue to emit result on. - * - * @remark The result event type emitted on the supplied queue is of type - * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT - */ -RD_EXPORT -void rd_kafka_DeleteGroups (rd_kafka_t *rk, - rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - - - -/* - * DeleteGroups result type and methods - */ - -/** - * @brief Get an array of group results from a DeleteGroups result. - * - * The returned groups life-time is the same as the \p result object. - * - * @param result Result to get group results from. - * @param cntp is updated to the number of elements in the array. - */ -RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteGroups_result_groups ( - const rd_kafka_DeleteGroups_result_t *result, - size_t *cntp); - - -/* - * DeleteConsumerGroupOffsets - delete committed offsets of a consumer group - * - * - */ - -/*! Represents consumer group committed offsets to be deleted. */ -typedef struct rd_kafka_DeleteConsumerGroupOffsets_s -rd_kafka_DeleteConsumerGroupOffsets_t; - -/** - * @brief Create a new DeleteConsumerGroupOffsets object. - * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets(). - * - * @param group Consumer group id. - * @param partitions Partitions to delete committed offsets for. - * Only the topic and partition fields are used. - * - * @returns a newly allocated DeleteConsumerGroupOffsets object. - * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free - * object when done. - */ -RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t * -rd_kafka_DeleteConsumerGroupOffsets_new (const char *group, - const rd_kafka_topic_partition_list_t - *partitions); - -/** - * @brief Destroy and free a DeleteConsumerGroupOffsets object previously - * created with rd_kafka_DeleteConsumerGroupOffsets_new() - */ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy ( - rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); - -/** - * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in - * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements). - * The array itself is not freed.
- */ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffset_cnt); - -/** - * @brief Delete committed offsets for a set of partitions in a consumer - * group. This will succeed at the partition level only if the group - * is not actively subscribed to the corresponding topic. - * - * @param rk Client instance. - * @param del_grpoffsets Array of group committed offsets to delete. - * MUST only be one single element. - * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array. - * MUST always be 1. - * @param options Optional admin options, or NULL for defaults. - * @param rkqu Queue to emit result on. - * - * @remark The result event type emitted on the supplied queue is of type - * \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT - * - * @remark The current implementation only supports one group per invocation. - */ -RD_EXPORT -void rd_kafka_DeleteConsumerGroupOffsets ( - rd_kafka_t *rk, - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - - - -/* - * DeleteConsumerGroupOffsets result type and methods - */ - -/** - * @brief Get an array of results from a DeleteConsumerGroupOffsets result. - * - * The returned groups life-time is the same as the \p result object. - * - * @param result Result to get group results from. - * @param cntp is updated to the number of elements in the array. - */ -RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteConsumerGroupOffsets_result_groups ( - const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, - size_t *cntp); - - -/**@}*/ - - -/** - * @name Security APIs - * @{ - * - */ - -/** - * @brief Set SASL/OAUTHBEARER token and metadata - * - * @param rk Client instance. - * @param token_value the mandatory token value to set, often (but not - * necessarily) a JWS compact serialization as per - * https://tools.ietf.org/html/rfc7515#section-3.1. - * @param md_lifetime_ms when the token expires, in terms of the number of - * milliseconds since the epoch. - * @param md_principal_name the mandatory Kafka principal name associated - * with the token. - * @param extensions optional SASL extensions key-value array with - * \p extension_size elements (number of keys * 2), where [i] is the key and - * [i+1] is the key's value, to be communicated to the broker - * as additional key-value pairs during the initial client response as per - * https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are - * copied. - * @param extension_size the number of SASL extension keys plus values, - * which must be a non-negative multiple of 2. - * @param errstr A human readable error string (nul-terminated) is written to - * this location that must be of at least \p errstr_size bytes. - * The \p errstr is only written in case of error. - * @param errstr_size Writable size in \p errstr. - * - * The SASL/OAUTHBEARER token refresh callback or event handler should invoke - * this method upon success. The extension keys must not include the reserved - * key "`auth`", and all extension keys and values must conform to the required - * format as per https://tools.ietf.org/html/rfc7628#section-3.1: - * - * key = 1*(ALPHA) - * value = *(VCHAR / SP / HTAB / CR / LF ) - * - * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise \p errstr set - * and:
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are - * invalid;
- * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism.
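 *
 * A minimal success/failure sketch (illustrative only; \c token_value and
 * \c expiry_ms are placeholders the application must supply):
 * @code
 * char errstr[512];
 * if (rd_kafka_oauthbearer_set_token(rk, token_value, expiry_ms,
 *                                    "myprincipal", NULL, 0,
 *                                    errstr, sizeof(errstr)) !=
 *     RD_KAFKA_RESP_ERR_NO_ERROR)
 *         rd_kafka_oauthbearer_set_token_failure(rk, errstr);
 * @endcode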
- * - * @sa rd_kafka_oauthbearer_set_token_failure - * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb - */ -RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, size_t extension_size, - char *errstr, size_t errstr_size); - -/** - * @brief SASL/OAUTHBEARER token refresh failure indicator. - * - * @param rk Client instance. - * @param errstr mandatory human readable error reason for failing to acquire - * a token. - * - * The SASL/OAUTHBEARER token refresh callback or event handler should invoke - * this method upon failure. - * - * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:
- * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism,
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied. - * - * @sa rd_kafka_oauthbearer_set_token - * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb - */ -RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr); - -/**@}*/ - - -/** - * @name Transactional producer API - * - * The transactional producer operates on top of the idempotent producer, - * and provides full exactly-once semantics (EOS) for Apache Kafka when used - * with the transaction aware consumer (\c isolation.level=read_committed). - * - * A producer instance is configured for transactions by setting the - * \c transactional.id to an identifier unique for the application. This - * id will be used to fence stale transactions from previous instances of - * the application, typically following an outage or crash. - * - * After creating the transactional producer instance using rd_kafka_new() - * the transactional state must be initialized by calling - * rd_kafka_init_transactions(). This is a blocking call that will - * acquire a runtime producer id from the transaction coordinator broker - * as well as abort any stale transactions and fence any still running producer - * instances with the same \c transactional.id. - * - * Once transactions are initialized the application may begin a new - * transaction by calling rd_kafka_begin_transaction(). - * A producer instance may only have one single on-going transaction. - * - * Any messages produced after the transaction has been started will - * belong to the ongoing transaction and will be committed or aborted - * atomically. - * It is not permitted to produce messages outside a transaction - * boundary, e.g., before rd_kafka_begin_transaction() or after - * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after - * the current transaction has failed. - * - * If consumed messages are used as input to the transaction, the consumer - * instance must be configured with \c enable.auto.commit set to \c false. - * To commit the consumed offsets along with the transaction pass the - * list of consumed partitions and the last offset processed + 1 to - * rd_kafka_send_offsets_to_transaction() prior to committing the transaction. - * This allows an aborted transaction to be restarted using the previously - * committed offsets. - * - * To commit the produced messages, and any consumed offsets, to the - * current transaction, call rd_kafka_commit_transaction(). - * This call will block until the transaction has been fully committed or - * failed (typically due to fencing by a newer producer instance). - * - * Alternatively, if processing fails, or an abortable transaction error is - * raised, the transaction needs to be aborted by calling - * rd_kafka_abort_transaction() which marks any produced messages and - * offset commits as aborted. - * - * After the current transaction has been committed or aborted a new - * transaction may be started by calling rd_kafka_begin_transaction() again. - * - * @par Retriable errors - * Some error cases allow the attempted operation to be retried, this is - * indicated by the error object having the retriable flag set which can - * be detected by calling rd_kafka_error_is_retriable(). - * When this flag is set the application may retry the operation immediately - * or preferably after a shorter grace period (to avoid busy-looping). - * Retriable errors include timeouts, broker transport failures, etc. 
- * - * @par Abortable errors - * An ongoing transaction may fail permanently due to various errors, - * such as the transaction coordinator becoming unavailable, write failures to the - * Apache Kafka log, under-replicated partitions, etc. - * At this point the producer application must abort the current transaction - * using rd_kafka_abort_transaction() and optionally start a new transaction - * by calling rd_kafka_begin_transaction(). - * Whether an error is abortable or not is detected by calling - * rd_kafka_error_txn_requires_abort() on the returned error object. - * - * @par Fatal errors - * While the underlying idempotent producer will typically only raise - * fatal errors for unrecoverable cluster errors where the idempotency - * guarantees can't be maintained, most of these are treated as abortable by - * the transactional producer since transactions may be aborted and retried - * in their entirety. - * The transactional producer, on the other hand, introduces a set of additional - * fatal errors which the application needs to handle by shutting down the - * producer and terminating. There is no way for a producer instance to recover - * from fatal errors. - * Whether an error is fatal or not is detected by calling - * rd_kafka_error_is_fatal() on the returned error object or by checking - * the global rd_kafka_fatal_error() code. - * Fatal errors are raised by triggering the \c error_cb (see the - * Fatal error chapter in INTRODUCTION.md for more information), and any - * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL - * or have the fatal flag set (see rd_kafka_error_is_fatal()). - * The originating fatal error code can be retrieved by calling - * rd_kafka_fatal_error(). - * - * @par Handling of other errors - * For errors that have neither the retriable, abortable, nor fatal flag set - * it is not always obvious how to handle them. While some of these errors - * may be indicative of bugs in the application code, such as when - * an invalid parameter is passed to a method, other errors might originate - * from the broker and be passed through as-is to the application. - * The general recommendation is to treat these errors, which have - * neither the retriable nor the abortable flag set, as fatal. - * - * @par Error handling example - * @code - * retry: - * rd_kafka_error_t *error; - * - * error = rd_kafka_commit_transaction(producer, 10*1000); - * if (!error) - * return success; - * else if (rd_kafka_error_txn_requires_abort(error)) { - * do_abort_transaction_and_reset_inputs(); - * } else if (rd_kafka_error_is_retriable(error)) { - * rd_kafka_error_destroy(error); - * goto retry; - * } else { // treat all other errors as fatal errors - * fatal_error(rd_kafka_error_string(error)); - * } - * rd_kafka_error_destroy(error); - * @endcode - * - * - * @{ - */ - - -/** - * @brief Initialize transactions for the producer instance. - * - * This function ensures any transactions initiated by previous instances - * of the producer with the same \c transactional.id are completed. - * If the previous instance failed with a transaction in progress, the - * previous transaction will be aborted. - * This function needs to be called before any other transactional or - * produce functions are called when the \c transactional.id is configured. - * - * If the last transaction had begun completion (following transaction commit) - * but not yet finished, this function will await the previous transaction's - * completion.
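 *
 * Typical call sequence (an illustrative sketch, not part of the original
 * header; per-call error handling is omitted for brevity):
 * @code
 * rd_kafka_init_transactions(producer, 30*1000);
 * rd_kafka_begin_transaction(producer);
 * // ... produce messages with rd_kafka_producev() ...
 * rd_kafka_commit_transaction(producer, 30*1000);
 * @endcode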
- * - * When any previous transactions have been fenced, this function - * will acquire the internal producer id and epoch, used in all future - * transactional messages issued by this producer instance. - * - * @param rk Producer instance. - * @param timeout_ms The maximum time to block. On timeout the operation - * may continue in the background, depending on state, - * and it is okay to call init_transactions() again. - * - * @remark This function may block up to \p timeout_ms milliseconds. - * - * @returns NULL on success or an error object on failure. - * Check whether the returned error object permits retrying - * by calling rd_kafka_error_is_retriable(), or whether a fatal - * error has been raised by calling rd_kafka_error_is_fatal(). - * Error codes: - * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator - * could not be contacted within \p timeout_ms (retriable), - * RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction - * coordinator is not available (retriable), - * RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction - * would not complete within \p timeout_ms (retriable), - * RD_KAFKA_RESP_ERR__STATE if transactions have already been started - * or upon fatal error, - * RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not - * support transactions (<Apache Kafka 0.11), -#include <string> -#include <list> -#include <vector> -#include <cstdlib> -#include <cstring> -#include <stdint.h> -#include <sys/types.h> - -#ifdef _WIN32 -#ifndef ssize_t -#ifndef _BASETSD_H_ -#include <basetsd.h> -#endif -#ifndef _SSIZE_T_DEFINED -#define _SSIZE_T_DEFINED -typedef SSIZE_T ssize_t; -#endif -#endif -#undef RD_EXPORT -#ifdef LIBRDKAFKA_STATICLIB -#define RD_EXPORT -#else -#ifdef LIBRDKAFKACPP_EXPORTS -#define RD_EXPORT __declspec(dllexport) -#else -#define RD_EXPORT __declspec(dllimport) -#endif -#endif -#else -#define RD_EXPORT -#endif - -/**@endcond*/ - -extern "C" { - /* Forward declarations */ - struct rd_kafka_s; - struct rd_kafka_topic_s; - struct rd_kafka_message_s; - struct rd_kafka_conf_s; - struct rd_kafka_topic_conf_s; -} - -namespace RdKafka { - -/** - * @name Miscellaneous APIs - * @{ - */ - -/** - * @brief librdkafka version - * - * Interpreted as hex \c MM.mm.rr.xx: - * - MM = Major - * - mm = minor - * - rr = revision - * - xx = pre-release id (0xff is the final release) - * - * E.g.: \c 0x000801ff = 0.8.1 - * - * @remark This value should only be used during compile time, - * for runtime checks of version use RdKafka::version() - */ -#define RD_KAFKA_VERSION 0x010802ff - -/** - * @brief Returns the librdkafka version as integer. - * - * @sa See RD_KAFKA_VERSION for how to parse the integer format. - */ -RD_EXPORT -int version (); - -/** - * @brief Returns the librdkafka version as string. - */ -RD_EXPORT -std::string version_str(); - -/** - * @brief Returns a CSV list of the supported debug contexts - * for use with Conf::Set("debug", ..). - */ -RD_EXPORT -std::string get_debug_contexts(); - -/** - * @brief Wait for all rd_kafka_t objects to be destroyed. - * - * @returns 0 if all kafka objects are now destroyed, or -1 if the - * timeout was reached. - * Since RdKafka handle deletion is an asynchronous operation the - * \p wait_destroyed() function can be used for applications where - * a clean shutdown is required. - */ -RD_EXPORT -int wait_destroyed(int timeout_ms); - -/** - * @brief Allocate memory using the same allocator librdkafka uses. - * - * This is typically an abstraction for the malloc(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * allocating pointers that are used by librdkafka.
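 *
 * Example (illustrative only):
 * @code
 * void *buf = RdKafka::mem_malloc(64);
 * // ... hand buf to an API documented to take librdkafka-allocated memory ...
 * RdKafka::mem_free(buf);
 * @endcode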
- * - * @remark Memory allocated by mem_malloc() must be freed using - * mem_free(). - */ -RD_EXPORT -void *mem_malloc (size_t size); - -/** - * @brief Free pointer returned by librdkafka - * - * This is typically an abstraction for the free(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * freeing pointers returned by librdkafka. - * - * In standard setups it is usually not necessary to use this interface - * rather than the free(3) function. - * - * @remark mem_free() must only be used for pointers returned by APIs - * that explicitly mention using this function for freeing. - */ -RD_EXPORT -void mem_free (void *ptr); - -/**@}*/ - - - -/** - * @name Constants, errors, types - * @{ - * - * - */ - -/** - * @brief Error codes. - * - * The negative error codes delimited by two underscores - * (\c ERR__..) denote errors internal to librdkafka and are - * displayed as \c \"Local: \<error string..\>\", while the error codes - * delimited by a single underscore (\c ERR_..) denote broker - * errors and are displayed as \c \"Broker: \<error string..\>\". - * - * @sa Use RdKafka::err2str() to translate an error code into a human readable string - */ -enum ErrorCode { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - ERR__BEGIN = -200, - /** Received message is incorrect */ - ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - ERR__DESTROY = -197, - /** Generic failure */ - ERR__FAIL = -196, - /** Broker transport failure */ - ERR__TRANSPORT = -195, - /** Critical system resource */ - ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - ERR__RESOLVE = -193, - /** Produced message timed out */ - ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. - * This event is disabled by default, - * see the `enable.partition.eof` configuration property. */ - ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - ERR__INVALID_ARG = -186, - /** Operation timed out */ - ERR__TIMED_OUT = -185, - /** Queue is full */ - ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ - ERR__ISR_INSUFF = -183, - /** Broker node update */ - ERR__NODE_UPDATE = -182, - /** SSL error */ - ERR__SSL = -181, - /** Waiting for coordinator to become available. */ - ERR__WAIT_COORD = -180, - /** Unknown client group */ - ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ - ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish.
*/ - ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ - ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ - ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ - ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ - ERR__CONFLICT = -173, - /** Wrong state */ - ERR__STATE = -172, - /** Unknown protocol */ - ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ - ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - ERR__AUTHENTICATION = -169, - /** No stored offset */ - ERR__NO_OFFSET = -168, - /** Outdated */ - ERR__OUTDATED = -167, - /** Timed out in queue */ - ERR__TIMED_OUT_QUEUE = -166, - /** Feature not supported by broker */ - ERR__UNSUPPORTED_FEATURE = -165, - /** Awaiting cache update */ - ERR__WAIT_CACHE = -164, - /** Operation interrupted */ - ERR__INTR = -163, - /** Key serialization error */ - ERR__KEY_SERIALIZATION = -162, - /** Value serialization error */ - ERR__VALUE_SERIALIZATION = -161, - /** Key deserialization error */ - ERR__KEY_DESERIALIZATION = -160, - /** Value deserialization error */ - ERR__VALUE_DESERIALIZATION = -159, - /** Partial response */ - ERR__PARTIAL = -158, - /** Modification attempted on read-only object */ - ERR__READ_ONLY = -157, - /** No such entry / item not found */ - ERR__NOENT = -156, - /** Read underflow */ - ERR__UNDERFLOW = -155, - /** Invalid type */ - ERR__INVALID_TYPE = -154, - /** Retry operation */ - ERR__RETRY = -153, - /** Purged in queue */ - ERR__PURGE_QUEUE = -152, - /** Purged in flight */ - ERR__PURGE_INFLIGHT = -151, - /** Fatal error: see RdKafka::Handle::fatal_error() */ - ERR__FATAL = -150, - /** Inconsistent state */ - ERR__INCONSISTENT = -149, - /** Gap-less ordering would not be guaranteed if proceeding */ - ERR__GAPLESS_GUARANTEE = -148, - /** Maximum poll interval exceeded */ - ERR__MAX_POLL_EXCEEDED = -147, - /** Unknown broker */ - ERR__UNKNOWN_BROKER = -146, - /** Functionality not configured */ - ERR__NOT_CONFIGURED = -145, - /** Instance has been fenced */ - ERR__FENCED = -144, - /** Application generated error */ - ERR__APPLICATION = -143, - /** Assignment lost */ - ERR__ASSIGNMENT_LOST = -142, - /** No operation performed */ - ERR__NOOP = -141, - /** No offset to automatically reset to */ - ERR__AUTO_OFFSET_RESET = -140, - - /** End internal error codes */ - ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - ERR_UNKNOWN = -1, - /** Success */ - ERR_NO_ERROR = 0, - /** Offset out of range */ - ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - ERR_NETWORK_EXCEPTION = 13, - /** Coordinator load in progress */ - ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, - /** Group coordinator load in progress */ -#define ERR_GROUP_LOAD_IN_PROGRESS ERR_COORDINATOR_LOAD_IN_PROGRESS 
- /** Coordinator not available */ - ERR_COORDINATOR_NOT_AVAILABLE = 15, - /** Group coordinator not available */ -#define ERR_GROUP_COORDINATOR_NOT_AVAILABLE ERR_COORDINATOR_NOT_AVAILABLE - /** Not coordinator */ - ERR_NOT_COORDINATOR = 16, - /** Not coordinator for group */ -#define ERR_NOT_COORDINATOR_FOR_GROUP ERR_NOT_COORDINATOR - /** Invalid topic */ - ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ - ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ - ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ - ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ - ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ - ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ - ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ - ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ - ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - ERR_ILLEGAL_SASL_STATE = 34, - /** Unsupported version */ - ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ - ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, - /** Policy violation */ - ERR_POLICY_VIOLATION = 44, - /** Broker received an out of order sequence number */ - ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, - /** Broker received a duplicate sequence number */ - ERR_DUPLICATE_SEQUENCE_NUMBER = 46, - /** Producer attempted an operation with an old epoch */ - ERR_INVALID_PRODUCER_EPOCH = 47, - /** Producer attempted a transactional operation in an invalid state */ - ERR_INVALID_TXN_STATE = 48, - /** Producer attempted to use a producer id which is not - * currently assigned to its transactional id */ - ERR_INVALID_PRODUCER_ID_MAPPING = 49, - /** Transaction timeout is larger than the maximum - * value allowed by the broker's max.transaction.timeout.ms */ - ERR_INVALID_TRANSACTION_TIMEOUT = 50, - /** Producer attempted to update a transaction while another - * concurrent operation on the same transaction was ongoing */ - ERR_CONCURRENT_TRANSACTIONS = 51, - /** Indicates that the transaction coordinator sending a - * WriteTxnMarker is no longer the current coordinator for a - * given producer */ - ERR_TRANSACTION_COORDINATOR_FENCED = 52, - /** Transactional Id authorization failed */ - ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, - /** Security features are disabled */ - ERR_SECURITY_DISABLED = 54, - /** Operation not
attempted */ - ERR_OPERATION_NOT_ATTEMPTED = 55, - /** Disk error when trying to access log file on the disk */ - ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ - ERR_LOG_DIR_NOT_FOUND = 57, - /** SASL Authentication failed */ - ERR_SASL_AUTHENTICATION_FAILED = 58, - /** Unknown Producer Id */ - ERR_UNKNOWN_PRODUCER_ID = 59, - /** Partition reassignment is in progress */ - ERR_REASSIGNMENT_IN_PROGRESS = 60, - /** Delegation Token feature is not enabled */ - ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, - /** Delegation Token is not found on server */ - ERR_DELEGATION_TOKEN_NOT_FOUND = 62, - /** Specified Principal is not valid Owner/Renewer */ - ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, - /** Delegation Token requests are not allowed on this connection */ - ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, - /** Delegation Token authorization failed */ - ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, - /** Delegation Token is expired */ - ERR_DELEGATION_TOKEN_EXPIRED = 66, - /** Supplied principalType is not supported */ - ERR_INVALID_PRINCIPAL_TYPE = 67, - /** The group is not empty */ - ERR_NON_EMPTY_GROUP = 68, - /** The group id does not exist */ - ERR_GROUP_ID_NOT_FOUND = 69, - /** The fetch session ID was not found */ - ERR_FETCH_SESSION_ID_NOT_FOUND = 70, - /** The fetch session epoch is invalid */ - ERR_INVALID_FETCH_SESSION_EPOCH = 71, - /** No matching listener */ - ERR_LISTENER_NOT_FOUND = 72, - /** Topic deletion is disabled */ - ERR_TOPIC_DELETION_DISABLED = 73, - /** Leader epoch is older than broker epoch */ - ERR_FENCED_LEADER_EPOCH = 74, - /** Leader epoch is newer than broker epoch */ - ERR_UNKNOWN_LEADER_EPOCH = 75, - /** Unsupported compression type */ - ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, - /** Broker epoch has changed */ - ERR_STALE_BROKER_EPOCH = 77, - /** Leader high watermark is not caught up */ - ERR_OFFSET_NOT_AVAILABLE = 78, - /** Group member needs a valid member ID */ - ERR_MEMBER_ID_REQUIRED = 79, - /** Preferred leader was not available */ - ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, - /** Consumer group has reached maximum size */ - ERR_GROUP_MAX_SIZE_REACHED = 81, - /** Static consumer fenced by other consumer with same - * group.instance.id. 
*/ - ERR_FENCED_INSTANCE_ID = 82, - /** Eligible partition leaders are not available */ - ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, - /** Leader election not needed for topic partition */ - ERR_ELECTION_NOT_NEEDED = 84, - /** No partition reassignment is in progress */ - ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, - /** Deleting offsets of a topic while the consumer group is - * subscribed to it */ - ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, - /** Broker failed to validate record */ - ERR_INVALID_RECORD = 87, - /** There are unstable offsets that need to be cleared */ - ERR_UNSTABLE_OFFSET_COMMIT = 88, - /** Throttling quota has been exceeded */ - ERR_THROTTLING_QUOTA_EXCEEDED = 89, - /** There is a newer producer with the same transactionalId - * which fences the current one */ - ERR_PRODUCER_FENCED = 90, - /** Request illegally referred to resource that does not exist */ - ERR_RESOURCE_NOT_FOUND = 91, - /** Request illegally referred to the same resource twice */ - ERR_DUPLICATE_RESOURCE = 92, - /** Requested credential would not meet criteria for acceptability */ - ERR_UNACCEPTABLE_CREDENTIAL = 93, - /** Indicates that either the sender or recipient of a - * voter-only request is not one of the expected voters */ - ERR_INCONSISTENT_VOTER_SET = 94, - /** Invalid update version */ - ERR_INVALID_UPDATE_VERSION = 95, - /** Unable to update finalized features due to server error */ - ERR_FEATURE_UPDATE_FAILED = 96, - /** Request principal deserialization failed during forwarding */ - ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97 -}; - - -/** - * @brief Returns a human readable representation of a Kafka error. - */ -RD_EXPORT -std::string err2str(RdKafka::ErrorCode err); - - - -/** - * @enum CertificateType - * @brief SSL certificate types - */ -enum CertificateType { - CERT_PUBLIC_KEY, /**< Client's public key */ - CERT_PRIVATE_KEY, /**< Client's private key */ - CERT_CA, /**< CA certificate */ - CERT__CNT -}; - -/** - * @enum CertificateEncoding - * @brief SSL certificate encoding - */ -enum CertificateEncoding { - CERT_ENC_PKCS12, /**< PKCS#12 */ - CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - CERT_ENC_PEM, /**< PEM */ - CERT_ENC__CNT -}; - -/**@} */ - - - -/**@cond NO_DOC*/ -/* Forward declarations */ -class Handle; -class Producer; -class Message; -class Headers; -class Queue; -class Event; -class Topic; -class TopicPartition; -class Metadata; -class KafkaConsumer; -/**@endcond*/ - - -/** - * @name Error class - * @{ - * - */ - -/** - * @brief The Error class is used as a return value from APIs to propagate - * an error. The error consists of an error code which is to be used - * programmatically, an error string for showing to the user, - * and various error flags that can be used programmatically to decide - * how to handle the error; e.g., should the operation be retried, - * was it a fatal error, etc. - * - * Error objects must be deleted explicitly to free their resources. - */ -class RD_EXPORT Error { - public: - - /** - * @brief Create error object. - */ - static Error *create (ErrorCode code, const std::string *errstr); - - virtual ~Error () { } - - /* - * Error accessor methods - */ - - /** - * @returns the error code, e.g., RdKafka::ERR_UNKNOWN_MEMBER_ID. - */ - virtual ErrorCode code () const = 0; - - /** - * @returns the error code name, e.g., "ERR_UNKNOWN_MEMBER_ID". - */ - virtual std::string name () const = 0; - - /** - * @returns a human readable error string.
- */ - virtual std::string str () const = 0; - - /** - * @returns true if the error is a fatal error, indicating that the client - * instance is no longer usable, else false. - */ - virtual bool is_fatal () const = 0; - - /** - * @returns true if the operation may be retried, else false. - */ - virtual bool is_retriable () const = 0; - - /** - * @returns true if the error is an abortable transaction error in which case - * the application must call RdKafka::Producer::abort_transaction() - * and start a new transaction with - * RdKafka::Producer::begin_transaction() if it wishes to proceed - * with transactions. - * Else returns false. - * - * @remark The return value of this method is only valid for errors returned - * by the transactional API. - */ - virtual bool txn_requires_abort () const = 0; -}; - -/**@}*/ - - -/** - * @name Callback classes - * @{ - * - * - * librdkafka uses (optional) callbacks to propagate information and - * delegate decisions to the application logic. - * - * An application must call RdKafka::poll() at regular intervals to - * serve queued callbacks. - */ - - -/** - * @brief Delivery Report callback class - * - * The delivery report callback will be called once for each message - * accepted by RdKafka::Producer::produce() (et al.) with - * RdKafka::Message::err() set to indicate the result of the produce request. - * - * The callback is called when a message is successfully produced or - * if librdkafka encountered a permanent failure, or the retry counter for - * temporary errors has been exhausted. - * - * An application must call RdKafka::poll() at regular intervals to - * serve queued delivery report callbacks. - - */ -class RD_EXPORT DeliveryReportCb { - public: - /** - * @brief Delivery report callback. - */ - virtual void dr_cb (Message &message) = 0; - - virtual ~DeliveryReportCb() { } -}; - - -/** - * @brief SASL/OAUTHBEARER token refresh callback class - * - * The SASL/OAUTHBEARER token refresh callback is triggered via RdKafka::poll() - * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, - * typically based on the configuration defined in \c sasl.oauthbearer.config. - * - * The \c oauthbearer_config argument is the value of the - * \c sasl.oauthbearer.config configuration property. - * - * The callback should invoke RdKafka::Handle::oauthbearer_set_token() or - * RdKafka::Handle::oauthbearer_set_token_failure() to indicate success or - * failure, respectively. - * - * The refresh operation is eventable and may be received when an event - * callback handler is set with an event type of - * \c RdKafka::Event::EVENT_OAUTHBEARER_TOKEN_REFRESH. - * - * Note that before any SASL/OAUTHBEARER broker connection can succeed the - * application must call RdKafka::Handle::oauthbearer_set_token() once -- either - * directly or, more typically, by invoking RdKafka::poll() -- in order to - * cause retrieval of an initial token to occur. - * - * An application must call RdKafka::poll() at regular intervals to - * serve queued SASL/OAUTHBEARER token refresh callbacks (when - * OAUTHBEARER is the SASL mechanism). - */ -class RD_EXPORT OAuthBearerTokenRefreshCb { - public: - /** - * @brief SASL/OAUTHBEARER token refresh callback class. - * - * @param handle The RdKafka::Handle which requires a refreshed token. - * @param oauthbearer_config The value of the - * \p sasl.oauthbearer.config configuration property for \p handle.
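 *
 * A skeletal implementation (an illustrative sketch; fetch_token(),
 * expiry_ms and the principal name are hypothetical application values):
 * @code
 * class MyTokenRefreshCb : public RdKafka::OAuthBearerTokenRefreshCb {
 *  public:
 *   void oauthbearer_token_refresh_cb (RdKafka::Handle *handle,
 *                                      const std::string &oauthbearer_config) {
 *     std::string errstr;
 *     std::list<std::string> extensions;  // none in this sketch
 *     if (handle->oauthbearer_set_token(fetch_token(), expiry_ms,
 *                                       "myprincipal", extensions,
 *                                       errstr) != RdKafka::ERR_NO_ERROR)
 *       handle->oauthbearer_set_token_failure(errstr);
 *   }
 * };
 * @endcode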
- */ - virtual void oauthbearer_token_refresh_cb (RdKafka::Handle* handle, - const std::string &oauthbearer_config) = 0; - - virtual ~OAuthBearerTokenRefreshCb() { } -}; - - -/** - * @brief Partitioner callback class - * - * Generic partitioner callback class for implementing custom partitioners. - * - * @sa RdKafka::Conf::set() \c "partitioner_cb" - */ -class RD_EXPORT PartitionerCb { - public: - /** - * @brief Partitioner callback - * - * Return the partition to use for \p key in \p topic. - * - * The \p msg_opaque is the same \p msg_opaque provided in the - * RdKafka::Producer::produce() call. - * - * @remark \p key may be NULL or the empty string. - * - * @returns Must return a value between 0 and \p partition_cnt (non-inclusive). - * May return RD_KAFKA_PARTITION_UA (-1) if partitioning failed. - * - * @sa The callback may use RdKafka::Topic::partition_available() to check - * if a partition has an active leader broker. - */ - virtual int32_t partitioner_cb (const Topic *topic, - const std::string *key, - int32_t partition_cnt, - void *msg_opaque) = 0; - - virtual ~PartitionerCb() { } -}; - -/** - * @brief Variant partitioner with key pointer - * - */ -class PartitionerKeyPointerCb { - public: - /** - * @brief Variant partitioner callback that gets \p key as pointer and length - * instead of as a const std::string *. - * - * @remark \p key may be NULL or have \p key_len 0. - * - * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics - */ - virtual int32_t partitioner_cb (const Topic *topic, - const void *key, - size_t key_len, - int32_t partition_cnt, - void *msg_opaque) = 0; - - virtual ~PartitionerKeyPointerCb() { } -}; - - - -/** - * @brief Event callback class - * - * Events are a generic interface for propagating errors, statistics, logs, etc - * from librdkafka to the application. - * - * @sa RdKafka::Event - */ -class RD_EXPORT EventCb { - public: - /** - * @brief Event callback - * - * @sa RdKafka::Event - */ - virtual void event_cb (Event &event) = 0; - - virtual ~EventCb() { } -}; - - -/** - * @brief Event object class as passed to the EventCb callback. - */ -class RD_EXPORT Event { - public: - /** @brief Event type */ - enum Type { - EVENT_ERROR, /**< Event is an error condition */ - EVENT_STATS, /**< Event is a statistics JSON document */ - EVENT_LOG, /**< Event is a log message */ - EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */ - }; - - /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */ - enum Severity { - EVENT_SEVERITY_EMERG = 0, - EVENT_SEVERITY_ALERT = 1, - EVENT_SEVERITY_CRITICAL = 2, - EVENT_SEVERITY_ERROR = 3, - EVENT_SEVERITY_WARNING = 4, - EVENT_SEVERITY_NOTICE = 5, - EVENT_SEVERITY_INFO = 6, - EVENT_SEVERITY_DEBUG = 7 - }; - - virtual ~Event () { } - - /* - * Event Accessor methods - */ - - /** - * @returns The event type - * @remark Applies to all event types - */ - virtual Type type () const = 0; - - /** - * @returns Event error, if any. - * @remark Applies to all event types except THROTTLE - */ - virtual ErrorCode err () const = 0; - - /** - * @returns Log severity level. - * @remark Applies to LOG event type. - */ - virtual Severity severity () const = 0; - - /** - * @returns Log facility string. - * @remark Applies to LOG event type. - */ - virtual std::string fac () const = 0; - - /** - * @returns Log message string. - * - * \c EVENT_LOG: Log message string. - * \c EVENT_STATS: JSON object (as string). - * - * @remark Applies to LOG event type.
- */ - virtual std::string str () const = 0; - - /** - * @returns Throttle time in milliseconds. - * @remark Applies to THROTTLE event type. - */ - virtual int throttle_time () const = 0; - - /** - * @returns Throttling broker's name. - * @remark Applies to THROTTLE event type. - */ - virtual std::string broker_name () const = 0; - - /** - * @returns Throttling broker's id. - * @remark Applies to THROTTLE event type. - */ - virtual int broker_id () const = 0; - - - /** - * @returns true if this is a fatal error. - * @remark Applies to ERROR event type. - * @sa RdKafka::Handle::fatal_error() - */ - virtual bool fatal () const = 0; -}; - - - -/** - * @brief Consume callback class - */ -class RD_EXPORT ConsumeCb { - public: - /** - * @brief The consume callback is used with - * RdKafka::Consumer::consume_callback() - * methods and will be called for each consumed \p message. - * - * The callback interface is optional but provides increased performance. - */ - virtual void consume_cb (Message &message, void *opaque) = 0; - - virtual ~ConsumeCb() { } -}; - - -/** - * @brief \b KafkaConsumer: Rebalance callback class - */ -class RD_EXPORT RebalanceCb { -public: - /** - * @brief Group rebalance callback for use with RdKafka::KafkaConsumer - * - * Registering a \p rebalance_cb turns off librdkafka's automatic - * partition assignment/revocation and instead delegates that responsibility - * to the application's \p rebalance_cb. - * - * The rebalance callback is responsible for updating librdkafka's - * assignment set based on the two events: RdKafka::ERR__ASSIGN_PARTITIONS - * and RdKafka::ERR__REVOKE_PARTITIONS, but should also be able to handle - * arbitrary rebalancing failures where \p err is neither of those. - * @remark In this latter case (arbitrary error), the application must - * call unassign() to synchronize state. - * - * For eager/non-cooperative `partition.assignment.strategy` assignors, - * such as `range` and `roundrobin`, the application must use - * assign() to set and unassign() to clear the entire assignment. - * For the cooperative assignors, such as `cooperative-sticky`, the - * application must use incremental_assign() for ERR__ASSIGN_PARTITIONS and - * incremental_unassign() for ERR__REVOKE_PARTITIONS. - * - * Without a rebalance callback this is done automatically by librdkafka - * but registering a rebalance callback gives the application flexibility - * in performing other operations along with the assigning/revocation, - * such as fetching offsets from an alternate location (on assign) - * or manually committing offsets (on revoke).
- * - * @sa RdKafka::KafkaConsumer::assign() - * @sa RdKafka::KafkaConsumer::incremental_assign() - * @sa RdKafka::KafkaConsumer::incremental_unassign() - * @sa RdKafka::KafkaConsumer::assignment_lost() - * @sa RdKafka::KafkaConsumer::rebalance_protocol() - * - * The following example shows the application's responsibilities: - * @code - * class MyRebalanceCb : public RdKafka::RebalanceCb { - * public: - * void rebalance_cb (RdKafka::KafkaConsumer *consumer, - * RdKafka::ErrorCode err, - * std::vector<RdKafka::TopicPartition*> &partitions) { - * if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { - * // application may load offsets from arbitrary external - * // storage here and update \p partitions - * if (consumer->rebalance_protocol() == "COOPERATIVE") - * consumer->incremental_assign(partitions); - * else - * consumer->assign(partitions); - * - * } else if (err == RdKafka::ERR__REVOKE_PARTITIONS) { - * // Application may commit offsets manually here - * // if auto.commit.enable=false - * if (consumer->rebalance_protocol() == "COOPERATIVE") - * consumer->incremental_unassign(partitions); - * else - * consumer->unassign(); - * - * } else { - * std::cerr << "Rebalancing error: " << - * RdKafka::err2str(err) << std::endl; - * consumer->unassign(); - * } - * } - * } - * @endcode - * - * @remark The above example lacks error handling for assign calls, see - * the examples/ directory. - */ - virtual void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector<TopicPartition*> &partitions) = 0; - - virtual ~RebalanceCb() { } -}; - - -/** - * @brief Offset Commit callback class - */ -class RD_EXPORT OffsetCommitCb { -public: - /** - * @brief Set offset commit callback for use with consumer groups - * - * The results of automatic or manual offset commits will be scheduled - * for this callback and are served by RdKafka::KafkaConsumer::consume(). - * - * If no partitions had valid offsets to commit this callback will be called - * with \p err == ERR__NO_OFFSET which is not to be considered an error. - * - * The \p offsets list contains per-partition information: - * - \c topic The topic committed - * - \c partition The partition committed - * - \c offset: Committed offset (attempted) - * - \c err: Commit error - */ - virtual void offset_commit_cb(RdKafka::ErrorCode err, - std::vector<TopicPartition*> &offsets) = 0; - - virtual ~OffsetCommitCb() { } -}; - - - -/** - * @brief SSL broker certificate verification class. - * - * @remark Class instance must outlive the RdKafka client instance. - */ -class RD_EXPORT SslCertificateVerifyCb { -public: - /** - * @brief SSL broker certificate verification callback. - * - * The verification callback is triggered from internal librdkafka threads - * upon connecting to a broker. On each connection attempt the callback - * will be called for each certificate in the broker's certificate chain, - * starting at the root certificate, as long as the application callback - * returns 1 (valid certificate). - * - * \p broker_name and \p broker_id correspond to the broker the connection - * is being made to. - * The \c x509_error argument indicates if OpenSSL's verification of - * the certificate succeeded (0) or failed (an OpenSSL error code). - * The application may set the SSL context error code by returning 0 - * from the verify callback and providing a non-zero SSL context error code - * in \p x509_error. - * If the verify callback sets \p x509_error to 0, returns 1, and the - * original \p x509_error was non-zero, the error on the SSL context will - * be cleared.
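 *
 * A skeletal implementation that defers to OpenSSL's own verdict
 * (an illustrative sketch, not part of the original header):
 * @code
 * class MyVerifyCb : public RdKafka::SslCertificateVerifyCb {
 *  public:
 *   bool ssl_cert_verify_cb (const std::string &broker_name,
 *                            int32_t broker_id, int *x509_error, int depth,
 *                            const char *buf, size_t size,
 *                            std::string &errstr) {
 *     if (*x509_error != 0) {
 *       errstr = "pre-verification failed";
 *       return false;  // keep OpenSSL's error on the context
 *     }
 *     return true;      // accept this certificate in the chain
 *   }
 * };
 * @endcode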
- * \p x509_error is always a valid pointer to an int. - * - * \p depth is the depth of the current certificate in the chain, starting - * at the root certificate. - * - * The certificate itself is passed in binary DER format in \p buf of - * size \p size. - * - * The callback must return 1 if verification succeeds, or 0 if verification fails, - * and write a human-readable error message - * to \p errstr. - * - * @warning This callback will be called from internal librdkafka threads. - * - * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution - * for a list of \p x509_error codes. - */ - virtual bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) = 0; - - virtual ~SslCertificateVerifyCb() {} -}; - - -/** - * @brief \b Portability: SocketCb callback class - * - */ -class RD_EXPORT SocketCb { - public: - /** - * @brief Socket callback - * - * The socket callback is responsible for opening a socket - * according to the supplied \p domain, \p type and \p protocol. - * The socket shall be created with \c CLOEXEC set in a racefree fashion, if - * possible. - * - * It is typically not required to register an alternative socket - * implementation - * - * @returns The socket file descriptor or -1 on error (\c errno must be set) - */ - virtual int socket_cb (int domain, int type, int protocol) = 0; - - virtual ~SocketCb() { } -}; - - -/** - * @brief \b Portability: OpenCb callback class - * - */ -class RD_EXPORT OpenCb { - public: - /** - * @brief Open callback - * The open callback is responsible for opening the file specified by - * \p pathname, using \p flags and \p mode. - * The file shall be opened with \c CLOEXEC set in a racefree fashion, if - * possible. - * - * It is typically not required to register an alternative open implementation - * - * @remark Not currently available on native Win32 - */ - virtual int open_cb (const std::string &path, int flags, int mode) = 0; - - virtual ~OpenCb() { } -}; - - -/**@}*/ - - - - -/** - * @name Configuration interface - * @{ - * - */ - -/** - * @brief Configuration interface - * - * Holds either global or topic configuration that is passed to - * RdKafka::Consumer::create(), RdKafka::Producer::create(), - * RdKafka::KafkaConsumer::create(), etc. - * - * @sa CONFIGURATION.md for the full list of supported properties. - */ -class RD_EXPORT Conf { - public: - /** - * @brief Configuration object type - */ - enum ConfType { - CONF_GLOBAL, /**< Global configuration */ - CONF_TOPIC /**< Topic specific configuration */ - }; - - /** - * @brief RdKafka::Conf::Set() result code - */ - enum ConfResult { - CONF_UNKNOWN = -2, /**< Unknown configuration property */ - CONF_INVALID = -1, /**< Invalid configuration value */ - CONF_OK = 0 /**< Configuration property was successfully set */ - }; - - - /** - * @brief Create configuration object - */ - static Conf *create (ConfType type); - - virtual ~Conf () { } - - /** - * @brief Set configuration property \p name to value \p value. - * - * Fallthrough: - * Topic-level configuration properties may be set using this interface - * in which case they are applied on the \c default_topic_conf. - * If no \c default_topic_conf has been set one will be created. - * Any subsequent set("default_topic_conf", ..) calls will - * replace the current default topic configuration. - - * @returns CONF_OK on success, else writes a human readable error - * description to \p errstr on error.
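 *
 * Example (a brief sketch; the broker address is a placeholder):
 * @code
 * std::string errstr;
 * RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
 * if (conf->set("bootstrap.servers", "localhost:9092", errstr) !=
 *     RdKafka::Conf::CONF_OK)
 *         std::cerr << errstr << std::endl;
 * @endcode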
- */ - virtual Conf::ConfResult set (const std::string &name, - const std::string &value, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"dr_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - DeliveryReportCb *dr_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"oauthbearer_token_refresh_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"event_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - EventCb *event_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"default_topic_conf\" - * - * Sets the default topic configuration to use for for automatically - * subscribed topics. - * - * @sa RdKafka::KafkaConsumer::subscribe() - */ - virtual Conf::ConfResult set (const std::string &name, - const Conf *topic_conf, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"partitioner_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - PartitionerCb *partitioner_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - PartitionerKeyPointerCb *partitioner_kp_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"socket_cb\" */ - virtual Conf::ConfResult set (const std::string &name, SocketCb *socket_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"open_cb\" */ - virtual Conf::ConfResult set (const std::string &name, OpenCb *open_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"rebalance_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - RebalanceCb *rebalance_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"offset_commit_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OffsetCommitCb *offset_commit_cb, - std::string &errstr) = 0; - - /** @brief Use with \p name = \c \"ssl_cert_verify_cb\". - * @returns CONF_OK on success or CONF_INVALID if SSL is - * not supported in this build. - */ - virtual Conf::ConfResult set(const std::string &name, - SslCertificateVerifyCb *ssl_cert_verify_cb, - std::string &errstr) = 0; - - /** - * @brief Set certificate/key \p cert_type from the \p cert_enc encoded - * memory at \p buffer of \p size bytes. - * - * @param cert_type Certificate or key type to configure. - * @param cert_enc Buffer \p encoding type. - * @param buffer Memory pointer to encoded certificate or key. - * The memory is not referenced after this function returns. - * @param size Size of memory at \p buffer. - * @param errstr A human-readable error string will be written to this string - * on failure. - * - * @returns CONF_OK on success or CONF_INVALID if the memory in - * \p buffer is of incorrect encoding, or if librdkafka - * was not built with SSL support. - * - * @remark Calling this method multiple times with the same \p cert_type - * will replace the previous value. - * - * @remark Calling this method with \p buffer set to NULL will clear the - * configuration for \p cert_type. - * - * @remark The private key may require a password, which must be specified - * with the `ssl.key.password` configuration property prior to - * calling this function. - * - * @remark Private and public keys in PEM format may also be set with the - * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. 
- *
- * @remark CA certificate in PEM format may also be set with the
- *         `ssl.ca.pem` configuration property.
- */
-  virtual Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type,
-                                         RdKafka::CertificateEncoding cert_enc,
-                                         const void *buffer, size_t size,
-                                         std::string &errstr) = 0;
-
-  /** @brief Query single configuration value
-   *
-   *  Do not use this method to get callbacks registered by the configuration
-   *  interface. Instead use the specific get() methods with the matching
-   *  callback parameter in the signature.
-   *
-   *  Fallthrough:
-   *  Topic-level configuration properties from the \c default_topic_conf
-   *  may be retrieved using this interface.
-   *
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p value. */
-  virtual Conf::ConfResult get(const std::string &name,
-                               std::string &value) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p dr_cb. */
-  virtual Conf::ConfResult get(DeliveryReportCb *&dr_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p oauthbearer_token_refresh_cb. */
-  virtual Conf::ConfResult get(
-    OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p event_cb. */
-  virtual Conf::ConfResult get(EventCb *&event_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p partitioner_cb. */
-  virtual Conf::ConfResult get(PartitionerCb *&partitioner_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p partitioner_kp_cb. */
-  virtual Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p socket_cb. */
-  virtual Conf::ConfResult get(SocketCb *&socket_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p open_cb. */
-  virtual Conf::ConfResult get(OpenCb *&open_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p rebalance_cb. */
-  virtual Conf::ConfResult get(RebalanceCb *&rebalance_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p offset_commit_cb. */
-  virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0;
-
-  /** @brief Use with \p name = \c \"ssl_cert_verify_cb\" */
-  virtual Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0;
-
-  /** @brief Dump configuration names and values to list containing
-   *         name,value tuples */
-  virtual std::list<std::string> *dump () = 0;
-
-  /** @brief Use with \p name = \c \"consume_cb\" */
-  virtual Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb,
-                                std::string &errstr) = 0;
-
-  /**
-   * @brief Returns the underlying librdkafka C rd_kafka_conf_t handle.
- * - * @warning Calling the C API on this handle is not recommended and there - * is no official support for it, but for cases where the C++ - * does not provide the proper functionality this C handle can be - * used to interact directly with the core librdkafka API. - * - * @remark The lifetime of the returned pointer is the same as the Conf - * object this method is called on. - * - * @remark Include prior to including - * - * - * @returns \c rd_kafka_conf_t* if this is a CONF_GLOBAL object, else NULL. - */ - virtual struct rd_kafka_conf_s *c_ptr_global () = 0; - - /** - * @brief Returns the underlying librdkafka C rd_kafka_topic_conf_t handle. - * - * @warning Calling the C API on this handle is not recommended and there - * is no official support for it, but for cases where the C++ - * does not provide the proper functionality this C handle can be - * used to interact directly with the core librdkafka API. - * - * @remark The lifetime of the returned pointer is the same as the Conf - * object this method is called on. - * - * @remark Include prior to including - * - * - * @returns \c rd_kafka_topic_conf_t* if this is a CONF_TOPIC object, - * else NULL. - */ - virtual struct rd_kafka_topic_conf_s *c_ptr_topic () = 0; - - /** - * @brief Set callback_data for ssl engine. - * - * @remark The \c ssl.engine.location configuration must be set for this - * to have affect. - * - * @remark The memory pointed to by \p value must remain valid for the - * lifetime of the configuration object and any Kafka clients that - * use it. - * - * @returns CONF_OK on success, else CONF_INVALID. - */ - virtual Conf::ConfResult set_engine_callback_data (void *value, - std::string &errstr) = 0; -}; - -/**@}*/ - - -/** - * @name Kafka base client handle - * @{ - * - */ - -/** - * @brief Base handle, super class for specific clients. - */ -class RD_EXPORT Handle { - public: - virtual ~Handle() { } - - /** @returns the name of the handle */ - virtual const std::string name () const = 0; - - /** - * @brief Returns the client's broker-assigned group member id - * - * @remark This currently requires the high-level KafkaConsumer - * - * @returns Last assigned member id, or empty string if not currently - * a group member. - */ - virtual const std::string memberid () const = 0; - - - /** - * @brief Polls the provided kafka handle for events. - * - * Events will trigger application provided callbacks to be called. - * - * The \p timeout_ms argument specifies the maximum amount of time - * (in milliseconds) that the call will block waiting for events. - * For non-blocking calls, provide 0 as \p timeout_ms. - * To wait indefinately for events, provide -1. - * - * Events: - * - delivery report callbacks (if an RdKafka::DeliveryCb is configured) [producer] - * - event callbacks (if an RdKafka::EventCb is configured) [producer & consumer] - * - * @remark An application should make sure to call poll() at regular - * intervals to serve any queued callbacks waiting to be called. - * - * @warning This method MUST NOT be used with the RdKafka::KafkaConsumer, - * use its RdKafka::KafkaConsumer::consume() instead. - * - * @returns the number of events served. - */ - virtual int poll (int timeout_ms) = 0; - - /** - * @brief Returns the current out queue length - * - * The out queue contains messages and requests waiting to be sent to, - * or acknowledged by, the broker. - */ - virtual int outq_len () = 0; - - /** - * @brief Request Metadata from broker. 
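- *
- * Illustrative use (not part of the original header; assumes \p producer
- * is a created RdKafka::Producer or other handle):
- * @code
- *   RdKafka::Metadata *metadata;
- *   RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000);
- *   if (err == RdKafka::ERR_NO_ERROR)
- *     delete metadata;   // inspect topics/brokers before deleting
- * @endcode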
- * - * Parameters: - * \p all_topics - if non-zero: request info about all topics in cluster, - * if zero: only request info about locally known topics. - * \p only_rkt - only request info about this topic - * \p metadatap - pointer to hold metadata result. - * The \p *metadatap pointer must be released with \c delete. - * \p timeout_ms - maximum response time before failing. - * - * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap - * will be set), else RdKafka::ERR__TIMED_OUT on timeout or - * other error code on error. - */ - virtual ErrorCode metadata (bool all_topics, const Topic *only_rkt, - Metadata **metadatap, int timeout_ms) = 0; - - - /** - * @brief Pause producing or consumption for the provided list of partitions. - * - * Success or error is returned per-partition in the \p partitions list. - * - * @returns ErrorCode::NO_ERROR - * - * @sa resume() - */ - virtual ErrorCode pause (std::vector &partitions) = 0; - - - /** - * @brief Resume producing or consumption for the provided list of partitions. - * - * Success or error is returned per-partition in the \p partitions list. - * - * @returns ErrorCode::NO_ERROR - * - * @sa pause() - */ - virtual ErrorCode resume (std::vector &partitions) = 0; - - - /** - * @brief Query broker for low (oldest/beginning) - * and high (newest/end) offsets for partition. - * - * Offsets are returned in \p *low and \p *high respectively. - * - * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure. - */ - virtual ErrorCode query_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high, - int timeout_ms) = 0; - - /** - * @brief Get last known low (oldest/beginning) - * and high (newest/end) offsets for partition. - * - * The low offset is updated periodically (if statistics.interval.ms is set) - * while the high offset is updated on each fetched message set from the - * broker. - * - * If there is no cached offset (either low or high, or both) then - * OFFSET_INVALID will be returned for the respective offset. - * - * Offsets are returned in \p *low and \p *high respectively. - * - * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure. - * - * @remark Shall only be used with an active consumer instance. - */ - virtual ErrorCode get_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high) = 0; - - - /** - * @brief Look up the offsets for the given partitions by timestamp. - * - * The returned offset for each partition is the earliest offset whose - * timestamp is greater than or equal to the given timestamp in the - * corresponding partition. - * - * The timestamps to query are represented as \c offset in \p offsets - * on input, and \c offset() will return the closest earlier offset - * for the timestamp on output. - * - * Timestamps are expressed as milliseconds since epoch (UTC). - * - * The function will block for at most \p timeout_ms milliseconds. - * - * @remark Duplicate Topic+Partitions are not supported. - * @remark Errors are also returned per TopicPartition, see \c err() - * - * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR - * in which case per-partition errors might be set. - */ - virtual ErrorCode offsetsForTimes (std::vector &offsets, - int timeout_ms) = 0; - - - /** - * @brief Retrieve queue for a given partition. - * - * @returns The fetch queue for the given partition if successful. Else, - * NULL is returned. - * - * @remark This function only works on consumers. 
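- *
- * Illustrative sketch (not from the original header; \p consumer is a
- * hypothetical consumer instance):
- * @code
- *   RdKafka::TopicPartition *tp =
- *       RdKafka::TopicPartition::create("HelloTopic", 0);
- *   RdKafka::Queue *queue = consumer->get_partition_queue(tp);
- *   delete tp;   // NULL is returned if the fetch queue is unavailable
- * @endcode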
- */ - virtual Queue *get_partition_queue (const TopicPartition *partition) = 0; - - /** - * @brief Forward librdkafka logs (and debug) to the specified queue - * for serving with one of the ..poll() calls. - * - * This allows an application to serve log callbacks (\c log_cb) - * in its thread of choice. - * - * @param queue Queue to forward logs to. If the value is NULL the logs - * are forwarded to the main queue. - * - * @remark The configuration property \c log.queue MUST also be set to true. - * - * @remark librdkafka maintains its own reference to the provided queue. - * - * @returns ERR_NO_ERROR on success or an error code on error. - */ - virtual ErrorCode set_log_queue (Queue *queue) = 0; - - /** - * @brief Cancels the current callback dispatcher (Handle::poll(), - * KafkaConsumer::consume(), etc). - * - * A callback may use this to force an immediate return to the calling - * code (caller of e.g. Handle::poll()) without processing any further - * events. - * - * @remark This function MUST ONLY be called from within a - * librdkafka callback. - */ - virtual void yield () = 0; - - /** - * @brief Returns the ClusterId as reported in broker metadata. - * - * @param timeout_ms If there is no cached value from metadata retrieval - * then this specifies the maximum amount of time - * (in milliseconds) the call will block waiting - * for metadata to be retrieved. - * Use 0 for non-blocking calls. - * - * @remark Requires broker version >=0.10.0 and api.version.request=true. - * - * @returns Last cached ClusterId, or empty string if no ClusterId could be - * retrieved in the allotted timespan. - */ - virtual const std::string clusterid (int timeout_ms) = 0; - - /** - * @brief Returns the underlying librdkafka C rd_kafka_t handle. - * - * @warning Calling the C API on this handle is not recommended and there - * is no official support for it, but for cases where the C++ - * does not provide the proper functionality this C handle can be - * used to interact directly with the core librdkafka API. - * - * @remark The lifetime of the returned pointer is the same as the Topic - * object this method is called on. - * - * @remark Include prior to including - * - * - * @returns \c rd_kafka_t* - */ - virtual struct rd_kafka_s *c_ptr () = 0; - - /** - * @brief Returns the current ControllerId (controller broker id) - * as reported in broker metadata. - * - * @param timeout_ms If there is no cached value from metadata retrieval - * then this specifies the maximum amount of time - * (in milliseconds) the call will block waiting - * for metadata to be retrieved. - * Use 0 for non-blocking calls. - * - * @remark Requires broker version >=0.10.0 and api.version.request=true. - * - * @returns Last cached ControllerId, or -1 if no ControllerId could be - * retrieved in the allotted timespan. - */ - virtual int32_t controllerid (int timeout_ms) = 0; - - - /** - * @brief Returns the first fatal error set on this client instance, - * or ERR_NO_ERROR if no fatal error has occurred. - * - * This function is to be used with the Idempotent Producer and - * the Event class for \c EVENT_ERROR events to detect fatal errors. - * - * Generally all errors raised by the error event are to be considered - * informational and temporary, the client will try to recover from all - * errors in a graceful fashion (by retrying, etc). 
- * - * However, some errors should logically be considered fatal to retain - * consistency; in particular a set of errors that may occur when using the - * Idempotent Producer and the in-order or exactly-once producer guarantees - * can't be satisfied. - * - * @param errstr A human readable error string if a fatal error was set. - * - * @returns ERR_NO_ERROR if no fatal error has been raised, else - * any other error code. - */ - virtual ErrorCode fatal_error (std::string &errstr) const = 0; - - /** - * @brief Set SASL/OAUTHBEARER token and metadata - * - * @param token_value the mandatory token value to set, often (but not - * necessarily) a JWS compact serialization as per - * https://tools.ietf.org/html/rfc7515#section-3.1. - * @param md_lifetime_ms when the token expires, in terms of the number of - * milliseconds since the epoch. - * @param md_principal_name the Kafka principal name associated with the - * token. - * @param extensions potentially empty SASL extension keys and values where - * element [i] is the key and [i+1] is the key's value, to be communicated - * to the broker as additional key-value pairs during the initial client - * response as per https://tools.ietf.org/html/rfc7628#section-3.1. The - * number of SASL extension keys plus values must be a non-negative multiple - * of 2. Any provided keys and values are copied. - * @param errstr A human readable error string is written here, only if - * there is an error. - * - * The SASL/OAUTHBEARER token refresh callback should invoke - * this method upon success. The extension keys must not include the reserved - * key "`auth`", and all extension keys and values must conform to the - * required format as per https://tools.ietf.org/html/rfc7628#section-3.1: - * - * key = 1*(ALPHA) - * value = *(VCHAR / SP / HTAB / CR / LF ) - * - * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise \p errstr set - * and:
- * \c RdKafka::ERR__INVALID_ARG if any of the arguments are - * invalid;
- * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;
- * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism.
- * - * @sa RdKafka::oauthbearer_set_token_failure - * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" - */ - virtual ErrorCode oauthbearer_set_token (const std::string &token_value, - int64_t md_lifetime_ms, - const std::string &md_principal_name, - const std::list &extensions, - std::string &errstr) = 0; - - /** - * @brief SASL/OAUTHBEARER token refresh failure indicator. - * - * @param errstr human readable error reason for failing to acquire a token. - * - * The SASL/OAUTHBEARER token refresh callback should - * invoke this method upon failure to refresh the token. - * - * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:
- * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;
- * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism. - * - * @sa RdKafka::oauthbearer_set_token - * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" - */ - virtual ErrorCode oauthbearer_set_token_failure (const std::string &errstr) = 0; - - /** - * @brief Allocate memory using the same allocator librdkafka uses. - * - * This is typically an abstraction for the malloc(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * allocating pointers that are used by librdkafka. - * - * @remark Memory allocated by mem_malloc() must be freed using - * mem_free(). - */ - virtual void *mem_malloc (size_t size) = 0; - - /** - * @brief Free pointer returned by librdkafka - * - * This is typically an abstraction for the free(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * freeing pointers returned by librdkafka. - * - * In standard setups it is usually not necessary to use this interface - * rather than the free(3) function. - * - * @remark mem_free() must only be used for pointers returned by APIs - * that explicitly mention using this function for freeing. - */ - virtual void mem_free (void *ptr) = 0; -}; - - -/**@}*/ - - -/** - * @name Topic and partition objects - * @{ - * - */ - -/** - * @brief Topic+Partition - * - * This is a generic type to hold a single partition and various - * information about it. - * - * Is typically used with std::vector to provide - * a list of partitions for different operations. - */ -class RD_EXPORT TopicPartition { -public: - /** - * @brief Create topic+partition object for \p topic and \p partition. - * - * Use \c delete to deconstruct. - */ - static TopicPartition *create (const std::string &topic, int partition); - - /** - * @brief Create topic+partition object for \p topic and \p partition - * with offset \p offset. - * - * Use \c delete to deconstruct. - */ - static TopicPartition *create (const std::string &topic, int partition, - int64_t offset); - - virtual ~TopicPartition() = 0; - - /** - * @brief Destroy/delete the TopicPartitions in \p partitions - * and clear the vector. - */ - static void destroy (std::vector &partitions); - - /** @returns topic name */ - virtual const std::string &topic () const = 0; - - /** @returns partition id */ - virtual int partition () const = 0; - - /** @returns offset (if applicable) */ - virtual int64_t offset () const = 0; - - /** @brief Set offset */ - virtual void set_offset (int64_t offset) = 0; - - /** @returns error code (if applicable) */ - virtual ErrorCode err () const = 0; -}; - - - -/** - * @brief Topic handle - * - */ -class RD_EXPORT Topic { - public: - /** - * @brief Unassigned partition. - * - * The unassigned partition is used by the producer API for messages - * that should be partitioned using the configured or default partitioner. - */ - static const int32_t PARTITION_UA; - - /** @brief Special offsets */ - static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */ - static const int64_t OFFSET_END; /**< Consume from end */ - static const int64_t OFFSET_STORED; /**< Use offset storage */ - static const int64_t OFFSET_INVALID; /**< Invalid offset */ - - - /** - * @brief Creates a new topic handle for topic named \p topic_str - * - * \p conf is an optional configuration for the topic that will be used - * instead of the default topic configuration. - * The \p conf object is reusable after this call. 
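- *
- * Illustrative (not part of the original header; \p producer and the
- * topic name are hypothetical, NULL selects the default topic conf):
- * @code
- *   std::string errstr;
- *   RdKafka::Topic *topic =
- *       RdKafka::Topic::create(producer, "HelloTopic", NULL, errstr);
- *   if (!topic)
- *     std::cerr << errstr << std::endl;
- * @endcode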
- * - * @returns the new topic handle or NULL on error (see \p errstr). - */ - static Topic *create (Handle *base, const std::string &topic_str, - const Conf *conf, std::string &errstr); - - virtual ~Topic () = 0; - - - /** @returns the topic name */ - virtual const std::string name () const = 0; - - /** - * @returns true if \p partition is available for the topic (has leader). - * @warning \b MUST \b ONLY be called from within a - * RdKafka::PartitionerCb callback. - */ - virtual bool partition_available (int32_t partition) const = 0; - - /** - * @brief Store offset \p offset + 1 for topic partition \p partition. - * The offset will be committed (written) to the broker (or file) according - * to \p auto.commit.interval.ms or next manual offset-less commit call. - * - * @remark \c enable.auto.offset.store must be set to \c false when using - * this API. - * - * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the - * offsets could be stored. - */ - virtual ErrorCode offset_store (int32_t partition, int64_t offset) = 0; - - /** - * @brief Returns the underlying librdkafka C rd_kafka_topic_t handle. - * - * @warning Calling the C API on this handle is not recommended and there - * is no official support for it, but for cases where the C++ API - * does not provide the underlying functionality this C handle can be - * used to interact directly with the core librdkafka API. - * - * @remark The lifetime of the returned pointer is the same as the Topic - * object this method is called on. - * - * @remark Include prior to including - * - * - * @returns \c rd_kafka_topic_t* - */ - virtual struct rd_kafka_topic_s *c_ptr () = 0; -}; - - -/**@}*/ - - -/** - * @name Message object - * @{ - * - */ - - -/** - * @brief Message timestamp object - * - * Represents the number of milliseconds since the epoch (UTC). - * - * The MessageTimestampType dictates the timestamp type or origin. - * - * @remark Requires Apache Kafka broker version >= 0.10.0 - * - */ - -class RD_EXPORT MessageTimestamp { -public: - /*! Message timestamp type */ - enum MessageTimestampType { - MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */ - MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */ - }; - - MessageTimestampType type; /**< Timestamp type */ - int64_t timestamp; /**< Milliseconds since epoch (UTC). */ -}; - - -/** - * @brief Headers object - * - * Represents message headers. - * - * https://cwiki.apache.org/confluence/display/KAFKA/KIP-82+-+Add+Record+Headers - * - * @remark Requires Apache Kafka >= 0.11.0 brokers - */ -class RD_EXPORT Headers { -public: - virtual ~Headers() = 0; - - /** - * @brief Header object - * - * This object represents a single Header with a key value pair - * and an ErrorCode - * - * @remark dynamic allocation of this object is not supported. - */ - class Header { - public: - /** - * @brief Header object to encapsulate a single Header - * - * @param key the string value for the header key - * @param value the bytes of the header value, or NULL - * @param value_size the length in bytes of the header value - * - * @remark key and value are copied. 
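- *
- * Illustrative (not part of the original header text):
- * @code
- *   // key "trace-id", 6-byte value; both are copied by the constructor
- *   RdKafka::Headers::Header hdr("trace-id", "abc123", 6);
- * @endcode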
- * - */ - Header(const std::string &key, - const void *value, - size_t value_size): - key_(key), err_(ERR_NO_ERROR), value_size_(value_size) { - value_ = copy_value(value, value_size); - } - - /** - * @brief Header object to encapsulate a single Header - * - * @param key the string value for the header key - * @param value the bytes of the header value - * @param value_size the length in bytes of the header value - * @param err the error code if one returned - * - * @remark The error code is used for when the Header is constructed - * internally by using RdKafka::Headers::get_last which constructs - * a Header encapsulating the ErrorCode in the process. - * If err is set, the value and value_size fields will be undefined. - */ - Header(const std::string &key, - const void *value, - size_t value_size, - const RdKafka::ErrorCode err): - key_(key), err_(err), value_(NULL), value_size_(value_size) { - if (err == ERR_NO_ERROR) - value_ = copy_value(value, value_size); - } - - /** - * @brief Copy constructor - * - * @param other Header to make a copy of. - */ - Header(const Header &other): - key_(other.key_), err_(other.err_), value_size_(other.value_size_) { - value_ = copy_value(other.value_, value_size_); - } - - /** - * @brief Assignment operator - * - * @param other Header to make a copy of. - */ - Header& operator=(const Header &other) - { - if (&other == this) { - return *this; - } - - key_ = other.key_; - err_ = other.err_; - value_size_ = other.value_size_; - - if (value_ != NULL) - mem_free(value_); - - value_ = copy_value(other.value_, value_size_); - - return *this; - } - - ~Header() { - if (value_ != NULL) - mem_free(value_); - } - - /** @returns the key/name associated with this Header */ - std::string key() const { - return key_; - } - - /** @returns returns the binary value, or NULL */ - const void *value() const { - return value_; - } - - /** @returns returns the value casted to a nul-terminated C string, - * or NULL. */ - const char *value_string() const { - return static_cast(value_); - } - - /** @returns Value Size the length of the Value in bytes */ - size_t value_size() const { - return value_size_; - } - - /** @returns the error code of this Header (usually ERR_NO_ERROR) */ - RdKafka::ErrorCode err() const { - return err_; - } - - private: - char *copy_value(const void *value, size_t value_size) { - if (!value) - return NULL; - - char *dest = (char *)mem_malloc(value_size + 1); - memcpy(dest, (const char *)value, value_size); - dest[value_size] = '\0'; - - return dest; - } - - std::string key_; - RdKafka::ErrorCode err_; - char *value_; - size_t value_size_; - void *operator new(size_t); /* Prevent dynamic allocation */ - }; - - /** - * @brief Create a new instance of the Headers object - * - * @returns an empty Headers list - */ - static Headers *create(); - - /** - * @brief Create a new instance of the Headers object from a std::vector - * - * @param headers std::vector of RdKafka::Headers::Header objects. - * The headers are copied, not referenced. - * - * @returns a Headers list from std::vector set to the size of the std::vector - */ - static Headers *create(const std::vector

<Header> &headers);
-
-  /**
-   * @brief Adds a Header to the end of the list.
-   *
-   * @param key header key/name
-   * @param value binary value, or NULL
-   * @param value_size size of the value
-   *
-   * @returns an ErrorCode signalling success or failure to add the header.
-   */
-  virtual ErrorCode add(const std::string &key, const void *value,
-                        size_t value_size) = 0;
-
-  /**
-   * @brief Adds a Header to the end of the list.
-   *
-   * Convenience method for adding a std::string as a value for the header.
-   *
-   * @param key header key/name
-   * @param value value string
-   *
-   * @returns an ErrorCode signalling success or failure to add the header.
-   */
-  virtual ErrorCode add(const std::string &key, const std::string &value) = 0;
-
-  /**
-   * @brief Adds a Header to the end of the list.
-   *
-   * This method makes a copy of the passed header.
-   *
-   * @param header Existing header to copy
-   *
-   * @returns an ErrorCode signalling success or failure to add the header.
-   */
-  virtual ErrorCode add(const Header &header) = 0;
-
-  /**
-   * @brief Removes all the Headers of a given key
-   *
-   * @param key header key/name to remove
-   *
-   * @returns An ErrorCode signalling a success or failure to remove the Header.
-   */
-  virtual ErrorCode remove(const std::string &key) = 0;
-
-  /**
-   * @brief Gets all of the Headers of a given key
-   *
-   * @param key header key/name
-   *
-   * @remark If duplicate keys exist this will return them all as a std::vector
-   *
-   * @returns a std::vector containing all the Headers of the given key.
-   */
-  virtual std::vector<Header>
get(const std::string &key) const = 0;
-
-  /**
-   * @brief Gets the last occurrence of a Header of a given key
-   *
-   * @param key header key/name
-   *
-   * @remark This will only return the most recently added header
-   *
-   * @returns the Header if found, otherwise a Header with an err set to
-   *          ERR__NOENT.
-   */
-  virtual Header get_last(const std::string &key) const = 0;
-
-  /**
-   * @brief Returns all Headers
-   *
-   * @returns a std::vector containing all of the Headers
-   */
-  virtual std::vector<Header>
get_all() const = 0; - - /** - * @returns the number of headers. - */ - virtual size_t size() const = 0; -}; - - -/** - * @brief Message object - * - * This object represents either a single consumed or produced message, - * or an event (\p err() is set). - * - * An application must check RdKafka::Message::err() to see if the - * object is a proper message (error is RdKafka::ERR_NO_ERROR) or a - * an error event. - * - */ -class RD_EXPORT Message { - public: - /** @brief Message persistence status can be used by the application to - * find out if a produced message was persisted in the topic log. */ - enum Status { - /** Message was never transmitted to the broker, or failed with - * an error indicating it was not written to the log. - * Application retry risks ordering, but not duplication. */ - MSG_STATUS_NOT_PERSISTED = 0, - - /** Message was transmitted to broker, but no acknowledgement was - * received. - * Application retry risks ordering and duplication. */ - MSG_STATUS_POSSIBLY_PERSISTED = 1, - - /** Message was written to the log and fully acknowledged. - * No reason for application to retry. - * Note: this value should only be trusted with \c acks=all. */ - MSG_STATUS_PERSISTED = 2, - }; - - /** - * @brief Accessor functions* - * @remark Not all fields are present in all types of callbacks. - */ - - /** @returns The error string if object represent an error event, - * else an empty string. */ - virtual std::string errstr() const = 0; - - /** @returns The error code if object represents an error event, else 0. */ - virtual ErrorCode err () const = 0; - - /** @returns the RdKafka::Topic object for a message (if applicable), - * or NULL if a corresponding RdKafka::Topic object has not been - * explicitly created with RdKafka::Topic::create(). - * In this case use topic_name() instead. */ - virtual Topic *topic () const = 0; - - /** @returns Topic name (if applicable, else empty string) */ - virtual std::string topic_name () const = 0; - - /** @returns Partition (if applicable) */ - virtual int32_t partition () const = 0; - - /** @returns Message payload (if applicable) */ - virtual void *payload () const = 0 ; - - /** @returns Message payload length (if applicable) */ - virtual size_t len () const = 0; - - /** @returns Message key as string (if applicable) */ - virtual const std::string *key () const = 0; - - /** @returns Message key as void pointer (if applicable) */ - virtual const void *key_pointer () const = 0 ; - - /** @returns Message key's binary length (if applicable) */ - virtual size_t key_len () const = 0; - - /** @returns Message or error offset (if applicable) */ - virtual int64_t offset () const = 0; - - /** @returns Message timestamp (if applicable) */ - virtual MessageTimestamp timestamp () const = 0; - - /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */ - virtual void *msg_opaque () const = 0; - - virtual ~Message () = 0; - - /** @returns the latency in microseconds for a produced message measured - * from the produce() call, or -1 if latency is not available. */ - virtual int64_t latency () const = 0; - - /** - * @brief Returns the underlying librdkafka C rd_kafka_message_t handle. - * - * @warning Calling the C API on this handle is not recommended and there - * is no official support for it, but for cases where the C++ API - * does not provide the underlying functionality this C handle can be - * used to interact directly with the core librdkafka API. 
- * - * @remark The lifetime of the returned pointer is the same as the Message - * object this method is called on. - * - * @remark Include prior to including - * - * - * @returns \c rd_kafka_message_t* - */ - virtual struct rd_kafka_message_s *c_ptr () = 0; - - /** - * @brief Returns the message's persistence status in the topic log. - */ - virtual Status status () const = 0; - - /** @returns the Headers instance for this Message, or NULL if there - * are no headers. - * - * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers () = 0; - - /** @returns the Headers instance for this Message (if applicable). - * If NULL is returned the reason is given in \p err, which - * is either ERR__NOENT if there were no headers, or another - * error code if header parsing failed. - * - * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers (RdKafka::ErrorCode *err) = 0; - - /** @returns the broker id of the broker the message was produced to or - * fetched from, or -1 if not known/applicable. */ - virtual int32_t broker_id () const = 0; -}; - -/**@}*/ - - -/** - * @name Queue interface - * @{ - * - */ - - -/** - * @brief Queue interface - * - * Create a new message queue. Message queues allows the application - * to re-route consumed messages from multiple topic+partitions into - * one single queue point. This queue point, containing messages from - * a number of topic+partitions, may then be served by a single - * consume() method, rather than one per topic+partition combination. - * - * See the RdKafka::Consumer::start(), RdKafka::Consumer::consume(), and - * RdKafka::Consumer::consume_callback() methods that take a queue as the first - * parameter for more information. - */ -class RD_EXPORT Queue { - public: - /** - * @brief Create Queue object - */ - static Queue *create (Handle *handle); - - /** - * @brief Forward/re-route queue to \p dst. - * If \p dst is \c NULL, the forwarding is removed. - * - * The internal refcounts for both queues are increased. - * - * @remark Regardless of whether \p dst is NULL or not, after calling this - * function, \p src will not forward it's fetch queue to the consumer - * queue. - */ - virtual ErrorCode forward (Queue *dst) = 0; - - - /** - * @brief Consume message or get error event from the queue. - * - * @remark Use \c delete to free the message. - * - * @returns One of: - * - proper message (RdKafka::Message::err() is ERR_NO_ERROR) - * - error event (RdKafka::Message::err() is != ERR_NO_ERROR) - * - timeout due to no message or event in \p timeout_ms - * (RdKafka::Message::err() is ERR__TIMED_OUT) - */ - virtual Message *consume (int timeout_ms) = 0; - - /** - * @brief Poll queue, serving any enqueued callbacks. - * - * @remark Must NOT be used for queues containing messages. - * - * @returns the number of events served or 0 on timeout. - */ - virtual int poll (int timeout_ms) = 0; - - virtual ~Queue () = 0; - - /** - * @brief Enable IO event triggering for queue. - * - * To ease integration with IO based polling loops this API - * allows an application to create a separate file-descriptor - * that librdkafka will write \p payload (of size \p size) to - * whenever a new element is enqueued on a previously empty queue. - * - * To remove event triggering call with \p fd = -1. - * - * librdkafka will maintain a copy of the \p payload. - * - * @remark When using forwarded queues the IO event must only be enabled - * on the final forwarded-to (destination) queue. 
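- *
- * Illustrative POSIX sketch (not from the original header; error handling
- * omitted): wake an IO poll loop via a pipe whenever the queue becomes
- * non-empty:
- * @code
- *   int fds[2];
- *   if (pipe(fds) == 0)
- *     queue->io_event_enable(fds[1], "1", 1);   // "1" written per wakeup
- * @endcode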
- */ - virtual void io_event_enable (int fd, const void *payload, size_t size) = 0; -}; - -/**@}*/ - -/** - * @name ConsumerGroupMetadata - * @{ - * - */ -/** - * @brief ConsumerGroupMetadata holds a consumer instance's group - * metadata state. - * - * This class currently does not have any public methods. - */ -class RD_EXPORT ConsumerGroupMetadata { -public: - virtual ~ConsumerGroupMetadata () = 0; -}; - -/**@}*/ - -/** - * @name KafkaConsumer - * @{ - * - */ - - -/** - * @brief High-level KafkaConsumer (for brokers 0.9 and later) - * - * @remark Requires Apache Kafka >= 0.9.0 brokers - * - * Currently supports the \c range and \c roundrobin partition assignment - * strategies (see \c partition.assignment.strategy) - */ -class RD_EXPORT KafkaConsumer : public virtual Handle { -public: - /** - * @brief Creates a KafkaConsumer. - * - * The \p conf object must have \c group.id set to the consumer group to join. - * - * Use RdKafka::KafkaConsumer::close() to shut down the consumer. - * - * @sa RdKafka::RebalanceCb - * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms, - * \c partition.assignment.strategy, etc. - */ - static KafkaConsumer *create (const Conf *conf, std::string &errstr); - - virtual ~KafkaConsumer () = 0; - - - /** @brief Returns the current partition assignment as set by - * RdKafka::KafkaConsumer::assign() */ - virtual ErrorCode assignment (std::vector &partitions) = 0; - - /** @brief Returns the current subscription as set by - * RdKafka::KafkaConsumer::subscribe() */ - virtual ErrorCode subscription (std::vector &topics) = 0; - - /** - * @brief Update the subscription set to \p topics. - * - * Any previous subscription will be unassigned and unsubscribed first. - * - * The subscription set denotes the desired topics to consume and this - * set is provided to the partition assignor (one of the elected group - * members) for all clients which then uses the configured - * \c partition.assignment.strategy to assign the subscription sets's - * topics's partitions to the consumers, depending on their subscription. - * - * The result of such an assignment is a rebalancing which is either - * handled automatically in librdkafka or can be overridden by the application - * by providing a RdKafka::RebalanceCb. - * - * The rebalancing passes the assigned partition set to - * RdKafka::KafkaConsumer::assign() to update what partitions are actually - * being fetched by the KafkaConsumer. - * - * Regex pattern matching automatically performed for topics prefixed - * with \c \"^\" (e.g. \c \"^myPfx[0-9]_.*\" - * - * @remark A consumer error will be raised for each unavailable topic in the - * \p topics. The error will be ERR_UNKNOWN_TOPIC_OR_PART - * for non-existent topics, and - * ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. - * The consumer error will be raised through consume() (et.al.) - * with the \c RdKafka::Message::err() returning one of the - * error codes mentioned above. - * The subscribe function itself is asynchronous and will not return - * an error on unavailable topics. - * - * @returns an error if the provided list of topics is invalid. - */ - virtual ErrorCode subscribe (const std::vector &topics) = 0; - - /** @brief Unsubscribe from the current subscription set. */ - virtual ErrorCode unsubscribe () = 0; - - /** - * @brief Update the assignment set to \p partitions. - * - * The assignment set is the set of partitions actually being consumed - * by the KafkaConsumer. 
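- *
- * Illustrative manual assignment (not part of the original header; the
- * topic name is hypothetical):
- * @code
- *   std::vector<RdKafka::TopicPartition*> partitions;
- *   partitions.push_back(RdKafka::TopicPartition::create(
- *       "HelloTopic", 0, RdKafka::Topic::OFFSET_BEGINNING));
- *   consumer->assign(partitions);
- *   RdKafka::TopicPartition::destroy(partitions);  // assign() copies the list
- * @endcode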
- */ - virtual ErrorCode assign (const std::vector &partitions) = 0; - - /** - * @brief Stop consumption and remove the current assignment. - */ - virtual ErrorCode unassign () = 0; - - /** - * @brief Consume message or get error event, triggers callbacks. - * - * Will automatically call registered callbacks for any such queued events, - * including RdKafka::RebalanceCb, RdKafka::EventCb, RdKafka::OffsetCommitCb, - * etc. - * - * @remark Use \c delete to free the message. - * - * @remark An application should make sure to call consume() at regular - * intervals, even if no messages are expected, to serve any - * queued callbacks waiting to be called. This is especially - * important when a RebalanceCb has been registered as it needs - * to be called and handled properly to synchronize internal - * consumer state. - * - * @remark Application MUST NOT call \p poll() on KafkaConsumer objects. - * - * @returns One of: - * - proper message (RdKafka::Message::err() is ERR_NO_ERROR) - * - error event (RdKafka::Message::err() is != ERR_NO_ERROR) - * - timeout due to no message or event in \p timeout_ms - * (RdKafka::Message::err() is ERR__TIMED_OUT) - */ - virtual Message *consume (int timeout_ms) = 0; - - /** - * @brief Commit offsets for the current assignment. - * - * @remark This is the synchronous variant that blocks until offsets - * are committed or the commit fails (see return value). - * - * @remark If a RdKafka::OffsetCommitCb callback is registered it will - * be called with commit details on a future call to - * RdKafka::KafkaConsumer::consume() - - * - * @returns ERR_NO_ERROR or error code. - */ - virtual ErrorCode commitSync () = 0; - - /** - * @brief Asynchronous version of RdKafka::KafkaConsumer::CommitSync() - * - * @sa RdKafka::KafkaConsumer::commitSync() - */ - virtual ErrorCode commitAsync () = 0; - - /** - * @brief Commit offset for a single topic+partition based on \p message - * - * @remark The offset committed will be the message's offset + 1. - * - * @remark This is the synchronous variant. - * - * @sa RdKafka::KafkaConsumer::commitSync() - */ - virtual ErrorCode commitSync (Message *message) = 0; - - /** - * @brief Commit offset for a single topic+partition based on \p message - * - * @remark The offset committed will be the message's offset + 1. - * - * @remark This is the asynchronous variant. - * - * @sa RdKafka::KafkaConsumer::commitSync() - */ - virtual ErrorCode commitAsync (Message *message) = 0; - - /** - * @brief Commit offsets for the provided list of partitions. - * - * @remark The \c .offset of the partitions in \p offsets should be the - * offset where consumption will resume, i.e., the last - * processed offset + 1. - * - * @remark This is the synchronous variant. - */ - virtual ErrorCode commitSync (std::vector &offsets) = 0; - - /** - * @brief Commit offset for the provided list of partitions. - * - * @remark The \c .offset of the partitions in \p offsets should be the - * offset where consumption will resume, i.e., the last - * processed offset + 1. - * - * @remark This is the asynchronous variant. - */ - virtual ErrorCode commitAsync (const std::vector &offsets) = 0; - - /** - * @brief Commit offsets for the current assignment. - * - * @remark This is the synchronous variant that blocks until offsets - * are committed or the commit fails (see return value). - * - * @remark The provided callback will be called from this function. - * - * @returns ERR_NO_ERROR or error code. 
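- *
- * A typical at-least-once loop (illustrative, not from the original
- * header; \c process() is a hypothetical application function):
- * @code
- *   RdKafka::Message *msg = consumer->consume(1000);
- *   if (msg->err() == RdKafka::ERR_NO_ERROR) {
- *     process(msg);               // handle the message first
- *     consumer->commitSync(msg);  // then commit its offset + 1
- *   }
- *   delete msg;
- * @endcode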
- */
- virtual ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) = 0;
-
- /**
-  * @brief Commit offsets for the provided list of partitions.
-  *
-  * @remark This is the synchronous variant that blocks until offsets
-  *         are committed or the commit fails (see return value).
-  *
-  * @remark The provided callback will be called from this function.
-  *
-  * @returns ERR_NO_ERROR or error code.
-  */
- virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets,
-                               OffsetCommitCb *offset_commit_cb) = 0;
-
-
-
-
- /**
-  * @brief Retrieve committed offsets for topics+partitions.
-  *
-  * @returns ERR_NO_ERROR on success in which case the
-  *          \p offset or \p err field of each \p partitions' element is filled
-  *          in with the stored offset, or a partition specific error.
-  *          Else returns an error code.
-  */
- virtual ErrorCode committed (std::vector<TopicPartition*> &partitions,
-                              int timeout_ms) = 0;
-
- /**
-  * @brief Retrieve current positions (offsets) for topics+partitions.
-  *
-  * @returns ERR_NO_ERROR on success in which case the
-  *          \p offset or \p err field of each \p partitions' element is filled
-  *          in with the stored offset, or a partition specific error.
-  *          Else returns an error code.
-  */
- virtual ErrorCode position (std::vector<TopicPartition*> &partitions) = 0;
-
-
- /**
-  * For pausing and resuming consumption, see
-  * RdKafka::Handle::pause() and RdKafka::Handle::resume().
-  */
-
-
- /**
-  * @brief Close and shut down the consumer.
-  *
-  * This call will block until the following operations are finished:
-  *  - Trigger a local rebalance to void the current assignment
-  *  - Stop consumption for current assignment
-  *  - Commit offsets
-  *  - Leave group
-  *
-  * The maximum blocking time is roughly limited to session.timeout.ms.
-  *
-  * @remark Callbacks, such as RdKafka::RebalanceCb and
-  *         RdKafka::OffsetCommitCb, etc, may be called.
-  *
-  * @remark The consumer object must later be freed with \c delete
-  */
- virtual ErrorCode close () = 0;
-
-
- /**
-  * @brief Seek consumer for topic+partition to offset which is either an
-  *        absolute or logical offset.
-  *
-  * If \p timeout_ms is not 0 the call will wait this long for the
-  * seek to be performed. If the timeout is reached the internal state
-  * will be unknown and this function returns `ERR__TIMED_OUT`.
-  * If \p timeout_ms is 0 it will initiate the seek but return
-  * immediately without any error reporting (e.g., async).
-  *
-  * This call triggers a fetch queue barrier flush.
-  *
-  * @remark Consumption for the given partition must have started for the
-  *         seek to work. Use assign() to set the starting offset.
-  *
-  * @returns an ErrorCode to indicate success or failure.
-  */
- virtual ErrorCode seek (const TopicPartition &partition, int timeout_ms) = 0;
-
-
- /**
-  * @brief Store offset \p offset for topic partition \p partition.
-  * The offset will be committed (written) to the offset store according
-  * to \p auto.commit.interval.ms or the next manual offset-less commit*()
-  *
-  * Per-partition success/error status is propagated through
-  * TopicPartition.err()
-  *
-  * @remark The \c .offset field is stored as is, it will NOT be + 1.
-  *
-  * @remark \c enable.auto.offset.store must be set to \c false when using
-  *         this API.
-  *
-  * @returns RdKafka::ERR_NO_ERROR on success, or
-  *          RdKafka::ERR__UNKNOWN_PARTITION if none of the offsets could
-  *          be stored, or
-  *          RdKafka::ERR__INVALID_ARG if \c enable.auto.offset.store is true.
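-  *
-  * Illustrative (not from the original header; requires
-  * \c enable.auto.offset.store=false, and \c last_offset is hypothetical):
-  * @code
-  *   std::vector<RdKafka::TopicPartition*> offsets;
-  *   offsets.push_back(RdKafka::TopicPartition::create(
-  *       "HelloTopic", 0, last_offset + 1));   // offset to resume from
-  *   consumer->offsets_store(offsets);
-  *   RdKafka::TopicPartition::destroy(offsets);
-  * @endcode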
- */
- virtual ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) = 0;
-
-
- /**
-  * @returns the current consumer group metadata associated with this consumer,
-  *          or NULL if the consumer is not configured with a \c group.id.
-  *          This metadata object should be passed to the transactional
-  *          producer's RdKafka::Producer::send_offsets_to_transaction() API.
-  *
-  * @remark The returned object must be deleted by the application.
-  *
-  * @sa RdKafka::Producer::send_offsets_to_transaction()
-  */
- virtual ConsumerGroupMetadata *groupMetadata () = 0;
-
-
- /** @brief Check whether the consumer considers the current assignment to
-  *         have been lost involuntarily. This method is only applicable for
-  *         use with a subscribing consumer. Assignments are revoked
-  *         immediately when determined to have been lost, so this method is
-  *         only useful within a rebalance callback. Partitions that have
-  *         been lost may already be owned by other members in the group and
-  *         therefore committing offsets, for example, may fail.
-  *
-  * @remark Calling assign(), incremental_assign() or incremental_unassign()
-  *         resets this flag.
-  *
-  * @returns true if the current partition assignment is considered
-  *          lost, false otherwise.
-  */
- virtual bool assignment_lost () = 0;
-
- /**
-  * @brief The rebalance protocol currently in use. This will be
-  *        "NONE" if the consumer has not (yet) joined a group, else it will
-  *        match the rebalance protocol ("EAGER", "COOPERATIVE") of the
-  *        configured and selected assignor(s). All configured
-  *        assignors must have the same protocol type, meaning
-  *        online migration of a consumer group from using one
-  *        protocol to another (in particular upgrading from EAGER
-  *        to COOPERATIVE) without a restart is not currently
-  *        supported.
-  *
-  * @returns an empty string on error, or one of
-  *          "NONE", "EAGER", "COOPERATIVE" on success.
-  */
- virtual std::string rebalance_protocol () = 0;
-
-
- /**
-  * @brief Incrementally add \p partitions to the current assignment.
-  *
-  * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
-  * this method should be used in a rebalance callback to adjust the current
-  * assignment appropriately in the case where the rebalance type is
-  * ERR__ASSIGN_PARTITIONS. The application must pass the partition list
-  * passed to the callback (or a copy of it), even if the list is empty.
-  * This method may also be used outside the context of a rebalance callback.
-  *
-  * @returns NULL on success, or an error object if the operation was
-  *          unsuccessful.
-  *
-  * @remark The returned object must be deleted by the application.
-  */
- virtual Error *incremental_assign (const std::vector<TopicPartition*> &partitions) = 0;
-
-
- /**
-  * @brief Incrementally remove \p partitions from the current assignment.
-  *
-  * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
-  * this method should be used in a rebalance callback to adjust the current
-  * assignment appropriately in the case where the rebalance type is
-  * ERR__REVOKE_PARTITIONS. The application must pass the partition list
-  * passed to the callback (or a copy of it), even if the list is empty.
-  * This method may also be used outside the context of a rebalance callback.
-  *
-  * @returns NULL on success, or an error object if the operation was
-  *          unsuccessful.
-  *
-  * @remark The returned object must be deleted by the application.
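-  *
-  * Illustrative use inside a rebalance callback (not from the original
-  * header; \c partitions is the callback's partition list, and
-  * RdKafka::Error::str() is assumed to be available for the error text):
-  * @code
-  *   RdKafka::Error *error = consumer->incremental_unassign(partitions);
-  *   if (error) {
-  *     std::cerr << error->str() << std::endl;  // assumed accessor
-  *     delete error;
-  *   }
-  * @endcode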
- */ - virtual Error *incremental_unassign (const std::vector &partitions) = 0; - -}; - - -/**@}*/ - - -/** - * @name Simple Consumer (legacy) - * @{ - * - */ - -/** - * @brief Simple Consumer (legacy) - * - * A simple non-balanced, non-group-aware, consumer. - */ -class RD_EXPORT Consumer : public virtual Handle { - public: - /** - * @brief Creates a new Kafka consumer handle. - * - * \p conf is an optional object that will be used instead of the default - * configuration. - * The \p conf object is reusable after this call. - * - * @returns the new handle on success or NULL on error in which case - * \p errstr is set to a human readable error message. - */ - static Consumer *create (const Conf *conf, std::string &errstr); - - virtual ~Consumer () = 0; - - - /** - * @brief Start consuming messages for topic and \p partition - * at offset \p offset which may either be a proper offset (0..N) - * or one of the the special offsets: \p OFFSET_BEGINNING or \p OFFSET_END. - * - * rdkafka will attempt to keep \p queued.min.messages (config property) - * messages in the local queue by repeatedly fetching batches of messages - * from the broker until the threshold is reached. - * - * The application shall use one of the \p ..->consume*() functions - * to consume messages from the local queue, each kafka message being - * represented as a `RdKafka::Message *` object. - * - * \p ..->start() must not be called multiple times for the same - * topic and partition without stopping consumption first with - * \p ..->stop(). - * - * @returns an ErrorCode to indicate success or failure. - */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset) = 0; - - /** - * @brief Start consuming messages for topic and \p partition on - * queue \p queue. - * - * @sa RdKafka::Consumer::start() - */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset, - Queue *queue) = 0; - - /** - * @brief Stop consuming messages for topic and \p partition, purging - * all messages currently in the local queue. - * - * The application needs to be stop all consumers before destroying - * the Consumer handle. - * - * @returns an ErrorCode to indicate success or failure. - */ - virtual ErrorCode stop (Topic *topic, int32_t partition) = 0; - - /** - * @brief Seek consumer for topic+partition to \p offset which is either an - * absolute or logical offset. - * - * If \p timeout_ms is not 0 the call will wait this long for the - * seek to be performed. If the timeout is reached the internal state - * will be unknown and this function returns `ERR__TIMED_OUT`. - * If \p timeout_ms is 0 it will initiate the seek but return - * immediately without any error reporting (e.g., async). - * - * This call triggers a fetch queue barrier flush. - * - * @returns an ErrorCode to indicate success or failure. - */ - virtual ErrorCode seek (Topic *topic, int32_t partition, int64_t offset, - int timeout_ms) = 0; - - /** - * @brief Consume a single message from \p topic and \p partition. - * - * \p timeout_ms is maximum amount of time to wait for a message to be - * received. - * Consumer must have been previously started with \p ..->start(). - * - * @returns a Message object, the application needs to check if message - * is an error or a proper message RdKafka::Message::err() and checking for - * \p ERR_NO_ERROR. - * - * The message object must be destroyed when the application is done with it. - * - * Errors (in RdKafka::Message::err()): - * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched. 
- * - ERR__PARTITION_EOF - End of partition reached, not an error. - */ - virtual Message *consume (Topic *topic, int32_t partition, - int timeout_ms) = 0; - - /** - * @brief Consume a single message from the specified queue. - * - * \p timeout_ms is maximum amount of time to wait for a message to be - * received. - * Consumer must have been previously started on the queue with - * \p ..->start(). - * - * @returns a Message object, the application needs to check if message - * is an error or a proper message \p Message->err() and checking for - * \p ERR_NO_ERROR. - * - * The message object must be destroyed when the application is done with it. - * - * Errors (in RdKafka::Message::err()): - * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched - * - * Note that Message->topic() may be nullptr after certain kinds of - * errors, so applications should check that it isn't null before - * dereferencing it. - */ - virtual Message *consume (Queue *queue, int timeout_ms) = 0; - - /** - * @brief Consumes messages from \p topic and \p partition, calling - * the provided callback for each consumed messsage. - * - * \p consume_callback() provides higher throughput performance - * than \p consume(). - * - * \p timeout_ms is the maximum amount of time to wait for one or - * more messages to arrive. - * - * The provided \p consume_cb instance has its \p consume_cb function - * called for every message received. - * - * The \p opaque argument is passed to the \p consume_cb as \p opaque. - * - * @returns the number of messages processed or -1 on error. - * - * @sa RdKafka::Consumer::consume() - */ - virtual int consume_callback (Topic *topic, int32_t partition, - int timeout_ms, - ConsumeCb *consume_cb, - void *opaque) = 0; - - /** - * @brief Consumes messages from \p queue, calling the provided callback for - * each consumed messsage. - * - * @sa RdKafka::Consumer::consume_callback() - */ - virtual int consume_callback (Queue *queue, int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) = 0; - - /** - * @brief Converts an offset into the logical offset from the tail of a topic. - * - * \p offset is the (positive) number of items from the end. - * - * @returns the logical offset for message \p offset from the tail, this value - * may be passed to Consumer::start, et.al. - * @remark The returned logical offset is specific to librdkafka. - */ - static int64_t OffsetTail(int64_t offset); -}; - -/**@}*/ - - -/** - * @name Producer - * @{ - * - */ - - -/** - * @brief Producer - */ -class RD_EXPORT Producer : public virtual Handle { - public: - /** - * @brief Creates a new Kafka producer handle. - * - * \p conf is an optional object that will be used instead of the default - * configuration. - * The \p conf object is reusable after this call. - * - * @returns the new handle on success or NULL on error in which case - * \p errstr is set to a human readable error message. - */ - static Producer *create (const Conf *conf, std::string &errstr); - - - virtual ~Producer () = 0; - - /** - * @brief RdKafka::Producer::produce() \p msgflags - * - * These flags are optional. - */ - enum { - RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload - * when it is done with it. - * Mutually exclusive with RK_MSG_COPY. */ - RK_MSG_COPY = 0x2, /**< the \p payload data will be copied - * and the \p payload pointer will not - * be used by rdkafka after the - * call returns. - * Mutually exclusive with RK_MSG_FREE. */ - RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue - * full. 
- * WARNING: - * If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinitely blocking on - * the produce() call when the - * message queue is full. - */ - - - /**@cond NO_DOC*/ - /* For backwards compatibility: */ -#ifndef MSG_COPY /* defined in sys/msg.h */ - , /** this comma must exist between - * RK_MSG_BLOCK and MSG_FREE - */ - MSG_FREE = RK_MSG_FREE, - MSG_COPY = RK_MSG_COPY -#endif - /**@endcond*/ - }; - - /** - * @brief Produce and send a single message to broker. - * - * This is an asynchronous non-blocking API. - * - * \p partition is the target partition, either: - * - RdKafka::Topic::PARTITION_UA (unassigned) for - * automatic partitioning using the topic's partitioner function, or - * - a fixed partition (0..N) - * - * \p msgflags is zero or more of the following flags ORed together: - * RK_MSG_BLOCK - block \p produce*() call if - * \p queue.buffering.max.messages or - * \p queue.buffering.max.kbytes are exceeded. - * Messages are considered in-queue from the point they - * are accepted by produce() until their corresponding - * delivery report callback/event returns. - * It is thus a requirement to call - * poll() (or equiv.) from a separate - * thread when RK_MSG_BLOCK is used. - * See WARNING on \c RK_MSG_BLOCK above. - * RK_MSG_FREE - rdkafka will free(3) \p payload when it is done with it. - * RK_MSG_COPY - the \p payload data will be copied and the \p payload - * pointer will not be used by rdkafka after the - * call returns. - * - * NOTE: RK_MSG_FREE and RK_MSG_COPY are mutually exclusive. - * - * If the function returns an error code and RK_MSG_FREE was specified, then - * the memory associated with the payload is still the caller's - * responsibility. - * - * \p payload is the message payload of size \p len bytes. - * - * \p key is an optional message key, if non-NULL it - * will be passed to the topic partitioner as well as be sent with the - * message to the broker and passed on to the consumer. - * - * \p msg_opaque is an optional application-provided per-message opaque - * pointer that will be provided in the delivery report callback (\p dr_cb) - * for referencing this message. - * - * @returns an ErrorCode to indicate success or failure: - * - ERR_NO_ERROR - message successfully enqueued for transmission. - * - * - ERR__QUEUE_FULL - maximum number of outstanding messages has been - * reached: \c queue.buffering.max.messages - * - * - ERR_MSG_SIZE_TOO_LARGE - message is larger than configured max size: - * \c message.max.bytes - * - * - ERR__UNKNOWN_PARTITION - requested \p partition is unknown in the - * Kafka cluster. - * - * - ERR__UNKNOWN_TOPIC - topic is unknown in the Kafka cluster. - */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque) = 0; - - /** - * @brief Variant produce() that passes the key as a pointer and length - * instead of as a const std::string *. - */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - void *msg_opaque) = 0; - - /** - * @brief produce() variant that takes topic as a string (no need for - * creating a Topic object), and also allows providing the - * message timestamp (milliseconds since beginning of epoch, UTC). - * Otherwise identical to produce() above.
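To make the produce() contract above concrete, here is a hedged sketch of the first variant (Topic object, RK_MSG_COPY, optional key); `producer` and `topic` are assumed to have been created as in the earlier consumer sketch:

```cpp
// Copy the payload, let the topic partitioner pick the partition, and poll
// afterwards so that delivery report callbacks are served.
std::string payload = "hello";
std::string key = "key1";
RdKafka::ErrorCode err = producer->produce(
    topic,                           // RdKafka::Topic * created earlier
    RdKafka::Topic::PARTITION_UA,    // unassigned: use the partitioner
    RdKafka::Producer::RK_MSG_COPY,  // librdkafka copies the payload
    const_cast<char *>(payload.data()), payload.size(),
    &key,                            // optional key (const std::string *)
    nullptr);                        // msg_opaque for the delivery report
if (err == RdKafka::ERR__QUEUE_FULL)
  producer->poll(100);               // drain delivery reports, then retry
```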
- */ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque) = 0; - - /** - * @brief produce() variant that allows for Header support on produce. - * Otherwise identical to produce() above. - * - * @warning The \p headers will be freed/deleted if the produce() call - * succeeds, or left untouched if produce() fails. - */ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque) = 0; - - - /** - * @brief Variant produce() that accepts vectors for key and payload. - * The vector data will be copied. - */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - const std::vector<char> *payload, - const std::vector<char> *key, - void *msg_opaque) = 0; - - - /** - * @brief Wait until all outstanding produce requests, et al., are completed. - * This should typically be done prior to destroying a producer instance - * to make sure all queued and in-flight produce requests are completed - * before terminating. - * - * @remark The \c linger.ms time will be ignored for the duration of the call; - * queued messages will be sent to the broker as soon as possible. - * - * @remark This function will call Producer::poll() and thus - * trigger callbacks. - * - * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all - * outstanding requests were completed, else ERR_NO_ERROR - */ - virtual ErrorCode flush (int timeout_ms) = 0; - - - /** - * @brief Purge messages currently handled by the producer instance. - * - * @param purge_flags tells which messages should be purged and how. - * - * The application will need to call Handle::poll() or Producer::flush() - * afterwards to serve the delivery report callbacks of the purged messages. - * - * Messages purged from internal queues fail with the delivery report - * error code set to ERR__PURGE_QUEUE, while purged messages that - * are in-flight to or from the broker will fail with the error code set to - * ERR__PURGE_INFLIGHT. - * - * @warning Purging messages that are in-flight to or from the broker - * will ignore any subsequent acknowledgement for these messages - * received from the broker, effectively making it impossible - * for the application to know if the messages were successfully - * produced or not. This may result in duplicate messages if the - * application retries these messages at a later time. - * - * @remark This call may block for a short time while background thread - * queues are purged. - * - * @returns ERR_NO_ERROR on success, - * ERR__INVALID_ARG if the \p purge flags are invalid or unknown, - * ERR__NOT_IMPLEMENTED if called on a non-producer client instance. - */ - virtual ErrorCode purge (int purge_flags) = 0; - - /** - * @brief RdKafka::Handle::purge() \p purge_flags - */ - enum { - PURGE_QUEUE = 0x1, /**< Purge messages in internal queues */ - - PURGE_INFLIGHT = 0x2, /*! Purge messages in-flight to or from the broker. - * Purging these messages will void any future - * acknowledgements from the broker, making it - * impossible for the application to know if these - * messages were successfully delivered or not. - * Retrying these messages may lead to duplicates. */ - - PURGE_NON_BLOCKING = 0x4 /* Don't wait for background queue - * purging to finish.
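The flush()/purge() documentation above suggests a shutdown sequence along these lines; a sketch under the assumption that `producer` is an RdKafka::Producer instance:

```cpp
// Try to deliver everything that is queued, then give up on the remainder.
RdKafka::ErrorCode err = producer->flush(10 * 1000);  // wait up to 10 s
if (err == RdKafka::ERR__TIMED_OUT) {
  // Drop whatever is still queued or in flight...
  producer->purge(RdKafka::Producer::PURGE_QUEUE |
                  RdKafka::Producer::PURGE_INFLIGHT);
  // ...and serve the ERR__PURGE_QUEUE / ERR__PURGE_INFLIGHT delivery reports.
  producer->poll(500);
}
delete producer;
```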
*/ - }; - - /** - * @name Transactional API - * @{ - * - * Requires Kafka broker version v0.11.0 or later - * - * See the Transactional API documentation in rdkafka.h for more information. - */ - - /** - * @brief Initialize transactions for the producer instance. - * - * @param timeout_ms The maximum time to block. On timeout the operation - * may continue in the background, depending on state, - * and it is okay to call init_transactions() again. - * - * @returns an RdKafka::Error object on error, or NULL on success. - * Check whether the returned error object permits retrying - * by calling RdKafka::Error::is_retriable(), or whether a fatal - * error has been raised by calling RdKafka::Error::is_fatal(). - * - * @remark The returned error object (if not NULL) must be deleted. - * - * See rd_kafka_init_transactions() in rdkafka.h for more information. - * - */ - virtual Error *init_transactions (int timeout_ms) = 0; - - - /** - * @brief Begin a new transaction. init_transactions() must have been - * called successfully (once) before this function is called. - * - * @returns an RdKafka::Error object on error, or NULL on success. - * Check whether a fatal error has been raised by calling - * RdKafka::Error::is_fatal(). - * - * @remark The returned error object (if not NULL) must be deleted. - * - * See rd_kafka_begin_transaction() in rdkafka.h for more information. - */ - virtual Error *begin_transaction () = 0; - - /** - * @brief Sends a list of topic partition offsets to the consumer group - * coordinator for \p group_metadata, and marks the offsets as - * part of the current transaction. - * These offsets will be considered committed only if the transaction - * is committed successfully. - * - * The offsets should be the next message your application will - * consume, - * i.e., the last processed message's offset + 1 for each partition. - * Either track the offsets manually during processing or use - * RdKafka::KafkaConsumer::position() (on the consumer) to get the - * current offsets for - * the partitions assigned to the consumer. - * - * Use this method at the end of a consume-transform-produce loop prior - * to committing the transaction with commit_transaction(). - * - * @param offsets List of offsets to commit to the consumer group upon - * successful commit of the transaction. Offsets should be - * the next message to consume, - * e.g., last processed message + 1. - * @param group_metadata The current consumer group metadata as returned by - * RdKafka::KafkaConsumer::groupMetadata() on the consumer - * instance the provided offsets were consumed from. - * @param timeout_ms Maximum time allowed to register the - * offsets on the broker. - * - * @remark This function must be called on the transactional producer - * instance, not the consumer. - * - * @remark The consumer must disable auto commits - * (set \c enable.auto.commit to false on the consumer). - * - * @returns an RdKafka::Error object on error, or NULL on success. - * Check whether the returned error object permits retrying - * by calling RdKafka::Error::is_retriable(), or whether an abortable - * or fatal error has been raised by calling - * RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal() - * respectively. - * - * @remark The returned error object (if not NULL) must be deleted. - * - * See rd_kafka_send_offsets_to_transaction() in rdkafka.h for - * more information.
- */ - virtual Error *send_offsets_to_transaction ( - const std::vector<TopicPartition*> &offsets, - const ConsumerGroupMetadata *group_metadata, - int timeout_ms) = 0; - - /** - * @brief Commit the current transaction as started with begin_transaction(). - * - * Any outstanding messages will be flushed (delivered) before actually - * committing the transaction. - * - * @param timeout_ms The maximum time to block. On timeout the operation - * may continue in the background, depending on state, - * and it is okay to call this function again. - * Pass -1 to use the remaining transaction timeout, - * this is the recommended use. - * - * @remark It is strongly recommended to always pass -1 (remaining transaction - * time) as the \p timeout_ms. Using other values risks internal - * state desynchronization in case any of the underlying protocol - * requests fail. - * - * @returns an RdKafka::Error object on error, or NULL on success. - * Check whether the returned error object permits retrying - * by calling RdKafka::Error::is_retriable(), or whether an abortable - * or fatal error has been raised by calling - * RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal() - * respectively. - * - * @remark The returned error object (if not NULL) must be deleted. - * - * See rd_kafka_commit_transaction() in rdkafka.h for more information. - */ - virtual Error *commit_transaction (int timeout_ms) = 0; - - /** - * @brief Aborts the ongoing transaction. - * - * This function should also be used to recover from non-fatal abortable - * transaction errors. - * - * Any outstanding messages will be purged and fail with - * RdKafka::ERR__PURGE_INFLIGHT or RdKafka::ERR__PURGE_QUEUE. - * See RdKafka::Producer::purge() for details. - * - * @param timeout_ms The maximum time to block. On timeout the operation - * may continue in the background, depending on state, - * and it is okay to call this function again. - * Pass -1 to use the remaining transaction timeout, - * this is the recommended use. - * - * @remark It is strongly recommended to always pass -1 (remaining transaction - * time) as the \p timeout_ms. Using other values risks internal - * state desynchronization in case any of the underlying protocol - * requests fail. - * - * @returns an RdKafka::Error object on error, or NULL on success. - * Check whether the returned error object permits retrying - * by calling RdKafka::Error::is_retriable(), or whether a - * fatal error has been raised by calling RdKafka::Error::is_fatal(). - * - * @remark The returned error object (if not NULL) must be deleted. - * - * See rd_kafka_abort_transaction() in rdkafka.h for more information.
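Taken together, the transactional methods above form a consume-transform-produce loop. A hedged sketch, assuming `producer` is a transactional RdKafka::Producer and `consumer` an RdKafka::KafkaConsumer with `enable.auto.commit=false`:

```cpp
RdKafka::Error *error = producer->init_transactions(30 * 1000);
if (error) { /* inspect error->is_retriable() / error->is_fatal() */ delete error; }

producer->begin_transaction();
// ... consume, transform, and produce() messages here ...

// Register the next-to-consume offsets with the transaction.
std::vector<RdKafka::TopicPartition *> offsets;
consumer->assignment(offsets);            // currently assigned partitions
consumer->position(offsets);              // fill in next offset per partition
RdKafka::ConsumerGroupMetadata *gmd = consumer->groupMetadata();
error = producer->send_offsets_to_transaction(offsets, gmd, -1);
delete gmd;
RdKafka::TopicPartition::destroy(offsets);

if (!error)
  error = producer->commit_transaction(-1);  // -1: remaining txn timeout
if (error) {
  if (error->txn_requires_abort()) {
    RdKafka::Error *abort_err = producer->abort_transaction(-1);
    delete abort_err;                         // may be NULL; delete is safe
  }
  delete error;                               // returned errors must be deleted
}
```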
- */ - virtual Error *abort_transaction (int timeout_ms) = 0; - - /**@}*/ -}; - -/**@}*/ - - -/** - * @name Metadata interface - * @{ - * - */ - - -/** - * @brief Metadata: Broker information - */ -class BrokerMetadata { - public: - /** @returns Broker id */ - virtual int32_t id() const = 0; - - /** @returns Broker hostname */ - virtual const std::string host() const = 0; - - /** @returns Broker listening port */ - virtual int port() const = 0; - - virtual ~BrokerMetadata() = 0; -}; - - - -/** - * @brief Metadata: Partition information - */ -class PartitionMetadata { - public: - /** @brief Replicas */ - typedef std::vector<int32_t> ReplicasVector; - /** @brief ISRs (In-Sync-Replicas) */ - typedef std::vector<int32_t> ISRSVector; - - /** @brief Replicas iterator */ - typedef ReplicasVector::const_iterator ReplicasIterator; - /** @brief ISRs iterator */ - typedef ISRSVector::const_iterator ISRSIterator; - - - /** @returns Partition id */ - virtual int32_t id() const = 0; - - /** @returns Partition error reported by broker */ - virtual ErrorCode err() const = 0; - - /** @returns Leader broker (id) for partition */ - virtual int32_t leader() const = 0; - - /** @returns Replica brokers */ - virtual const std::vector<int32_t> *replicas() const = 0; - - /** @returns In-Sync-Replica brokers - * @warning The broker may return a cached/outdated list of ISRs. - */ - virtual const std::vector<int32_t> *isrs() const = 0; - - virtual ~PartitionMetadata() = 0; -}; - - - -/** - * @brief Metadata: Topic information - */ -class TopicMetadata { - public: - /** @brief Partitions */ - typedef std::vector<const PartitionMetadata*> PartitionMetadataVector; - /** @brief Partitions iterator */ - typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator; - - /** @returns Topic name */ - virtual const std::string topic() const = 0; - - /** @returns Partition list */ - virtual const PartitionMetadataVector *partitions() const = 0; - - /** @returns Topic error reported by broker */ - virtual ErrorCode err() const = 0; - - virtual ~TopicMetadata() = 0; -}; - - -/** - * @brief Metadata container - */ -class Metadata { - public: - /** @brief Brokers */ - typedef std::vector<const BrokerMetadata*> BrokerMetadataVector; - /** @brief Topics */ - typedef std::vector<const TopicMetadata*> TopicMetadataVector; - - /** @brief Brokers iterator */ - typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator; - /** @brief Topics iterator */ - typedef TopicMetadataVector::const_iterator TopicMetadataIterator; - - - /** - * @brief Broker list - * @remark Ownership of the returned pointer is retained by the instance of - * Metadata that is called. - */ - virtual const BrokerMetadataVector *brokers() const = 0; - - /** - * @brief Topic list - * @remark Ownership of the returned pointer is retained by the instance of - * Metadata that is called.
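A small sketch of walking the metadata classes above; `handle` stands in for any RdKafka::Handle (producer or consumer) and is an assumption of this example:

```cpp
RdKafka::Metadata *metadata = nullptr;
RdKafka::ErrorCode err =
    handle->metadata(/*all_topics=*/true, /*only_rkt=*/nullptr,
                     &metadata, /*timeout_ms=*/5000);
if (err == RdKafka::ERR_NO_ERROR) {
  // topics()/brokers() pointers are owned by the Metadata instance.
  for (const RdKafka::TopicMetadata *t : *metadata->topics())
    std::cout << t->topic() << ": "
              << t->partitions()->size() << " partition(s)\n";
  delete metadata;  // the application owns the returned Metadata object
}
```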
- */ - virtual const TopicMetadataVector *topics() const = 0; - - /** @brief Broker (id) originating this metadata */ - virtual int32_t orig_broker_id() const = 0; - - /** @brief Broker (name) originating this metadata */ - virtual const std::string orig_broker_name() const = 0; - - virtual ~Metadata() = 0; -}; - -/**@}*/ - -} - - -#endif /* _RDKAFKACPP_H_ */ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.a b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.a deleted file mode 100755 index ed7a5031..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.a and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so deleted file mode 100755 index 6aaf679a..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so.1 b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so.1 deleted file mode 100755 index 6aaf679a..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka++.so.1 and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.Makefile b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.Makefile deleted file mode 100644 index 6767215d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# This file is generated by gyp; do not edit. - -export builddir_name ?= ./build/deps/. -.PHONY: all -all: - $(MAKE) -C .. 
librdkafka diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.a b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.a deleted file mode 100755 index 42255e75..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.a and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so deleted file mode 100755 index abf7a6ff..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so.1 b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so.1 deleted file mode 100755 index abf7a6ff..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.so.1 and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.target.mk b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.target.mk deleted file mode 100644 index 021dac8d..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/librdkafka.target.mk +++ /dev/null @@ -1,55 +0,0 @@ -# This file is generated by gyp; do not edit. 
- -TOOLSET := target -TARGET := librdkafka -### Rules for action "configure": -quiet_cmd_deps_librdkafka_gyp_librdkafka_target_configure = ACTION deps_librdkafka_gyp_librdkafka_target_configure $@ -cmd_deps_librdkafka_gyp_librdkafka_target_configure = LD_LIBRARY_PATH=$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; cd $(srcdir)/deps; mkdir -p librdkafka; node ../util/configure - -deps/librdkafka/config.h: obj := $(abs_obj) -deps/librdkafka/config.h: builddir := $(abs_builddir) -deps/librdkafka/config.h: TOOLSET := $(TOOLSET) -deps/librdkafka/config.h: FORCE_DO_CMD - $(call do_cmd,deps_librdkafka_gyp_librdkafka_target_configure) - -all_deps += deps/librdkafka/config.h -action_deps_librdkafka_gyp_librdkafka_target_configure_outputs := deps/librdkafka/config.h - -### Rules for action "build_dependencies": -quiet_cmd_deps_librdkafka_gyp_librdkafka_target_build_dependencies = ACTION deps_librdkafka_gyp_librdkafka_target_build_dependencies $@ -cmd_deps_librdkafka_gyp_librdkafka_target_build_dependencies = LD_LIBRARY_PATH=$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; cd $(srcdir)/deps; mkdir -p deps/librdkafka/src deps/librdkafka/src-cpp; make -C librdkafka libs install - -deps/deps/librdkafka/src-cpp/librdkafka++.so: obj := $(abs_obj) -deps/deps/librdkafka/src-cpp/librdkafka++.so: builddir := $(abs_builddir) -deps/deps/librdkafka/src-cpp/librdkafka++.so: TOOLSET := $(TOOLSET) -deps/deps/librdkafka/src-cpp/librdkafka++.so deps/deps/librdkafka/src-cpp/librdkafka++.so.1 deps/deps/librdkafka/src/librdkafka.so deps/deps/librdkafka/src/librdkafka.so.1 deps/deps/librdkafka/src-cpp/librdkafka++.a deps/deps/librdkafka/src/librdkafka.a: 11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate - @: -.INTERMEDIATE: 11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate -11a9e3388a67e1ca5c31c1d8da49cb6d2714eb41.intermediate: $(srcdir)/deps/librdkafka/config.h FORCE_DO_CMD - $(call do_cmd,touch) - $(call do_cmd,deps_librdkafka_gyp_librdkafka_target_build_dependencies) - -all_deps += deps/deps/librdkafka/src-cpp/librdkafka++.so deps/deps/librdkafka/src-cpp/librdkafka++.so.1 deps/deps/librdkafka/src/librdkafka.so deps/deps/librdkafka/src/librdkafka.so.1 deps/deps/librdkafka/src-cpp/librdkafka++.a deps/deps/librdkafka/src/librdkafka.a -action_deps_librdkafka_gyp_librdkafka_target_build_dependencies_outputs := deps/deps/librdkafka/src-cpp/librdkafka++.so deps/deps/librdkafka/src-cpp/librdkafka++.so.1 deps/deps/librdkafka/src/librdkafka.so deps/deps/librdkafka/src/librdkafka.so.1 deps/deps/librdkafka/src-cpp/librdkafka++.a deps/deps/librdkafka/src/librdkafka.a - - -### Rules for final target. -# Build our special outputs first. -$(obj).target/deps/librdkafka.stamp: | $(action_deps_librdkafka_gyp_librdkafka_target_configure_outputs) $(action_deps_librdkafka_gyp_librdkafka_target_build_dependencies_outputs) - -# Preserve order dependency of special output on deps. -$(action_deps_librdkafka_gyp_librdkafka_target_configure_outputs) $(action_deps_librdkafka_gyp_librdkafka_target_build_dependencies_outputs): | - -$(obj).target/deps/librdkafka.stamp: TOOLSET := $(TOOLSET) -$(obj).target/deps/librdkafka.stamp: FORCE_DO_CMD - $(call do_cmd,touch) - -all_deps += $(obj).target/deps/librdkafka.stamp -# Add target alias -.PHONY: librdkafka -librdkafka: $(obj).target/deps/librdkafka.stamp - -# Add target alias to "all" target. 
-.PHONY: all -all: librdkafka - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++-static.pc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++-static.pc deleted file mode 100644 index ff296db7..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++-static.pc +++ /dev/null @@ -1,10 +0,0 @@ -prefix=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -libdir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -includedir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps/include - -Name: librdkafka++-static -Description: The Apache Kafka C/C++ library (static) -Version: 1.8.2 -Requires: rdkafka-static -Cflags: -I${includedir} -Libs: -L${libdir} ${pc_sysrootdir}${libdir}/librdkafka++.a -lm -ldl -lpthread -lrt diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++.pc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++.pc deleted file mode 100644 index 5c47ede6..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka++.pc +++ /dev/null @@ -1,11 +0,0 @@ -prefix=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -libdir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -includedir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps/include - -Name: librdkafka++ -Description: The Apache Kafka C/C++ library -Version: 1.8.2 -Requires.private: rdkafka -Cflags: -I${includedir} -Libs: -L${libdir} -lrdkafka++ -Libs.private: -lm -ldl -lpthread -lrt diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka-static.pc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka-static.pc deleted file mode 100644 index 6f2058e7..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka-static.pc +++ /dev/null @@ -1,10 +0,0 @@ -prefix=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -libdir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -includedir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps/include - -Name: librdkafka-static -Description: The Apache Kafka C/C++ library (static) -Version: 1.8.2 -Requires: zlib libcrypto libssl -Cflags: -I${includedir} -Libs: -L${libdir} ${pc_sysrootdir}${libdir}/librdkafka.a -lm -ldl 
-lpthread -lrt diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka.pc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka.pc deleted file mode 100644 index f1564e03..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/pkgconfig/rdkafka.pc +++ /dev/null @@ -1,11 +0,0 @@ -prefix=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -libdir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps -includedir=/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka/build/deps/include - -Name: librdkafka -Description: The Apache Kafka C/C++ library -Version: 1.8.2 -Requires.private: zlib libcrypto libssl -Cflags: -I${includedir} -Libs: -L${libdir} -lrdkafka -Libs.private: -lm -ldl -lpthread -lrt diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CHANGELOG.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CHANGELOG.md deleted file mode 100755 index 0387cd0e..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CHANGELOG.md +++ /dev/null @@ -1,776 +0,0 @@ -# librdkafka v1.8.2 - -librdkafka v1.8.2 is a maintenance release. - -## Enhancements - - * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380) - * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. - Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on - build type. - -## Fixes - - * The `librdkafka.redist` 1.8.0 package had two flaws: - - the linux-arm64 .so build was a linux-x64 build. - - the included Windows MSVC 140 runtimes for x64 were in fact x86. - The release script has been updated to verify the architectures of - provided artifacts to avoid this happening in the future. - * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided. - This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go). - * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04; - these builds are now performed on Ubuntu 16.04 instead. - This may affect users on ancient Linux distributions. - * It was not possible to configure `ssl.ca.location` on OSX; the property - would automatically revert to `probe` (the default value). - This regression was introduced in v1.8.0. (#3566) - * librdkafka's internal timers would not start if the timeout was set to 0, - which would result in some timeout operations not being enforced correctly, - e.g., the transactional producer API timeouts. - These timers are now started with a timeout of 1 microsecond. - -### Transactional producer fixes - - * Upon quick repeated leader changes the transactional producer could receive - an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an - Epoch bump on the producer resulting in an InitProducerIdRequest being sent - to the transaction coordinator in the middle of a transaction.
- This request would start a new transaction on the coordinator, but the - producer would still think (erroneously) it was in the current transaction. - Any messages produced in the current transaction prior to this event would - be silently lost when the application committed the transaction, leading - to message loss. - This has been fixed by setting the Abortable transaction error state - in the producer. #3575. - * The transactional producer could stall during a transaction if the transaction - coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()). - This stall lasted until the coordinator connection went down, the - transaction timed out, the transaction was aborted, or messages were produced - to a new partition, whichever came first. #3571. - - - -*Note: there was no v1.8.1 librdkafka release* - - -# librdkafka v1.8.0 - -librdkafka v1.8.0 is a security release: - - * Upgrade bundled zlib version from 1.2.8 to 1.2.11 in the `librdkafka.redist` - NuGet package. The updated zlib version fixes CVEs: - CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843 - See https://github.com/edenhill/librdkafka/issues/2934 for more information. - * librdkafka now uses [vcpkg](https://vcpkg.io/) for up-to-date Windows - dependencies in the `librdkafka.redist` NuGet package: - OpenSSL 1.1.1l, zlib 1.2.11, zstd 1.5.0. - * The upstream dependency (OpenSSL, zstd, zlib) source archive checksums are - now verified when building with `./configure --install-deps`. - These builds are used by the librdkafka builds bundled with - confluent-kafka-go, confluent-kafka-python and confluent-kafka-dotnet. - - -## Enhancements - - * Producer `flush()` now overrides the `linger.ms` setting for the duration - of the `flush()` call, effectively triggering immediate transmission of - queued messages. (#3489) - -## Fixes - -### General fixes - - * Correctly detect presence of zlib via compilation check. (Chris Novakovic) - * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator - connection goes down, only when all standard named brokers have been tried. - This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on - `consumer_close()`. It is also now only emitted if the connection was fully - up (past handshake), and not just connected. - * `rd_kafka_query_watermark_offsets()`, `rd_kafka_offsets_for_times()`, - `consumer_lag` metric, and `auto.offset.reset` now honour - `isolation.level` and will return the Last Stable Offset (LSO) - when `isolation.level` is set to `read_committed` (default), rather than - the uncommitted high-watermark when it is set to `read_uncommitted`. (#3423) - * SASL GSSAPI is now usable when `sasl.kerberos.min.time.before.relogin` - is set to 0 - which disables ticket refreshes (by @mpekalski, #3431). - * Rename internal crc32c() symbol to rd_crc32c() to avoid conflict with - other static libraries (#3421). - * `txidle` and `rxidle` in the statistics object were emitted as 18446744073709551615 when no idle was known. -1 is now emitted instead. (#3519) - - -### Consumer fixes - - * Automatically retry offset commits on `ERR_REQUEST_TIMED_OUT`, - `ERR_COORDINATOR_NOT_AVAILABLE`, and `ERR_NOT_COORDINATOR` (#3398). - Offset commits will be retried twice. - * Timed auto commits did not work when only using assign() and not subscribe(). - This regression was introduced in v1.7.0.
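For reference, the `isolation.level` fix noted above concerns calls like the following; under `read_committed` (the default) the returned high watermark is the Last Stable Offset. A hedged C++ sketch, with `handle` and the topic name as placeholders:

```cpp
int64_t low = 0, high = 0;
RdKafka::ErrorCode err = handle->query_watermark_offsets(
    "HelloTopic", /*partition=*/0, &low, &high, /*timeout_ms=*/5000);
if (err == RdKafka::ERR_NO_ERROR)
  std::cout << "watermarks: [" << low << ", " << high << ")\n";
```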
- * If the topics matching the current subscription changed (or the application - updated the subscription) while there was an outstanding JoinGroup or - SyncGroup request, an additional request would sometimes be sent before - handling the response of the first. This in turn led to internal state - issues that could cause a crash or misbehaviour. - The consumer will now wait for any outstanding JoinGroup or SyncGroup - responses before re-joining the group. - * `auto.offset.reset` could previously be triggered by temporary errors, - such as disconnects and timeouts (after the two retries are exhausted). - This is now fixed so that the auto offset reset policy is only triggered - for permanent errors. - * The error that triggers `auto.offset.reset` is now logged to help the - application owner identify the reason for the reset. - * If a rebalance takes longer than a consumer's `session.timeout.ms`, the - consumer will remain in the group as long as it receives heartbeat responses - from the broker. - - -### Admin fixes - - * `DeleteRecords()` could crash if one of the underlying requests - (for a given partition leader) failed at the transport level (e.g., timeout). - (#3476). - - - -# librdkafka v1.7.0 - -librdkafka v1.7.0 is a feature release: - - * [KIP-360](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=89068820) - Improve reliability of the transactional producer. - Requires Apache Kafka 2.5 or later. - * OpenSSL Engine support (`ssl.engine.location`) by @adinigam and @ajbarb. - - -## Enhancements - - * Added `connections.max.idle.ms` to automatically close idle broker - connections. - This feature is disabled by default unless `bootstrap.servers` contains - the string `azure`, in which case the default is set to <4 minutes to improve - connection reliability and circumvent limitations with the Azure load - balancers (see #3109 for more information). - * Bumped to OpenSSL 1.1.1k in binary librdkafka artifacts. - * The binary librdkafka artifacts for Alpine are now using Alpine 3.12 and - OpenSSL 1.1.1k. - * Improved static librdkafka Windows builds using MinGW (@neptoess, #3130). - * The `librdkafka.redist` NuGet package now has updated zlib, zstd and - OpenSSL versions (from vcpkg). - - -## Security considerations - - * The zlib version bundled with the `librdkafka.redist` NuGet package has now been upgraded - from zlib 1.2.8 to 1.2.11, fixing the following CVEs: - * CVE-2016-9840: undefined behaviour (compiler dependent) in inflate (decompression) code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low. - * CVE-2016-9841: undefined behaviour (compiler dependent) in inflate code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low. - * CVE-2016-9842: undefined behaviour in inflateMark(): this API is not used by librdkafka. - * CVE-2016-9843: issue in crc32_big() which is called from crc32_z(): this API is not used by librdkafka. - -## Upgrade considerations - - * The C++ `oauthbearer_token_refresh_cb()` was missing a `Handle *` - argument that has now been added. This is a breaking change but the original - function signature is considered a bug. - This change only affects C++ OAuth developers.
- * [KIP-735](https://cwiki.apache.org/confluence/display/KAFKA/KIP-735%3A+Increase+default+consumer+session+timeout) The consumer `session.timeout.ms` - default was changed from 10 to 45 seconds to make consumer groups more - robust and less sensitive to temporary network and cluster issues. - * Statistics: `consumer_lag` is now using the `committed_offset`, - while the new `consumer_lag_stored` is using `stored_offset` - (offset to be committed). - This is more correct than the previous `consumer_lag`, which was using - either `committed_offset` or `app_offset` (last message passed - to application). - * The `librdkafka.redist` NuGet package is now built with MSVC runtime v140 - (VS 2015). Previous versions were built with MSVC runtime v120 (VS 2013). - - -## Fixes - -### General fixes - - * Fix accesses to freed metadata cache mutexes on client termination (#3279) - * There was a race condition on receiving updated metadata where a broker id - update (such as bootstrap to proper broker transformation) could finish after - the topic metadata cache was updated, leading to existing brokers seemingly - being unavailable. - One occurrence of this issue was query_watermark_offsets(), which could return - `ERR__UNKNOWN_PARTITION` for existing partitions shortly after the - client instance was created. - * The OpenSSL context is now initialized with `TLS_client_method()` - (on OpenSSL >= 1.1.0) instead of the deprecated and outdated - `SSLv23_client_method()`. - * The initial cluster connection on client instance creation could sometimes - be delayed up to 1 second if a `group.id` or `transactional.id` - was configured (#3305). - * Speed up triggering of new broker connections in certain cases by exiting - the broker thread io/op poll loop when a wakeup op is received. - * SASL GSSAPI: The Kerberos kinit refresh command was triggered from - `rd_kafka_new()`, which made this call blocking if the refresh command - took a long time. The refresh is now performed by the background rdkafka - main thread. - * Fix busy-loop (100% CPU on the broker threads) during the handshake phase - of an SSL connection. - * Disconnects during SSL handshake are now propagated as transport errors - rather than SSL errors, since these disconnects are at the transport level - (e.g., incorrect listener, flaky load balancer, etc.) and not due to SSL - issues. - * Increment metadata fast refresh interval backoff exponentially (@ajbarb, #3237). - * Unthrottled requests are no longer counted in the `brokers[].throttle` - statistics object. - * Log CONFWARN warning when global topic configuration properties - are overwritten by explicitly setting a `default_topic_conf`. - -### Consumer fixes - - * If a rebalance happened during a `consume_batch..()` call, the already - accumulated messages for revoked partitions were not purged, which would - pass messages to the application for partitions that were no longer owned - by the consumer. Fixed by @jliunyu. #3340. - * Fix balancing and reassignment issues with the cooperative-sticky assignor. - #3306. - * Fix incorrect detection of first rebalance in sticky assignor (@hallfox). - * Aborted transactions with no messages produced to a partition could - cause further successfully committed messages in the same Fetch response to - be ignored, resulting in consumer-side message loss. - A log message along the lines of `Abort txn ctrl msg bad order at offset - 7501: expected before or at 7702: messages in aborted transactions may be delivered to the application` - would be seen.
- This is a rare occurrence where a transactional producer would register with - the partition but not produce any messages before aborting the transaction. - * The consumer group deemed cached metadata up to date by checking - `topic.metadata.refresh.interval.ms`: if this property was set too low - it would cause cached metadata to be unusable and new metadata to be fetched, - which could delay the time it took for a rebalance to settle. - It now correctly uses `metadata.max.age.ms` instead. - * The consumer group timed auto commit would attempt commits during rebalances, - which could result in "Illegal generation" errors. This is now fixed; the - timed auto committer is only employed in the steady state when no rebalances - are taking place. Offsets are still auto committed when partitions are - revoked. - * Retriable FindCoordinatorRequest errors are no longer propagated to - the application as they are retried automatically. - * Fix rare crash (assert `rktp_started`) on consumer termination - (introduced in v1.6.0). - * Fix unaligned access and possibly corrupted snappy decompression when - building with MSVC (@azat) - * A consumer configured with the `cooperative-sticky` assignor did - not actively Leave the group on unsubscribe(). This delayed the - rebalance for the remaining group members by up to `session.timeout.ms`. - * The current subscription list was sometimes leaked when unsubscribing. - -### Producer fixes - - * The timeout value of `flush()` was not respected when delivery reports - were scheduled as events (such as for confluent-kafka-go) rather than - callbacks. - * There was a race condition in `purge()`, which could cause newly - created partition objects, or partitions that were changing leaders, to - not have their message queues purged. This could cause - `abort_transaction()` to time out. This issue is now fixed. - * In certain high-throughput produce rate patterns, producing could stall for - 1 second, regardless of `linger.ms`, due to rate-limiting of internal - queue wakeups. This is now fixed by not rate-limiting queue wakeups but - instead limiting them to one wakeup per queue reader poll. #2912. - -### Transactional Producer fixes - - * KIP-360: Fatal Idempotent producer errors are now recoverable by the - transactional producer and will raise a `txn_requires_abort()` error. - * If the cluster went down between `produce()` and `commit_transaction()` - and before any partitions had been registered with the coordinator, the - messages would time out but the commit would succeed because nothing - had been sent to the coordinator. This is now fixed. - * If the current transaction failed while `commit_transaction()` was - checking the current transaction state, an invalid state transition could - occur, which in turn would trigger an assertion crash. - This issue showed up as "Invalid txn state transition: .." crashes, and is - now fixed by properly synchronizing both checking and transition of state. - - - -# librdkafka v1.6.1 - -librdkafka v1.6.1 is a maintenance release. - -## Upgrade considerations - - * Fatal idempotent producer errors are now also fatal to the transactional - producer. This is a necessary step to maintain data integrity prior to - librdkafka supporting KIP-360. Applications should check any transactional - API errors for the is_fatal flag and decommission the transactional producer - if the flag is set.
- * The consumer error raised by `auto.offset.reset=error` now has its error code - set to `ERR__AUTO_OFFSET_RESET` to allow an application to differentiate - between auto offset resets and other consumer errors. - - -## Fixes - -### General fixes - - * Admin API and transactional `send_offsets_to_transaction()` coordinator - requests, such as TxnOffsetCommitRequest, could in rare cases be sent - multiple times, which could cause a crash. - * `ssl.ca.location=probe` is now enabled by default on Mac OSX since the - librdkafka-bundled OpenSSL might not have the same default CA search paths - as the system or brew installed OpenSSL. Probing scans all known locations. - -### Transactional Producer fixes - - * Fatal idempotent producer errors are now also fatal to the transactional - producer. - * The transactional producer could crash if the transaction failed while - `send_offsets_to_transaction()` was called. - * Group coordinator requests for transactional - `send_offsets_to_transaction()` calls would leak memory if an attempt - was made to send the underlying request after the transaction had - failed. - * When gradually producing to multiple partitions (resulting in multiple - underlying AddPartitionsToTxnRequests), subsequent partitions could get - stuck in pending state under certain conditions. These pending partitions - would not send queued messages to the broker and eventually trigger - message timeouts, failing the current transaction. This is now fixed. - * Committing an empty transaction (no messages were produced and no - offsets were sent) would previously raise a fatal error due to invalid state - on the transaction coordinator. We now allow empty/no-op transactions to - be committed. - -### Consumer fixes - - * The consumer will now retry indefinitely (or until the assignment is changed) - to retrieve committed offsets. This fixes the issue where only two retries - were attempted when outstanding transactions were blocking OffsetFetch - requests with `ERR_UNSTABLE_OFFSET_COMMIT`. #3265 - - - - - -# librdkafka v1.6.0 - -librdkafka v1.6.0 is a feature release: - - * [KIP-429 Incremental rebalancing](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol) with sticky - consumer group partition assignor (KIP-54) (by @mhowlett). - * [KIP-480 Sticky producer partitioning](https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner) (`sticky.partitioning.linger.ms`) - - achieves higher throughput and lower latency through sticky selection - of a random partition (by @abbycriswell). - * AdminAPI: Add support for `DeleteRecords()`, `DeleteGroups()` and - `DeleteConsumerGroupOffsets()` (by @gridaphobe) - * [KIP-447 Producer scalability for exactly once semantics](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) - - allows a single transactional producer to be used for multiple input - partitions. Requires Apache Kafka 2.5 or later. - * Transactional producer fixes and improvements; see **Transactional Producer fixes** below. - * The [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) - NuGet package now supports Linux ARM64/Aarch64.
- - -## Upgrade considerations - - * Sticky producer partitioning (`sticky.partitioning.linger.ms`) is - enabled by default (10 milliseconds), which affects the distribution of - randomly partitioned messages: where previously these messages would be - evenly distributed over the available partitions, they are now partitioned - to a single partition for the duration of the sticky time - (10 milliseconds by default) before a new random sticky partition - is selected. - * The new KIP-447 transactional producer scalability guarantees are only - supported on Apache Kafka 2.5 or later; on earlier releases you will - need to use one producer per input partition for EOS. This limitation - is not enforced by the producer or broker. - * Error handling for the transactional producer has been improved; see - the **Transactional Producer fixes** below for more information. - - -## Known issues - - * The Transactional Producer's API timeout handling is inconsistent with the - underlying protocol requests; it is therefore strongly recommended that - applications call `rd_kafka_commit_transaction()` and - `rd_kafka_abort_transaction()` with the `timeout_ms` parameter - set to `-1`, which will use the remaining transaction timeout. - - -## Enhancements - - * KIP-107, KIP-204: AdminAPI: Added `DeleteRecords()` (by @gridaphobe). - * KIP-229: AdminAPI: Added `DeleteGroups()` (by @gridaphobe). - * KIP-496: AdminAPI: Added `DeleteConsumerGroupOffsets()`. - * KIP-464: AdminAPI: Added support for broker-side default partition count - and replication factor for `CreateTopics()`. - * Windows: Added `ssl.ca.certificate.stores` to specify a list of - Windows Certificate Stores to read CA certificates from, e.g., - `CA,Root`. `Root` remains the default store. - * Use reentrant `rand_r()` on supporting platforms, which decreases lock - contention (@azat). - * Added `assignor` debug context for troubleshooting consumer partition - assignments. - * Updated to OpenSSL v1.1.1i when building dependencies. - * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3, - which has vast performance improvements. - * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the - default topic configuration object from a global configuration object. - * Added `conf` debugging context to `debug` - shows set configuration - properties on client and topic instantiation. Sensitive properties - are redacted. - * Added `rd_kafka_queue_yield()` to cancel a blocking queue call. - * Will now log a warning when multiple ClusterIds are seen, which is an - indication that the client might be erroneously configured to connect to - multiple clusters, which is not supported. - * Added `rd_kafka_seek_partitions()` to seek multiple partitions to - per-partition specific offsets. - - -## Fixes - -### General fixes - - * Fix a use-after-free crash when certain coordinator requests were retried. - * The C++ `oauthbearer_set_token()` function would call `free()` on - a `new`-created pointer, possibly leading to crashes or heap corruption (#3194) - -### Consumer fixes - - * The consumer assignment and consumer group implementations have been - decoupled, simplified and made more strict and robust. This will sort out - a number of edge cases for the consumer where the behaviour was previously - undefined. - * Partition fetch state was not set to STOPPED if OffsetCommit failed. - * The session timeout is now enforced locally also when the coordinator - connection is down, which was not previously the case.
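Applications affected by the sticky-partitioning upgrade note above can restore the previous random distribution through configuration; a minimal C++ sketch, under the assumption that setting the property to 0 disables the sticky behaviour:

```cpp
std::string errstr;
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
// 0 ms sticky time: pick a new random partition for every unkeyed message.
if (conf->set("sticky.partitioning.linger.ms", "0", errstr) !=
    RdKafka::Conf::CONF_OK)
  std::cerr << errstr << "\n";
```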
- - -### Transactional Producer fixes - - * Transaction commit or abort failures on the broker, such as when the - producer was fenced by a newer instance, were not propagated to the - application, resulting in failed commits seeming successful. - This was a critical race condition for applications that had a delay after - producing messages (or sending offsets) before committing or - aborting the transaction. This issue has now been fixed and test coverage - improved. - * The transactional producer API would return `RD_KAFKA_RESP_ERR__STATE` - when API calls were attempted after the transaction had failed; we now - try to return the error that caused the transaction to fail in the first - place, such as `RD_KAFKA_RESP_ERR__FENCED` when the producer has - been fenced, or `RD_KAFKA_RESP_ERR__TIMED_OUT` when the transaction - has timed out. - * Transactional producer retry count for transactional control protocol - requests has been increased from 3 to infinite; retriable errors - are now automatically retried by the producer until success or the - transaction timeout is exceeded. This fixes the case where - `rd_kafka_send_offsets_to_transaction()` would fail the current - transaction into an abortable state when `CONCURRENT_TRANSACTIONS` was - returned by the broker (which is a transient error) and the 3 retries - were exhausted. - - -### Producer fixes - - * Calling `rd_kafka_topic_new()` with a topic config object with - `message.timeout.ms` set could sometimes adjust the global `linger.ms` - property (if not explicitly configured), which was not desired; this is now - fixed and the auto adjustment is only done based on the - `default_topic_conf` at producer creation. - * `rd_kafka_flush()` could previously return `RD_KAFKA_RESP_ERR__TIMED_OUT` - just as the timeout was reached if the messages had been flushed but - there were now no more messages. This has been fixed. - - - - -# librdkafka v1.5.3 - -librdkafka v1.5.3 is a maintenance release. - -## Upgrade considerations - - * CentOS 6 is now EOL and is no longer included in binary librdkafka packages, - such as NuGet. - -## Fixes - -### General fixes - - * Fix a use-after-free crash when certain coordinator requests were retried. - * Coordinator requests could be left uncollected on instance destroy, which - could lead to a hang. - * Fix rare 1 second stalls by forcing rdkafka main thread wakeup when a new - next-timer-to-be-fired is scheduled. - * Fix additional cases where broker-side automatic topic creation might be - triggered unexpectedly. - * AdminAPI: The operation_timeout (on-broker timeout) previously defaulted to 0, - but now defaults to `socket.timeout.ms` (60s). - * Fix possible crash for Admin API protocol requests that fail at the - transport layer or prior to sending. - - -### Consumer fixes - - * Consumer would not filter out messages for aborted transactions - if the messages were compressed (#3020). - * Consumer destroy without prior `close()` could hang in certain - cgrp states (@gridaphobe, #3127). - * Fix possible null dereference in `Message::errstr()` (#3140). - * The `roundrobin` partition assignment strategy could get stuck in an - endless loop or generate uneven assignments in case the group members - had asymmetric subscriptions (e.g., c1 subscribes to t1,t2 while c2 - subscribes to t2,t3).
(#3159) - * Mixing committed and logical or absolute offsets in the partitions - passed to `rd_kafka_assign()` would in previous releases ignore the - logical or absolute offsets and use the committed offsets for all partitions. - This is now fixed. (#2938) - - - - -# librdkafka v1.5.2 - -librdkafka v1.5.2 is a maintenance release. - - -## Upgrade considerations - - * The default value for the producer configuration property `retries` has - been increased from 2 to infinity, effectively limiting Produce retries to - only `message.timeout.ms`. - As the reasons for the automatic internal retries vary (various broker error - codes as well as transport layer issues), it doesn't make much sense to limit - the number of retries for retriable errors, but instead only limit the - retries based on the allowed time to produce a message. - * The default value for the producer configuration property - `request.timeout.ms` has been increased from 5 to 30 seconds to match - the Apache Kafka Java producer default. - This change yields increased robustness for broker-side congestion. - - -## Enhancements - - * The generated `CONFIGURATION.md` (through `rd_kafka_conf_properties_show()`) - now includes all properties and values, regardless of whether they were - included in the build, and setting a disabled property or value through - `rd_kafka_conf_set()` now returns `RD_KAFKA_CONF_INVALID` and provides - a more useful error string saying why the property can't be set. - * Consumer configs on producers and vice versa will now be logged with - warning messages on client instantiation. - -## Fixes - -### Security fixes - - * There was an incorrect call to zlib's `inflateGetHeader()` with - uninitialized memory pointers that could lead to the GZIP header of a fetched - message batch being copied to arbitrary memory. - This function call has now been completely removed since the result was - not used. - Reported by Ilja van Sprundel. - - -### General fixes - - * `rd_kafka_topic_opaque()` (used by the C++ API) would cause object - refcounting issues when used on light-weight (error-only) topic objects - such as consumer errors (#2693). - * Handle name resolution failures when formatting IP addresses in error logs, - and increase printed hostname limit to ~256 bytes (was ~60). - * Broker sockets would be closed twice (thus leading to a potential race - condition with fd-reuse in other threads) if a custom `socket_cb` - returned an error. - -### Consumer fixes - - * The `roundrobin` `partition.assignment.strategy` could crash (assert) - for certain combinations of members and partitions. - This is a regression in v1.5.0. (#3024) - * The C++ `KafkaConsumer` destructor did not destroy the underlying - C `rd_kafka_t` instance, causing a leak if `close()` was not used. - * Expose rich error strings for C++ Consumer `Message->errstr()`. - * The consumer could get stuck if an outstanding commit failed during - rebalancing (#2933). - * Topic authorization errors during fetching are now reported only once (#3072). - -### Producer fixes - - * Topic authorization errors are now properly propagated for produced messages, - both through delivery reports and as the `ERR_TOPIC_AUTHORIZATION_FAILED` - return value from `produce*()` (#2215) - * Treat cluster authentication failures as fatal in the transactional - producer (#2994).
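The delivery-report propagation fix above (#2215) is observable from a callback like the following hedged C++ sketch; the class name is illustrative:

```cpp
// Per-message errors (e.g. ERR_TOPIC_AUTHORIZATION_FAILED) surface here.
class DrCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb(RdKafka::Message &message) override {
    if (message.err())
      std::cerr << "delivery failed: " << message.errstr() << "\n";
  }
};

// Registration (before Producer::create()):
//   DrCb dr_cb_instance;
//   conf->set("dr_cb", &dr_cb_instance, errstr);
```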
- - -*Note: there was no v1.5.1 librdkafka release* - - - - -# librdkafka v1.5.0 - -The v1.5.0 release brings usability improvements, enhancements and fixes to -librdkafka. - -## Enhancements - - * Improved broker connection error reporting with more useful information and - hints on the cause of the problem. - * Consumer: Propagate errors when subscribing to unavailable topics (#1540) - * Producer: Add `batch.size` producer configuration property (#638) - * Add `topic.metadata.propagation.max.ms` to allow newly manually created - topics to be propagated throughout the cluster before reporting them - as non-existent. This fixes race issues where CreateTopics() is - quickly followed by produce(). - * Prefer the least idle connection for periodic metadata refreshes, among other - uses, to allow truly idle connections to time out and to avoid load-balancer-killed - idle connection errors (#2845) - * Added `rd_kafka_event_debug_contexts()` to get the debug contexts for - a debug log line (by @wolfchimneyrock). - * Added test scenarios which define the cluster configuration. - * Added MinGW-w64 builds (@ed-alertedh, #2553) - * `./configure --enable-XYZ` now requires the XYZ check to pass, - and `--disable-XYZ` disables the feature altogether (@benesch) - * Added `rd_kafka_produceva()` which takes an array of produce arguments - for situations where the existing `rd_kafka_producev()` va-arg approach - can't be used. - * Added `rd_kafka_message_broker_id()` to see the broker that a message - was produced or fetched from, or that an error was associated with. - * Added RTT/delay simulation to mock brokers. - - -## Upgrade considerations - - * Subscribing to non-existent and unauthorized topics will now propagate - the errors `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` and - `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED` to the application through - the standard consumer error (the err field in the message object); see the - sketch after this list. - * The consumer will no longer trigger auto creation of topics; - `allow.auto.create.topics=true` may be used to re-enable the old deprecated - functionality. - * The default consumer pre-fetch queue threshold `queued.max.messages.kbytes` - has been decreased from 1GB to 64MB to avoid excessive network usage for low - and medium throughput consumer applications. High throughput consumer - applications may need to manually set this property to a higher value. - * The default consumer Fetch wait time has been increased from 100ms to 500ms - to avoid excessive network usage for low throughput topics. - * If OpenSSL is linked statically, or `ssl.ca.location=probe` is configured, - librdkafka will probe known CA certificate paths and automatically use the - first one found. This should alleviate the need to configure - `ssl.ca.location` when the statically linked OpenSSL's OPENSSLDIR differs - from the system's CA certificate path. - * The heuristics for handling Apache Kafka < 0.10 brokers have been removed to - improve connection error handling for modern Kafka versions. - Users on brokers 0.9.x or older should already be configuring - `api.version.request=false` and `broker.version.fallback=...` so there - should be no functional change. - * The default producer batch accumulation time, `linger.ms`, has been changed - from 0.5ms to 5ms to improve batch sizes and throughput while reducing - the per-message protocol overhead. - Applications that require lower produce latency than 5ms will need to - manually set `linger.ms` to a lower value. - * librdkafka's build tooling now requires Python 3.x (python3 interpreter).
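A minimal sketch of where these subscription errors now surface, assuming an already-subscribed high-level consumer `rk` (the `poll_once()` helper is illustrative):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Poll once and route the new subscription errors, which now arrive as
 * error events on the consumer queue instead of being swallowed. */
static void poll_once(rd_kafka_t *rk) {
        rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 1000);
        if (!msg)
                return; /* timed out: no message or event */

        if (msg->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART ||
            msg->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
                fprintf(stderr, "Subscription problem: %s\n",
                        rd_kafka_message_errstr(msg));
        else if (!msg->err)
                printf("Message at offset %lld\n", (long long)msg->offset);

        rd_kafka_message_destroy(msg);
}
```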
- -## Fixes - -### General fixes - - * The client could crash in rare circumstances on ApiVersion or - SaslHandshake request timeouts (#2326) - * `./configure --LDFLAGS='a=b, c=d'` and similar arguments containing `=` are now - supported (by @sky92zwq). - * `./configure` arguments now take precedence over cached `configure` variables - from a previous invocation. - * Fix theoretical crash on coord request failure. - * Unknown partition error could be triggered for existing partitions when - additional partitions were added to a topic (@benesch, #2915) - * Quickly refresh topic metadata for desired but non-existent partitions. - This will speed up the initial discovery delay when new partitions are added - to an existing topic (#2917). - - -### Consumer fixes - - * The roundrobin partition assignor could crash if subscriptions - were asymmetrical (different sets from different members of the group). - Thanks to @ankon and @wilmai for identifying the root cause (#2121). - * The consumer assignors could ignore some topics if there were more subscribed - topics than consumers taking part in the assignment. - * The consumer would connect to all partition leaders of a topic even - for partitions that were not being consumed (#2826). - * Initial consumer group joins should now be a couple of seconds quicker - thanks to expedited query intervals (@benesch). - * Fix crash and/or inconsistent subscriptions when using multiple consumers - (in the same process) with wildcard topics on Windows. - * Don't propagate temporary offset lookup errors to the application. - * Immediately refresh topic metadata when partitions are reassigned to other - brokers, avoiding a fetch stall of up to `topic.metadata.refresh.interval.ms`. (#2955) - * Memory for batches containing control messages would not be freed when - using the batch consume APIs (@pf-qiu, #2990). - - -### Producer fixes - - * Proper locking for transaction state in the EndTxn handler. - - - -# librdkafka v1.4.4 - -v1.4.4 is a maintenance release with the following fixes and enhancements: - - * The transactional producer could crash on request timeout due to dereferencing - a NULL pointer of a non-existent response object. - * Mark `rd_kafka_send_offsets_to_transaction()` CONCURRENT_TRANSACTION (et al.) - errors as retriable (see the sketch below). - * Fix crash on transactional coordinator FindCoordinator request failure. - * Minimize broker re-connect delay when a broker's connection is needed to - send requests. - * Proper locking for transaction state in the EndTxn handler. - * `socket.timeout.ms` was ignored when `transactional.id` was set. - * Added RTT/delay simulation to mock brokers.
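Several of the transactional fixes above concern how commit and abort errors are surfaced. A minimal sketch of the error-handling pattern, assuming an initialized transactional producer `rk` (the `commit_txn()` helper is illustrative):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Commit the current transaction, aborting it when librdkafka reports
 * that the error is abortable rather than retriable or fatal. */
static void commit_txn(rd_kafka_t *rk) {
        rd_kafka_error_t *error = rd_kafka_commit_transaction(rk, 60 * 1000);

        if (!error)
                return; /* transaction committed */

        fprintf(stderr, "Commit failed: %s\n", rd_kafka_error_string(error));

        /* Retriable errors could simply be retried (see
         * rd_kafka_error_is_retriable()); abortable errors require
         * aborting the transaction. */
        if (rd_kafka_error_txn_requires_abort(error)) {
                rd_kafka_error_destroy(error);
                error = rd_kafka_abort_transaction(rk, 60 * 1000);
        }
        if (error)
                rd_kafka_error_destroy(error);
}
```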
- -*Note: there was no v1.4.3 librdkafka release* - - - -# librdkafka v1.4.2 - -v1.4.2 is a maintenance release with the following fixes and enhancements: - - * Fix produce/consume hang after partition goes away and comes back, - such as when a topic is deleted and re-created. - * Consumer: Reset the stored offset when partitions are un-assign()ed (fixes #2782). - This fixes the case where a manual offset-less commit() or the auto-committer - would commit a stored offset from a previous assignment before - a new message was consumed by the application. - * Probe known CA cert paths and set default `ssl.ca.location` accordingly - if OpenSSL is statically linked or `ssl.ca.location` is set to `probe`. - * Per-partition OffsetCommit errors were unhandled (fixes #2791) - * Seed the PRNG (random number generator) by default, allow application to - override with `enable.random.seed=false` (#2795) - * Fix stack overwrite (of 1 byte) when SaslHandshake MechCnt is zero - * Align bundled c11 threads (tinycthreads) constants to glibc and musl (#2681) - * Fix return value of rd_kafka_test_fatal_error() (by @ckb42) - * Ensure CMake sets disabled defines to zero on Windows (@benesch) - - -*Note: there was no v1.4.1 librdkafka release* - - - - - -# Older releases - -See https://github.com/edenhill/librdkafka/releases diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CONFIGURATION.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CONFIGURATION.md deleted file mode 100755 index aea22534..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/CONFIGURATION.md +++ /dev/null @@ -1,174 +0,0 @@ -# Configuration properties -## Global configuration properties - -Property | C/P | Range | Default | Importance | Description ------------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* -client.id | * | | rdkafka | low | Client identifier.
*Type: string* -metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* -bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* -message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests; the broker will enforce the topic's `max.message.bytes` limit (see Apache Kafka documentation).
*Type: integer* -message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | low | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
*Type: integer* -receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | medium | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.
*Type: integer* -max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | low | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* -max.in.flight | * | 1 .. 1000000 | 1000000 | low | Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* -topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.
*Type: integer* -metadata.max.age.ms | * | 1 .. 86400000 | 900000 | low | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3
*Type: integer* -topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | low | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
*Type: integer* -topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | low | **DEPRECATED** No longer used.
*Type: integer* -topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* -topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
*Type: integer* -topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* -debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* -socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
*Type: integer* -socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used.
*Type: integer* -socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0.
*Type: integer* -socket.receive.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket receive buffer size. System default is used if 0.
*Type: integer* -socket.keepalive.enable | * | true, false | false | low | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets
*Type: boolean* -socket.nagle.disable | * | true, false | false | low | Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.
*Type: boolean* -socket.max.fails | * | 0 .. 1000000 | 1 | low | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker becoming desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.
*Type: integer* -broker.address.ttl | * | 0 .. 86400000 | 1000 | low | How long to cache the broker address resolving results (milliseconds).
*Type: integer* -broker.address.family | * | any, v4, v6 | any | low | Allowed broker IP address families: any, v4, v6
*Type: enum value* -connections.max.idle.ms | * | 0 .. 2147483647 | 0 | medium | Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).
*Type: integer* -reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 0 | low | **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`.
*Type: integer* -reconnect.backoff.ms | * | 0 .. 3600000 | 100 | medium | The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.
*Type: integer* -reconnect.backoff.max.ms | * | 0 .. 3600000 | 10000 | medium | The maximum time to wait before reconnecting to a broker after the connection has been closed.
*Type: integer* -statistics.interval.ms | * | 0 .. 86400000 | 0 | high | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
*Type: integer* -enabled_events | * | 0 .. 2147483647 | 0 | low | See `rd_kafka_conf_set_events()`
*Type: integer* -error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: see dedicated API* -throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: see dedicated API* -stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: see dedicated API* -log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb())
*Type: see dedicated API* -log_level | * | 0 .. 7 | 6 | low | Logging level (syslog(3) levels)
*Type: integer* -log.queue | * | true, false | false | low | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
*Type: boolean* -log.thread.name | * | true, false | true | low | Print internal thread name in log messages (useful for debugging librdkafka internals)
*Type: boolean* -enable.random.seed | * | true, false | true | low | If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().
*Type: boolean* -log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.
*Type: boolean* -background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb())
*Type: see dedicated API* -socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC
*Type: see dedicated API* -connect_cb | * | | | low | Socket connect callback
*Type: see dedicated API* -closesocket_cb | * | | | low | Socket close callback
*Type: see dedicated API* -open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: see dedicated API* -opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: see dedicated API* -default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: see dedicated API* -internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* -api.version.request | * | true, false | true | high | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
*Type: boolean* -api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests.
*Type: integer* -api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* -broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* -security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers.
*Type: enum value* -ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. See the manual pages for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.
*Type: string* -ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* -ssl.sigalgs.list | * | | | low | The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* -ssl.key.location | * | | | low | Path to client's private key (PEM) used for authentication.
*Type: string* -ssl.key.password | * | | | low | Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)
*Type: string* -ssl.key.pem | * | | | low | Client's private key string (PEM format) used for authentication.
*Type: string* -ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.certificate.location | * | | | low | Path to client's public key (PEM) used for authentication.
*Type: string* -ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication.
*Type: string* -ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew to provide CA certificates. On Linux, install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe`, a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
*Type: string* -ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key.
*Type: string* -ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
*Type: string* -ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* -ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* -ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password.
*Type: string* -ssl.engine.location | * | | | low | Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.
*Type: string* -ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading engine.
*Type: string* -ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
*Type: see dedicated API* -enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
*Type: boolean* -ssl.endpoint.identification.algorithm | * | none, https | none | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* -ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: see dedicated API* -sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name, only one mechanism must be configured.
*Type: string* -sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name, only one mechanism must be configured.
*Type: string* -sasl.kerberos.service.name | * | | kafka | low | Kerberos principal name that Kafka runs as, not including /hostname@REALM
*Type: string* -sasl.kerberos.principal | * | | kafkaclient | low | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
*Type: string* -sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \|\| kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.
*Type: string* -sasl.kerberos.keytab | * | | | low | Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`.
*Type: string* -sasl.kerberos.min.time.before.relogin | * | 0 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.
*Type: integer* -sasl.username | * | | | high | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
*Type: string* -sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* -sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* -enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* -oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et al.). This callback will be triggered when it is time to refresh the client's OAUTHBEARER token.
*Type: see dedicated API* -plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* -interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API* -group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* -group.instance.id | C | | | medium | Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.
*Type: string* -partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
*Type: string* -session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* -heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval.
*Type: integer* -group.protocol.type | C | | consumer | low | Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* -coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* -max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information.
*Type: integer* -enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* -auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | medium | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
*Type: integer* -enable.auto.offset.store | C | true, false | true | high | Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.
*Type: boolean* -queued.min.messages | C | 1 .. 10000000 | 100000 | medium | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
*Type: integer* -queued.max.messages.kbytes | C | 1 .. 2097151 | 65536 | medium | Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* -fetch.wait.max.ms | C | 0 .. 300000 | 500 | low | Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.
*Type: integer* -fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | medium | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* -max.partition.fetch.bytes | C | 1 .. 1000000000 | 1048576 | medium | Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* -fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | medium | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
*Type: integer* -fetch.min.bytes | C | 1 .. 100000000 | 1 | low | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
*Type: integer* -fetch.error.backoff.ms | C | 0 .. 300000 | 500 | medium | How long to postpone the next fetch request for a topic+partition in case of a fetch error.
*Type: integer* -offset.store.method | C | none, file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* -isolation.level | C | read_uncommitted, read_committed | read_committed | high | Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.
*Type: enum value* -consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: see dedicated API* -rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
*Type: see dedicated API* -offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: see dedicated API* -enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* -check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* -allow.auto.create.topics | C | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version >= 0.11.0.0; for older broker versions, only the broker configuration applies.
*Type: boolean* -client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
*Type: string* -transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
*Type: string* -transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
*Type: integer* -enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
*Type: boolean* -enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
*Type: boolean* -queue.buffering.max.messages | P | 1 .. 10000000 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
*Type: integer* -queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* -queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* -linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* -message.send.max.retries | P | 0 .. 2147483647 | 2147483647 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retries | P | 0 .. 2147483647 | 2147483647 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retry.backoff.ms | P | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request.
*Type: integer* -queue.buffering.backpressure.threshold | P | 1 .. 1000000 | 1 | low | The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
*Type: integer* -compression.codec | P | none, gzip, snappy, lz4, zstd | none | medium | Compression codec to use for compressing message sets. This is the default value for all topics and may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: Compression codec to use for compressing message sets. This is the default value for all topics and may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.
*Type: integer* -batch.size | P | 1 .. 2147483647 | 1000000 | medium | Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.
*Type: integer* -delivery.report.only.error | P | true, false | false | low | Only provide delivery reports for failed messages.
*Type: boolean* -dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: see dedicated API* -dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
*Type: see dedicated API* -sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
*Type: integer* - - -## Topic configuration properties - -Property | C/P | Range | Default | Importance | Description ------------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -request.required.acks | P | -1 .. 1000 | -1 | high | This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* -acks | P | -1 .. 1000 | -1 | high | Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* -request.timeout.ms | P | 1 .. 900000 | 30000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* -message.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* -delivery.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* -queuing.strategy | P | fifo, lifo | fifo | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages.
*Type: enum value* -produce.offset.report | P | true, false | false | low | **DEPRECATED** No longer used.
*Type: boolean* -partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).
*Type: string* -partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: see dedicated API* -msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: see dedicated API* -opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: see dedicated API* -compression.codec | P | none, gzip, snappy, lz4, zstd, inherit | inherit | high | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration.
*Type: enum value* -compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -compression.level | P | -1 .. 12 | -1 | medium | Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.
*Type: integer* -auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* -enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* -auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | high | [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage.
*Type: integer* -auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
*Type: enum value* -offset.store.path | C | | . | low | **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version.
*Type: string* -offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | low | **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version.
*Type: integer* -offset.store.method | C | file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.).
*Type: enum value* -consume.callback.max.messages | C | 0 .. 1000000 | 0 | low | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited)
*Type: integer* - -### C/P legend: C = Consumer, P = Producer, * = both diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/INTRODUCTION.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/INTRODUCTION.md deleted file mode 100755 index 6eed11c3..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/INTRODUCTION.md +++ /dev/null @@ -1,2017 +0,0 @@ -# Introduction to librdkafka - the Apache Kafka C/C++ client library - - -librdkafka is a high performance C implementation of the Apache -Kafka client, providing a reliable and performant client for production use. -librdkafka also provides a native C++ interface. - - -**Table of Contents** - -- [Introduction to librdkafka - the Apache Kafka C/C++ client library](#introduction-to-librdkafka---the-apache-kafka-cc-client-library) - - [Performance](#performance) - - [High throughput](#high-throughput) - - [Low latency](#low-latency) - - [Latency measurement](#latency-measurement) - - [Compression](#compression) - - [Message reliability](#message-reliability) - - [Producer message delivery success](#producer-message-delivery-success) - - [Producer message delivery failure](#producer-message-delivery-failure) - - [Error: Timed out in transmission queue](#error-timed-out-in-transmission-queue) - - [Error: Timed out in flight to/from broker](#error-timed-out-in-flight-tofrom-broker) - - [Error: Temporary broker-side error](#error-temporary-broker-side-error) - - [Error: Temporary errors due to stale metadata](#error-temporary-errors-due-to-stale-metadata) - - [Error: Local time out](#error-local-time-out) - - [Error: Permanent errors](#error-permanent-errors) - - [Producer retries](#producer-retries) - - [Reordering](#reordering) - - [Idempotent Producer](#idempotent-producer) - - [Guarantees](#guarantees) - - [Ordering and message sequence numbers](#ordering-and-message-sequence-numbers) - - [Partitioner considerations](#partitioner-considerations) - - [Message timeout considerations](#message-timeout-considerations) - - [Leader change](#leader-change) - - [Error handling](#error-handling) - - [RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER](#rdkafkaresperroutofordersequencenumber) - - [RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER](#rdkafkaresperrduplicatesequencenumber) - - [RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID](#rdkafkaresperrunknownproducerid) - - [Standard errors](#standard-errors) - - [Message persistence status](#message-persistence-status) - - [Transactional Producer](#transactional-producer) - - [Error handling](#error-handling-1) - - [Old producer fencing](#old-producer-fencing) - - [Configuration considerations](#configuration-considerations) - - [Exactly Once Semantics (EOS) and transactions](#exactly-once-semantics-eos-and-transactions) - - [Usage](#usage) - - [Documentation](#documentation) - - [Initialization](#initialization) - - [Configuration](#configuration) - - [Example](#example) - - [Termination](#termination) - - [High-level KafkaConsumer](#high-level-kafkaconsumer) - - [Producer](#producer) - - [Speeding up termination](#speeding-up-termination) - - [Threads and callbacks](#threads-and-callbacks) - - [Brokers](#brokers) - - [SSL](#ssl) - - [Sparse connections](#sparse-connections) - - [Random broker 
selection](#random-broker-selection) - - [Persistent broker connections](#persistent-broker-connections) - - [Connection close](#connection-close) - - [Fetch From Follower](#fetch-from-follower) - - [Logging](#logging) - - [Debug contexts](#debug-contexts) - - [Feature discovery](#feature-discovery) - - [Producer API](#producer-api) - - [Simple Consumer API (legacy)](#simple-consumer-api-legacy) - - [Offset management](#offset-management) - - [Auto offset commit](#auto-offset-commit) - - [At-least-once processing](#at-least-once-processing) - - [Consumer groups](#consumer-groups) - - [Static consumer groups](#static-consumer-groups) - - [Topics](#topics) - - [Unknown or unauthorized topics](#unknown-or-unauthorized-topics) - - [Topic auto creation](#topic-auto-creation) - - [Metadata](#metadata) - - [< 0.9.3](#-093) - - [> 0.9.3](#-093) - - [Query reasons](#query-reasons) - - [Caching strategy](#caching-strategy) - - [Fatal errors](#fatal-errors) - - [Fatal producer errors](#fatal-producer-errors) - - [Fatal consumer errors](#fatal-consumer-errors) - - [Compatibility](#compatibility) - - [Broker version compatibility](#broker-version-compatibility) - - [Broker version >= 0.10.0.0 (or trunk)](#broker-version--01000-or-trunk) - - [Broker versions 0.9.0.x](#broker-versions-090x) - - [Broker versions 0.8.x.y](#broker-versions-08xy) - - [Detailed description](#detailed-description) - - [Supported KIPs](#supported-kips) - - [Supported protocol versions](#supported-protocol-versions) -- [Recommendations for language binding developers](#recommendations-for-language-binding-developers) - - [Expose the configuration interface pass-thru](#expose-the-configuration-interface-pass-thru) - - [Error constants](#error-constants) - - [Reporting client software name and version to broker](#reporting-client-software-name-and-version-to-broker) - - [Documentation reuse](#documentation-reuse) - - [Community support](#community-support) - - - - -## Performance - -librdkafka is a multi-threaded library designed for use on modern hardware and -it attempts to keep memory copying to a minimum. The payload of produced or -consumed messages may pass through without any copying -(if so desired by the application) putting no limit on message sizes. - -librdkafka allows you to decide if high throughput is the name of the game, -or if a low latency service is required, or a balance between the two, all -through the configuration property interface. - -The single most important configuration properties for performance tuning is -`linger.ms` - how long to wait for `batch.num.messages` or `batch.size` to -fill up in the local per-partition queue before sending the batch of messages -to the broker. - -In low throughput scenarios, a lower value improves latency. -As throughput increases, the cost of each broker request becomes significant -impacting both maximum throughput and latency. For higher throughput -applications, latency will typically be lower using a higher `linger.ms` due -to larger batches resulting in a lesser number of requests, yielding decreased -per-message load on the broker. A good general purpose setting is 5ms. -For applications seeking maximum throughput, the recommended value is >= 50ms. - - -### High throughput - -The key to high throughput is message batching - waiting for a certain amount -of messages to accumulate in the local queue before sending them off in -one large message set or batch to the peer. 
This amortizes the messaging -overhead and eliminates the adverse effect of the round trip time (rtt). - -`linger.ms` (also called `queue.buffering.max.ms`) allows librdkafka to -wait up to the specified amount of time to accumulate up to -`batch.num.messages` or `batch.size` in a single batch (MessageSet) before -sending to the broker. The larger the batch the higher the throughput. -Enabling `msg` debugging (set the `debug` property to `msg`) will emit log -messages for the accumulation process, which lets you see what batch sizes -are being produced. - -Example using `linger.ms=1`: - -```
-... test [0]: MessageSet with 1514 message(s) delivered
-... test [3]: MessageSet with 1690 message(s) delivered
-... test [0]: MessageSet with 1720 message(s) delivered
-... test [3]: MessageSet with 2 message(s) delivered
-... test [3]: MessageSet with 4 message(s) delivered
-... test [0]: MessageSet with 4 message(s) delivered
-... test [3]: MessageSet with 11 message(s) delivered
-```
 - -Example using `linger.ms=1000`: -```
-... test [0]: MessageSet with 10000 message(s) delivered
-... test [0]: MessageSet with 10000 message(s) delivered
-... test [0]: MessageSet with 4667 message(s) delivered
-... test [3]: MessageSet with 10000 message(s) delivered
-... test [3]: MessageSet with 10000 message(s) delivered
-... test [3]: MessageSet with 4476 message(s) delivered
-```
 - - -The default setting of `linger.ms=0.1` is not suitable for -high throughput; it is recommended to set this value to >50ms, with -throughput leveling out somewhere around 100-1000ms depending on -message produce pattern and sizes. - -These settings are set globally (`rd_kafka_conf_t`) but apply on a -per topic+partition basis. - - -### Low latency - -When low latency messaging is required, `linger.ms` should be -tuned to the maximum permitted producer-side latency. -Setting `linger.ms` to 0 or 0.1 will make sure messages are sent as -soon as possible. -Lower buffering time leads to smaller batches and larger per-message overheads, -increasing network, memory and CPU usage for producers, brokers and consumers. - -See [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency) for more info. - - -#### Latency measurement - -End-to-end latency is preferably measured by synchronizing clocks on producers -and consumers and using the message timestamp on the consumer to calculate -the full latency. Make sure the topic's `log.message.timestamp.type` is set to -the default `CreateTime` (Kafka topic configuration, not librdkafka topic). - -Latencies are typically incurred by the producer, network and broker; the -consumer's effect on end-to-end latency is minimal. - -To break down the end-to-end latencies and find where latencies are adding up -there are a number of metrics available through librdkafka statistics -on the producer: - - * `brokers[].int_latency` is the time, per message, between produce() - and the message being written to a MessageSet and ProduceRequest. - High `int_latency` indicates CPU core contention: check CPU load and - involuntary context switches (`/proc/<..>/status`). - Consider using a machine/instance with more CPU cores. - This metric is only relevant on the producer. - - * `brokers[].outbuf_latency` is the time, per protocol request - (such as ProduceRequest), between the request being enqueued (which happens - right after int_latency) and the time the request is written to the - TCP socket connected to the broker.
- High `outbuf_latency` indicates CPU core contention or network congestion: - check CPU load and socket SendQ (`netstat -anp | grep :9092`). - - * `brokers[].rtt` is the time, per protocol request, between the request being - written to the TCP socket and the time the response is received from - the broker. - High `rtt` indicates broker load or network congestion: - check broker metrics, local socket SendQ, network performance, etc. - - * `brokers[].throttle` is the time, per throttled protocol request, that the - broker throttled/delayed handling of a request due to usage quotas. - The throttle time will also be reflected in `rtt`. - - * `topics[].batchsize` is the size of individual Producer MessageSet batches. - See below. - - * `topics[].batchcnt` is the number of messages in individual Producer - MessageSet batches. Due to Kafka protocol overhead a batch with few messages - will have a higher relative processing and size overhead than a batch - with many messages. - Use the `linger.ms` client configuration property to set the maximum - amount of time allowed for accumulating a single batch; the larger the - value, the larger the batches will grow, thus increasing efficiency. - When producing messages at a high rate it is recommended to increase - linger.ms, which will improve throughput and in some cases also latency. - - -See [STATISTICS.md](STATISTICS.md) for the full definition of metrics. -A JSON schema for the statistics is available in -[statistics-schema.json](src/statistics-schema.json). - - -### Compression - -Producer message compression is enabled through the `compression.codec` -configuration property. - -Compression is performed on the batch of messages in the local queue; the -larger the batch, the higher the likelihood of a higher compression ratio. -The local batch queue size is controlled through the `batch.num.messages`, -`batch.size`, and `linger.ms` configuration properties as described in the -**High throughput** chapter above. - - - -## Message reliability - -Message reliability is an important factor of librdkafka - an application -can rely fully on librdkafka to deliver a message according to the specified -configuration (`request.required.acks` and `message.send.max.retries`, etc). - -If the topic configuration property `request.required.acks` is set to wait -for message commit acknowledgements from brokers (any value but 0, see -[`CONFIGURATION.md`](CONFIGURATION.md) -for specifics) then librdkafka will hold on to the message until -all expected acks have been received, gracefully handling the following events: - - * Broker connection failure - * Topic leader change - * Produce errors signaled by the broker - * Network problems - -We recommend `request.required.acks` to be set to `all` to make sure -produced messages are acknowledged by all in-sync replica brokers. - -This is handled automatically by librdkafka and the application does not need -to take any action at any of the above events. -The message will be resent up to `message.send.max.retries` times before -reporting a failure back to the application. - -The delivery report callback is used by librdkafka to signal the status of -a message back to the application; it will be called once for each message -to report the status of message delivery: - - * If `error_code` is non-zero the message delivery failed and the error_code - indicates the nature of the failure (`rd_kafka_resp_err_t` enum). - * If `error_code` is zero the message has been successfully delivered.
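A minimal sketch of such a delivery report callback might look like the following (the callback name and log output are illustrative, not prescribed by librdkafka):

```c
#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

/* Minimal delivery report callback: invoked once per message from
 * rd_kafka_poll() to report delivery success or failure. */
static void dr_msg_cb (rd_kafka_t *rk,
                       const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err)
                fprintf(stderr, "Delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
        else
                fprintf(stderr, "Delivered to partition %"PRId32
                        " at offset %"PRId64"\n",
                        rkmessage->partition, rkmessage->offset);
}

/* Register it on the conf object before calling rd_kafka_new():
 *   rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); */
```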
- -See the Producer API chapter for more details on delivery report callback usage. - -The delivery report callback is optional but highly recommended. - - -### Producer message delivery success - -When a ProduceRequest is successfully handled by the broker and a -ProduceResponse is received (also called the ack) without an error code, -the messages from the ProduceRequest are enqueued on the delivery report -queue (if a delivery report callback has been set) and will be passed to -the application on the next invocation of rd_kafka_poll(). - - -### Producer message delivery failure - -The following sub-chapters explain how different produce errors -are handled. - -If the error is retryable and there are remaining retry attempts for -the given message(s), an automatic retry will be scheduled by librdkafka; -these retries are not visible to the application. - -Only permanent errors and temporary errors that have reached their maximum -retry count will generate a delivery report event to the application with an -error code set. - -The application should typically not attempt to retry producing the message -on failure, but instead configure librdkafka to perform these retries -using the `retries` and `retry.backoff.ms` configuration properties. - - -#### Error: Timed out in transmission queue - -Internal error ERR__TIMED_OUT_QUEUE. - -The connectivity to the broker may be stalled due to networking contention, -local or remote system issues, etc, and the request has not yet been sent. - -The producer can be certain that the message has not been sent to the broker. - -This is a retryable error, but it is not counted as a retry attempt -since the message was never actually transmitted. - -A retry by librdkafka at this point will not cause duplicate messages. - - -#### Error: Timed out in flight to/from broker - -Internal errors ERR__TIMED_OUT, ERR__TRANSPORT. - -Same reasons as for `Timed out in transmission queue` above, with the -difference that the message may have been sent to the broker and might -be stalled waiting for broker replicas to ack the message, or the response -could be stalled due to networking issues. -At this point the producer can't know if the message reached the broker, -nor if the broker wrote the message to disk and replicas. - -This is a retryable error. - -A retry by librdkafka at this point may cause duplicate messages. - - -#### Error: Temporary broker-side error - -Broker errors ERR_REQUEST_TIMED_OUT, ERR_NOT_ENOUGH_REPLICAS, -ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND. - -These errors are considered temporary and librdkafka will retry them -if permitted by configuration. - - -#### Error: Temporary errors due to stale metadata - -Broker errors ERR_LEADER_NOT_AVAILABLE, ERR_NOT_LEADER_FOR_PARTITION. - -These errors are considered temporary and a retry is warranted; a metadata -request is automatically sent to find a new leader for the partition. - -A retry by librdkafka at this point will not cause duplicate messages. - - -#### Error: Local time out - -Internal error ERR__MSG_TIMED_OUT. - -The message could not be successfully transmitted before `message.timeout.ms` -expired, typically due to no leader being available or no broker connection. -The message may have been retried due to other errors but -those error messages are abstracted by the ERR__MSG_TIMED_OUT error code. - -Since the `message.timeout.ms` has passed there will be no more retries -by librdkafka.
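As a hedged sketch, the retry-related properties discussed above might be configured like this inside the application's setup code (the numeric values are illustrative only):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: let librdkafka own the retries, but cap the total time a
 * message may spend in the producer (values are illustrative). */
rd_kafka_conf_t *conf = rd_kafka_conf_new();
char errstr[512];

if (rd_kafka_conf_set(conf, "message.timeout.ms", "30000",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
    rd_kafka_conf_set(conf, "retries", "10",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
    rd_kafka_conf_set(conf, "retry.backoff.ms", "100",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
        fprintf(stderr, "conf failed: %s\n", errstr);
```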
- - -#### Error: Permanent errors - -Any other error is considered a permanent error and the message -will fail immediately, generating a delivery report event with the -distinctive error code. - -The full list of permanent errors depends on the broker version and -will likely grow in the future. - -Typical permanent broker errors are: - * ERR_CORRUPT_MESSAGE - * ERR_MSG_SIZE_TOO_LARGE - adjust client's or broker's `message.max.bytes`. - * ERR_UNKNOWN_TOPIC_OR_PART - topic or partition does not exist, - automatic topic creation is disabled on the - broker or the application is specifying a - partition that does not exist. - * ERR_RECORD_LIST_TOO_LARGE - * ERR_INVALID_REQUIRED_ACKS - * ERR_TOPIC_AUTHORIZATION_FAILED - * ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT - * ERR_CLUSTER_AUTHORIZATION_FAILED - - -### Producer retries - -The ProduceRequest itself is not retried; instead, the messages -are put back on the internal partition queue by an insert sort -that maintains their original position (the message order is defined -at the time a message is initially appended to a partition queue, i.e., after -partitioning). -A backoff time (`retry.backoff.ms`) is set on the retried messages which -effectively blocks retry attempts until the backoff time has expired. - - -### Reordering - -As for all retries, if `max.in.flight` > 1 and `retries` > 0, retried messages -may be produced out of order, since a subsequent message in a subsequent -ProduceRequest may already be in-flight (and accepted by the broker) -by the time the retry for the failing message is sent. - -Using the Idempotent Producer prevents reordering even with `max.in.flight` > 1; -see [Idempotent Producer](#idempotent-producer) below for more information. - - -### Idempotent Producer - -librdkafka supports the idempotent producer which provides strict ordering -and exactly-once producer guarantees. -The idempotent producer is enabled by setting the `enable.idempotence` -configuration property to `true`; this will automatically adjust a number of -other configuration properties to adhere to the idempotency requirements; -see the documentation of `enable.idempotence` in [CONFIGURATION.md](CONFIGURATION.md) for -more information. -Producer instantiation will fail if the user supplied an incompatible value -for any of the automatically adjusted properties, e.g., it is an error to -explicitly set `acks=1` when `enable.idempotence=true` is set. - - -#### Guarantees - -There are three types of guarantees that the idempotent producer can satisfy: - - * Exactly-once - a message is only written to the log once. - Does NOT cover the exactly-once consumer case. - * Ordering - a series of messages are written to the log in the - order they were produced. - * Gap-less - **EXPERIMENTAL** a series of messages are written once and - in order without risk of skipping messages. The sequence - of messages may be cut short and fail before all - messages are written, but may not fail individual - messages in the series. - This guarantee is disabled by default, but may be enabled - by setting `enable.gapless.guarantee` if individual message - failure is a concern. - Messages that fail due to exceeded timeout (`message.timeout.ms`) - are permitted by the gap-less guarantee and may cause - gaps in the message series without raising a fatal error. - See **Message timeout considerations** below for more info. - **WARNING**: This is an experimental property subject to - change or removal.
- -All three guarantees are in effect when idempotence is enabled, only -gap-less may be disabled individually. - - -#### Ordering and message sequence numbers - -librdkafka maintains the original produce() ordering per-partition for all -messages produced, using an internal per-partition 64-bit counter -called the msgid which starts at 1. This msgid allows messages to be -re-inserted in the partition message queue in the original order in the -case of retries. - -The Idempotent Producer functionality in the Kafka protocol also has -a per-message sequence number, which is a signed 32-bit wrapping counter that is -reset each time the Producer's ID (PID) or Epoch changes. - -The librdkafka msgid is used, along with a base msgid value stored -at the time the PID/Epoch was bumped, to calculate the Kafka protocol's -message sequence number. - -With Idempotent Producer enabled there is no risk of reordering despite -`max.in.flight` > 1 (capped at 5). - -**Note**: "MsgId" in log messages refer to the librdkafka msgid, while "seq" - refers to the protocol message sequence, "baseseq" is the seq of - the first message in a batch. - MsgId starts at 1, while message seqs start at 0. - - -The producer statistics also maintain two metrics for tracking the next -expected response sequence: - - * `next_ack_seq` - the next sequence to expect an acknowledgement for, which - is the last successfully produced MessageSet's last - sequence + 1. - * `next_err_seq` - the next sequence to expect an error for, which is typically - the same as `next_ack_seq` until an error occurs, in which - case the `next_ack_seq` can't be incremented (since no - messages were acked on error). `next_err_seq` is used to - properly handle sub-sequent errors due to a failing - first request. - -**Note**: Both are exposed in partition statistics. - - - -#### Partitioner considerations - -Strict ordering is guaranteed on a **per partition** basis. - -An application utilizing the idempotent producer should not mix -producing to explicit partitions with partitioner-based partitions -since messages produced for the latter are queued separately until -a topic's partition count is known, which would insert these messages -after the partition-explicit messages regardless of produce order. - - -#### Message timeout considerations - -If messages time out (due to `message.timeout.ms`) while in the producer queue -there will be gaps in the series of produced messages. - -E.g., Messages 1,2,3,4,5 are produced by the application. - While messages 2,3,4 are transmitted to the broker the connection to - the broker goes down. - While the broker is down the message timeout expires for message 2 and 3. - As the connection comes back up messages 4, 5 are transmitted to the - broker, resulting in a final written message sequence of 1, 4, 5. - -The producer gracefully handles this case by draining the in-flight requests -for a given partition when one or more of its queued (not transmitted) -messages are timed out. When all requests are drained the Epoch is bumped and -the base sequence number is reset to the first message in the queue, effectively -skipping the timed out messages as if they had never existed from the -broker's point of view. -The message status for timed out queued messages will be -`RD_KAFKA_MSG_STATUS_NOT_PERSISTED`. 
- -If messages time out while in-flight to the broker (also due to -`message.timeout.ms`), the protocol request will fail, the broker -connection will be closed by the client, and the timed out messages will be -removed from the producer queue. In this case the in-flight messages may be -written to the topic log by the broker, even though -a delivery report with error `ERR__MSG_TIMED_OUT` will be raised, since -the producer timed out the request before getting an acknowledgement back -from the broker. -The message status for timed out in-flight messages will be -`RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED`, indicating that the producer -does not know if the messages were written and acked by the broker, -or dropped in-flight. - -An application may inspect the message status by calling -`rd_kafka_message_status()` on the message in the delivery report callback, -to see if the message was (possibly) persisted (written to the topic log) by -the broker or not. - -Despite the graceful handling of timeouts, we recommend using a -large `message.timeout.ms` to minimize the risk of timeouts. - -**Warning**: `enable.gapless.guarantee` does not apply to timed-out messages. - -**Note**: `delivery.timeout.ms` is an alias for `message.timeout.ms`. - - -#### Leader change - -There are corner cases where an Idempotent Producer has outstanding -ProduceRequests in-flight to the previous leader while a new leader is elected. - -A leader change is typically triggered by the original leader -failing or terminating, which has the risk of also failing (some of) the -in-flight ProduceRequests to that broker. To recover the producer to a -consistent state, it will not send any ProduceRequests for these partitions to -the new leader broker until all responses for any outstanding ProduceRequests -to the previous partition leader have been received, or these requests have -timed out. -This drain may take up to `min(socket.timeout.ms, message.timeout.ms)`. -If the connection to the previous broker goes down, the outstanding requests -are failed immediately. - - -#### Error handling - -Background: -The error handling for the Idempotent Producer, as initially proposed -in the [EOS design document](https://docs.google.com/document/d/11Jqy_GjUGtdXJK94XGsEIK7CP1SnQGdp2eF0wSw9ra8), -missed some corner cases which are now being addressed in [KIP-360](https://cwiki.apache.org/confluence/display/KAFKA/KIP-360%3A+Improve+handling+of+unknown+producer). -There were some intermediate fixes and workarounds prior to KIP-360 that proved -to be incomplete and made the error handling in the client overly complex. -With the benefit of hindsight the librdkafka implementation will attempt -to provide correctness from the lessons learned in the Java client and -provide stricter and less complex error handling. - -The following sections describe librdkafka's handling of the -Idempotent Producer-specific errors that may be returned by the broker. - - -##### RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER - -This error is returned by the broker when the sequence number in the -ProduceRequest is larger than the expected next sequence -for the given PID+Epoch+Partition (last BaseSeq + msgcount + 1). -Note: sequence 0 is always accepted. - -If the failed request is the head-of-line (next expected sequence to be acked) -it indicates desynchronization between the client and broker: -the client thinks the sequence number is correct but the broker disagrees.
-There is no way for the client to recover from this scenario without -risking message loss or duplication, and it is not safe for the -application to manually retry messages. -A fatal error (`RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER`) is raised. - -When the request is not head-of-line, the previous request failed -(for any reason), which means the messages in the current request -can be retried after waiting for all outstanding requests for this -partition to drain, after which the Producer ID is reset and production -starts over. - - -**Java Producer behaviour**: -Fail the batch, reset the pid, and then continue producing -(and retrying subsequent) messages. This will lead to gaps -in the message series. - - - -##### RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER - -Returned by the broker when the request's base sequence number is -less than the expected sequence number (which is the last written -sequence + msgcount). -Note: sequence 0 is always accepted. - -This error is typically benign and occurs upon retrying a previously successful -send that was not acknowledged. - -The messages will be considered successfully produced but will have neither -timestamp nor offset set. - - -**Java Producer behaviour:** -Treats the message as successfully delivered. - - -##### RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID - -Returned by the broker when the PID+Epoch is unknown, which may occur when -the PID's state has expired (due to topic retention, DeleteRecords, -or compaction). - -The Java producer added quite a bit of error handling for this case, -extending the ProduceRequest protocol to return the logStartOffset -to give the producer a chance to differentiate between an actual -UNKNOWN_PRODUCER_ID and topic retention having deleted the last -message for this producer (effectively voiding the Producer ID cache). -This workaround proved to be error-prone (see explanation in KIP-360) -when the partition leader changed. - -KIP-360 suggests removing this error checking in favour of failing fast, -and librdkafka follows suit. - - -If the response is for the first ProduceRequest in-flight -and there are no messages waiting to be retried nor any ProduceRequests -unaccounted for, then the error is ignored and the epoch is incremented; -this is likely to happen for an idle producer whose last written -message has been deleted from the log, and thus its PID state. -Otherwise the producer raises a fatal error -(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) since the delivery guarantees can't -be satisfied. - - -**Java Producer behaviour:** -Retries the send in some cases (but KIP-360 will change this). -Not a fatal error in any case. - - -##### Standard errors - -All the standard Produce errors are handled in the usual way: -permanent errors will fail the messages in the batch, while -temporary errors will be retried (if retry count permits). - -If a permanent error is returned for a batch in a series of in-flight batches, -the subsequent batches will fail with -RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER since the sequence number of the -failed batch was never written to the topic log and the next expected sequence -was thus not incremented on the broker. - -A fatal error (RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE) is raised to satisfy -the gap-less guarantee (if `enable.gapless.guarantee` is set) by failing all -queued messages.
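For reference, opting in to the idempotent producer discussed in this chapter is a one-property change; a minimal sketch, assuming a `conf` object and `errstr` buffer set up as in the configuration example later in this document:

```c
/* Sketch: enable the idempotent producer. librdkafka adjusts the
 * related properties (acks, max.in.flight, retries) itself and
 * rejects explicitly incompatible settings such as acks=1. */
if (rd_kafka_conf_set(conf, "enable.idempotence", "true",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
        fprintf(stderr, "%s\n", errstr);
```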
- - -##### Message persistence status - -To help the application decide what to do in these error cases, a new -per-message API is introduced, `rd_kafka_message_status()`, -which returns one of the following values: - - * `RD_KAFKA_MSG_STATUS_NOT_PERSISTED` - the message has never - been transmitted to the broker, or failed with an error indicating - it was not written to the log. - Application retry will risk ordering, but not duplication. - * `RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED` - the message was transmitted - to the broker, but no acknowledgement was received. - Application retry will risk ordering and duplication. - * `RD_KAFKA_MSG_STATUS_PERSISTED` - the message was written to the log by - the broker and fully acknowledged. - No reason for application to retry. - -This method should be called by the application on delivery report error. - - -### Transactional Producer - - -#### Error handling - -Using the transactional producer simplifies error handling compared to the -standard or idempotent producer; a transactional application only needs -to care about these different types of errors: - - * Retriable errors - the operation failed due to temporary problems, - such as network timeouts; the operation may be safely retried. - Use `rd_kafka_error_is_retriable()` to distinguish this case. - * Abortable errors - if any of the transactional APIs return a non-fatal - error code the current transaction has failed and the application - must call `rd_kafka_abort_transaction()`, rewind its input to the - point before the current transaction started, and attempt a new transaction - by calling `rd_kafka_begin_transaction()`, etc. - Use `rd_kafka_error_txn_requires_abort()` to distinguish this case. - * Fatal errors - the application must cease operations and destroy the - producer instance. - Use `rd_kafka_error_is_fatal()` to distinguish this case. - * For all other errors returned from the transactional API: the current - recommendation is to treat any error that has neither retriable, abortable, - nor fatal set as a fatal error. - -While the application should log the actual fatal or abortable errors, there -is no need for the application to handle the underlying errors specifically. - - - -#### Old producer fencing - -If a new transactional producer instance is started with the same -`transactional.id`, any previous, still-running producer -instance will be fenced off at the next produce, commit or abort attempt, by -raising a fatal error with the error code set to -`RD_KAFKA_RESP_ERR__FENCED`. - - -#### Configuration considerations - -To make sure messages time out (in case of connectivity problems, etc) within -the transaction, the `message.timeout.ms` configuration property must be -set lower than the `transaction.timeout.ms`; this is enforced when -creating the producer instance. -If `message.timeout.ms` is not explicitly configured it will be adjusted -automatically. - - - - -### Exactly Once Semantics (EOS) and transactions - -librdkafka supports Exactly Once Semantics (EOS) as defined in [KIP-98](https://cwiki.apache.org/confluence/display/KAFKA/KIP-98+-+Exactly+Once+Delivery+and+Transactional+Messaging). -For more on the use of transactions, see [Transactions in Apache Kafka](https://www.confluent.io/blog/transactions-apache-kafka/). - -See [examples/transactions.c](examples/transactions.c) for an example -transactional EOS application.
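In compressed form, the error taxonomy above might translate into a commit loop like this sketch (error checks abbreviated; `rk` is assumed to be an initialized transactional producer, and examples/transactions.c remains the authoritative treatment):

```c
/* Sketch: handle the three transactional error classes on commit. */
rd_kafka_error_t *error;

rd_kafka_init_transactions(rk, 30000 /* timeout in ms, illustrative */);
rd_kafka_begin_transaction(rk);
/* ... produce messages and send consumed offsets to the transaction ... */

error = rd_kafka_commit_transaction(rk, 30000);
if (error) {
        if (rd_kafka_error_txn_requires_abort(error)) {
                /* Abortable: abort, rewind input, begin a new transaction. */
                rd_kafka_abort_transaction(rk, 30000);
        } else if (rd_kafka_error_is_retriable(error)) {
                /* Retriable: safe to retry the commit. */
        } else {
                /* Fatal, or unclassified (treat as fatal): cease and destroy. */
                fprintf(stderr, "Fatal: %s\n", rd_kafka_error_string(error));
        }
        rd_kafka_error_destroy(error);
}
```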
- -**Warning** -If the broker version is older than Apache Kafka 2.5.0 then one transactional -producer instance per consumed input partition is required. -For 2.5.0 and later a single producer instance may be used regardless of -the number of input partitions. -See KIP-447 for more information. - - -## Usage - -### Documentation - -The librdkafka API is documented in the [`rdkafka.h`](src/rdkafka.h) -header file; the configuration properties are documented in -[`CONFIGURATION.md`](CONFIGURATION.md). - -### Initialization - -The application needs to instantiate a top-level object `rd_kafka_t` which is -the base container, providing global configuration and shared state. -It is created by calling `rd_kafka_new()`. - -It also needs to instantiate one or more topics (`rd_kafka_topic_t`) to be used -for producing to or consuming from. The topic object holds topic-specific -configuration and will be internally populated with a mapping of all available -partitions and their leader brokers. -It is created by calling `rd_kafka_topic_new()`. - -Both `rd_kafka_t` and `rd_kafka_topic_t` come with a configuration API which -is optional. -Not using the API will cause librdkafka to use its default values which are -documented in [`CONFIGURATION.md`](CONFIGURATION.md). - -**Note**: An application may create multiple `rd_kafka_t` objects and - they share no state. - -**Note**: An `rd_kafka_topic_t` object may only be used with the `rd_kafka_t` - object it was created from. - - - -### Configuration - -To ease integration with the official Apache Kafka software and lower -the learning curve, librdkafka implements identical configuration -properties as found in the official clients of Apache Kafka. - -Configuration is applied prior to object creation using the -`rd_kafka_conf_set()` and `rd_kafka_topic_conf_set()` APIs. - -**Note**: The `rd_kafka.._conf_t` objects are not reusable after they have been - passed to `rd_kafka.._new()`. - The application does not need to free any config resources after a - `rd_kafka.._new()` call. - -#### Example - -```c
- rd_kafka_conf_t *conf;
- rd_kafka_conf_res_t res;
- rd_kafka_t *rk;
- char errstr[512];
-
- conf = rd_kafka_conf_new();
-
- res = rd_kafka_conf_set(conf, "compression.codec", "snappy",
-                         errstr, sizeof(errstr));
- if (res != RD_KAFKA_CONF_OK)
-         fail("%s\n", errstr);
-
- res = rd_kafka_conf_set(conf, "batch.num.messages", "100",
-                         errstr, sizeof(errstr));
- if (res != RD_KAFKA_CONF_OK)
-         fail("%s\n", errstr);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- if (!rk) {
-         /* rk was not created, so destroy the conf object itself */
-         rd_kafka_conf_destroy(conf);
-         fail("Failed to create producer: %s\n", errstr);
- }
-
- /* Note: librdkafka takes ownership of the conf object on success */
-```
 - -Configuration properties may be set in any order (except for interceptors) and -may be overwritten before being passed to `rd_kafka_new()`. -`rd_kafka_new()` will verify that the passed configuration is consistent -and will fail and return an error if incompatible configuration properties -are detected. It will also emit log warnings for deprecated and problematic -configuration properties. - - -### Termination - -librdkafka is asynchronous in nature and performs most operations in its -background threads. - -Calling the librdkafka handle destructor tells the librdkafka background -threads to finalize their work, close network connections, clean up, etc, and -may thus take some time. The destructor (`rd_kafka_destroy()`) will block -until all background threads have terminated.
- -If the destructor blocks indefinitely it typically means there is an outstanding -object reference, such as a message or topic object, that was not destroyed -prior to destroying the client handle. - -All objects except for the handle (C: `rd_kafka_t`, -C++: `Consumer,KafkaConsumer,Producer`), such as topic objects, messages, -`topic_partition_t`, `TopicPartition`, events, etc, **MUST** be -destroyed/deleted prior to destroying or closing the handle. - -For C, make sure the following objects are destroyed prior to calling -`rd_kafka_consumer_close()` and `rd_kafka_destroy()`: - * `rd_kafka_message_t` - * `rd_kafka_topic_t` - * `rd_kafka_topic_partition_t` - * `rd_kafka_topic_partition_list_t` - * `rd_kafka_event_t` - * `rd_kafka_queue_t` - -For C++ make sure the following objects are deleted prior to -calling `KafkaConsumer::close()` and delete on the Consumer, KafkaConsumer or -Producer handle: - * `Message` - * `Topic` - * `TopicPartition` - * `Event` - * `Queue` - - -#### High-level KafkaConsumer - -Proper termination sequence for the high-level KafkaConsumer is: -```c - /* 1) Leave the consumer group, commit final offsets, etc. */ - rd_kafka_consumer_close(rk); - - /* 2) Destroy handle object */ - rd_kafka_destroy(rk); -``` - -**NOTE**: There is no need to unsubscribe prior to calling `rd_kafka_consumer_close()`. - -**NOTE**: Any topic objects created must be destroyed prior to rd_kafka_destroy() - -Effects of not doing the above, for: - 1. Final offsets are not committed and the consumer will not actively leave - the group, it will be kicked out of the group after the `session.timeout.ms` - expires. It is okay to omit the `rd_kafka_consumer_close()` call in case - the application does not want to wait for the blocking close call. - 2. librdkafka will continue to operate on the handle. Actual memory leaks. - - -#### Producer - -The proper termination sequence for Producers is: - -```c - /* 1) Make sure all outstanding requests are transmitted and handled. */ - rd_kafka_flush(rk, 60*1000); /* One minute timeout */ - - /* 2) Destroy the topic and handle objects */ - rd_kafka_topic_destroy(rkt); /* Repeat for all topic objects held */ - rd_kafka_destroy(rk); -``` - -Effects of not doing the above, for: - 1. Messages in-queue or in-flight will be dropped. - 2. librdkafka will continue to operate on the handle. Actual memory leaks. - - -#### Admin API client - -Unlike the Java Admin client, the Admin APIs in librdkafka are available -on any type of client instance and can be used in combination with the -client type's main functionality, e.g., it is perfectly fine to call -`CreateTopics()` in your running producer, or `DeleteRecords()` in your -consumer. - -If you need a client instance to only perform Admin API operations the -recommendation is to create a producer instance since it requires less -configuration (no `group.id`) than the consumer and is generally more cost -efficient. -We do recommend that you set `allow.auto.create.topics=false` to avoid -topic metadata lookups to unexpectedly have the broker create topics. - - - -#### Speeding up termination -To speed up the termination of librdkafka an application can set a -termination signal that will be used internally by librdkafka to quickly -cancel any outstanding I/O waits. -Make sure you block this signal in your application. 
- -```c
- char tmp[16];
- snprintf(tmp, sizeof(tmp), "%i", SIGIO);  /* Or whatever signal you decide */
- rd_kafka_conf_set(rk_conf, "internal.termination.signal", tmp, errstr, sizeof(errstr));
-```
 - - -### Threads and callbacks - -librdkafka uses multiple threads internally to fully utilize modern hardware. -The API is completely thread-safe and the calling application may call any -of the API functions from any of its own threads at any time. - -A poll-based API is used to provide signaling back to the application; -the application should call rd_kafka_poll() at regular intervals. -The poll API will call the following configured callbacks (optional): - - * `dr_msg_cb` - Message delivery report callback - signals that a message has - been delivered or failed delivery, allowing the application to take action - and to release any application resources used in the message. - * `error_cb` - Error callback - signals an error. These errors are usually of - an informational nature, i.e., failure to connect to a broker, and the - application usually does not need to take any action. - The type of error is passed as a rd_kafka_resp_err_t enum value, - including both remote broker errors as well as local failures. - An application typically does not have to perform any action when - an error is raised through the error callback; the client will - automatically try to recover from all errors, given that the - client and cluster are correctly configured. - In some specific cases a fatal error may occur which will render - the client more or less inoperable for further use: - if the error code in the error callback is set to - `RD_KAFKA_RESP_ERR__FATAL` the application should retrieve the - underlying fatal error and reason using the `rd_kafka_fatal_error()` call, - and then begin terminating the instance. - The Event API's EVENT_ERROR has a `rd_kafka_event_error_is_fatal()` - function, and the C++ EventCb has a `fatal()` method, to help the - application determine if an error is fatal or not. - * `stats_cb` - Statistics callback - triggered if `statistics.interval.ms` - is configured to a non-zero value, emitting metrics and internal state - in JSON format, see [STATISTICS.md](STATISTICS.md). - * `throttle_cb` - Throttle callback - triggered whenever a broker has - throttled (delayed) a request. - -These callbacks will also be triggered by `rd_kafka_flush()`, -`rd_kafka_consumer_poll()`, and any other functions that serve queues. - - -Optional callbacks not triggered by poll; these may be called spontaneously -from any thread at any time: - - * `log_cb` - Logging callback - allows the application to output log messages - generated by librdkafka. - * `partitioner` - Partitioner callback - application provided message partitioner. - The partitioner may be called in any thread at any time, and it may be - called multiple times for the same key. - Partitioner function constraints: - - MUST NOT call any rd_kafka_*() functions - - MUST NOT block or execute for prolonged periods of time. - - MUST return a value between 0 and partition_cnt-1, or the - special RD_KAFKA_PARTITION_UA value if partitioning - could not be performed. - - - -### Brokers - -On initialization, librdkafka only needs a partial list of -brokers (at least one), called the bootstrap brokers. -The client will connect to the bootstrap brokers specified by the -`bootstrap.servers` configuration property and query cluster Metadata -information, which contains the full list of brokers, topics, partitions and their -leaders in the Kafka cluster.
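A small sketch of pointing the client at bootstrap brokers, assuming `conf` and `errstr` as in the configuration example earlier (the hostnames are placeholders):

```c
/* Sketch: two bootstrap brokers are enough; the rest of the
 * cluster is discovered through Metadata requests. */
rd_kafka_conf_set(conf, "bootstrap.servers",
                  "broker1.example.com:9092,broker2.example.com:9092",
                  errstr, sizeof(errstr));
```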
- -Broker names are specified as `host[:port]` where the port is optional -(default 9092) and the host is either a resolvable hostname or an IPv4 or IPv6 -address. -If the host resolves to multiple addresses, librdkafka will round-robin the -addresses for each connection attempt. -A DNS record containing all broker addresses can thus be used to provide a -reliable bootstrap broker. - - -#### SSL - -If the client is to connect to a broker's SSL endpoints/listeners, the client -needs to be configured with `security.protocol=SSL` for just SSL transport or -`security.protocol=SASL_SSL` for SASL authentication and SSL transport. -The client will try to verify the broker's certificate by checking the -CA root certificates; if the broker's certificate can't be verified, -the connection is closed (and retried). This is to protect the client -from connecting to rogue brokers. - -The CA root certificate defaults are system specific: - * On Linux, Mac OSX, and other Unix-like systems the OpenSSL default - CA path will be used, also called the OPENSSLDIR, which is typically - `/etc/ssl/certs` (on Linux, typically in the `ca-certificates` package) and - `/usr/local/etc/openssl` on Mac OSX (Homebrew). - * On Windows the Root certificate store is used, unless - `ssl.ca.certificate.stores` is configured in which case certificates are - read from the specified stores. - * If OpenSSL is linked statically, librdkafka will set the default CA - location to the first of a series of probed paths (see below). - -If the system-provided default CA root certificates are not sufficient to -verify the broker's certificate, such as when a self-signed certificate -or a local CA authority is used, the CA certificate must be specified -explicitly so that the client can find it. -This can be done either by providing a PEM file (e.g., `cacert.pem`) -as the `ssl.ca.location` configuration property, or by passing an in-memory -PEM, X.509/DER or PKCS#12 certificate to `rd_kafka_conf_set_ssl_cert()`. - -It is also possible to disable broker certificate verification completely -by setting `enable.ssl.certificate.verification=false`, but this is not -recommended since it allows for rogue brokers and man-in-the-middle attacks, -and should only be used for testing and troubleshooting purposes. - -CA location probe paths (see [rdkafka_ssl.c](src/rdkafka_ssl.c) for full list) -used when OpenSSL is statically linked: - - "/etc/pki/tls/certs/ca-bundle.crt", - "/etc/ssl/certs/ca-bundle.crt", - "/etc/pki/tls/certs/ca-bundle.trust.crt", - "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", - "/etc/ssl/ca-bundle.pem", - "/etc/pki/tls/cacert.pem", - "/etc/ssl/cert.pem", - "/etc/ssl/cacert.pem", - "/etc/certs/ca-certificates.crt", - "/etc/ssl/certs/ca-certificates.crt", - "/etc/ssl/certs", - "/usr/local/etc/ssl/cert.pem", - "/usr/local/etc/ssl/cacert.pem", - "/usr/local/etc/ssl/certs/cert.pem", - "/usr/local/etc/ssl/certs/cacert.pem", - etc.. - - -On **Windows** the Root certificate store is read by default, but any number -of certificate stores can be read by setting the `ssl.ca.certificate.stores` -configuration property to a comma-separated list of certificate store names. -The predefined system store names are: - - * `MY` - User certificates - * `Root` - System CA certificates (default) - * `CA` - Intermediate CA certificates - * `Trust` - Trusted publishers - -For example, to read both intermediate and root CAs, set -`ssl.ca.certificate.stores=CA,Root`.
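As a hedged sketch, connecting over SSL with a self-signed CA might be configured like this (the certificate path is a placeholder, and `conf`/`errstr` are assumed as before):

```c
/* Sketch: SSL transport with an explicitly provided CA bundle. */
rd_kafka_conf_set(conf, "security.protocol", "SSL",
                  errstr, sizeof(errstr));
rd_kafka_conf_set(conf, "ssl.ca.location", "/path/to/cacert.pem",
                  errstr, sizeof(errstr));
```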
- - - -#### Sparse connections - -The client will only connect to brokers it needs to communicate with, and -only when necessary. - -Examples of needed broker connections are: - - * leaders for partitions being consumed from - * leaders for partitions being produced to - * consumer group coordinator broker - * cluster controller for Admin API operations - - -##### Random broker selection - -When there is no broker connection and a connection to any broker -is needed, such as on startup to retrieve metadata, the client randomly selects -a broker from its list of brokers, which includes both the configured bootstrap -brokers (including brokers manually added with `rd_kafka_brokers_add()`), as -well as the brokers discovered from cluster metadata. -Brokers with no prior connection attempt are tried first. - -If there is already an available broker connection to any broker it is used, -rather than connecting to a new one. - -The random broker selection and connection scheduling is triggered when: - * bootstrap servers are configured (`rd_kafka_new()`) - * brokers are manually added (`rd_kafka_brokers_add()`). - * a consumer group coordinator needs to be found. - * acquiring a ProducerID for the Idempotent Producer. - * cluster or topic metadata is being refreshed. - -A single connection attempt will be performed, and the broker will -return to an idle INIT state on failure to connect. - -The random broker selection is rate-limited to: -10 < `reconnect.backoff.ms`/2 < 1000 milliseconds. - -**Note**: The broker connection will be maintained until it is closed - by the broker (idle connection reaper). - -##### Persistent broker connections - -While the random broker selection is useful for one-off queries, there -is a need for the client to maintain persistent connections to certain brokers: - * Consumer: the group coordinator. - * Consumer: partition leader for topics being fetched from. - * Producer: partition leader for topics being produced to. - -These dependencies are discovered and maintained automatically, marking -matching brokers as persistent, which will make the client maintain connections -to these brokers at all times, reconnecting as necessary. - - -#### Connection close - -A broker connection may be closed by the broker or intermediary network gear, -or due to network errors, timeouts, etc. -When a broker connection is closed, librdkafka will back off the next reconnect -attempt (to the given broker) for `reconnect.backoff.ms` with -25% to +50% jitter; -this value is increased exponentially for each connect attempt until -`reconnect.backoff.max.ms` is reached, at which time the value is reset -to `reconnect.backoff.ms`. - -The broker will disconnect clients that have not sent any protocol requests -within `connections.max.idle.ms` (broker configuration property, defaults -to 10 minutes), but there is no foolproof way for the client to know that it -was a deliberate close by the broker and not an error. To avoid logging these -deliberate idle disconnects as errors the client employs some logic to try to -classify a disconnect as an idle disconnect if no requests have been sent in -the last `socket.timeout.ms` or there are no outstanding, or -queued, requests waiting to be sent. In this case the standard "Disconnect" -error log is silenced (will only be seen with debug enabled). - -Otherwise, if a connection is closed while there are requests in-flight -the logging level will be LOG_WARNING (4), else LOG_INFO (6).
- -`log.connection.close=false` may be used to silence all disconnect logs, -but it is recommended to instead rely on the above heuristics. - - -#### Fetch From Follower - -librdkafka supports consuming messages from follower replicas -([KIP-392](https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica)). -This is enabled by setting the `client.rack` configuration property which -corresponds to `broker.rack` on the broker. The actual assignment of -consumers to replicas is determined by the configured `replica.selector.class` -on the broker. - - -### Logging - -#### Debug contexts - -Extensive debugging of librdkafka can be enabled by setting the -`debug` configuration property to a CSV string of debug contexts: - -Debug context | Type | Description ---------------|----------|---------------------- -generic | * | General client instance level debugging. Includes initialization and termination debugging. -broker | * | Broker and connection state debugging. -topic | * | Topic and partition state debugging. Includes leader changes. -metadata | * | Cluster and topic metadata retrieval debugging. -feature | * | Kafka protocol feature support as negotiated with the broker. -queue | producer | Message queue debugging. -msg | * | Message debugging. Includes information about batching, compression, sizes, etc. -protocol | * | Kafka protocol request/response debugging. Includes latency (rtt) printouts. -cgrp | consumer | Low-level consumer group state debugging. -security | * | Security and authentication debugging. -fetch | consumer | Consumer message fetch debugging. Includes decision when and why messages are fetched. -interceptor | * | Interceptor interface debugging. -plugin | * | Plugin loading debugging. -consumer | consumer | High-level consumer debugging. -admin | admin | Admin API debugging. -eos | producer | Idempotent Producer debugging. -mock | * | Mock cluster functionality debugging. -assignor | consumer | Detailed consumer group partition assignor debugging. -conf | * | Display set configuration properties on startup. -all | * | All of the above. - - -Suggested debugging settings for troubleshooting: - -Problem space | Type | Debug setting ------------------------|----------|------------------- -Producer not delivering messages to broker | producer | `broker,topic,msg` -Consumer not fetching messages | consumer | Start with `consumer`, or use `cgrp,fetch` for detailed information. -Consumer starts reading at unexpected offset | consumer | `consumer` or `cgrp,fetch` -Authentication or connectivity issues | * | `broker,auth` -Protocol handling or latency | * | `broker,protocol` -Topic leader and state | * | `topic,metadata` - - - - -### Feature discovery - -Apache Kafka broker version 0.10.0 added support for the ApiVersionRequest API -which allows a client to query a broker for its range of supported API versions. - -librdkafka supports this functionality and will query each broker on connect -for this information (if `api.version.request=true`) and use it to enable or disable -various protocol features, such as MessageVersion 1 (timestamps), KafkaConsumer, etc. - -If the broker fails to respond to the ApiVersionRequest librdkafka will -assume the broker is too old to support the API and fall back to an older -broker version's API. These fallback versions are hardcoded in librdkafka -and is controlled by the `broker.version.fallback` configuration property. 
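Tying the logging section above back to configuration, a sketch of enabling one of the suggested debug settings (the value shown matches the "producer not delivering" row of the troubleshooting table; `conf` and `errstr` are assumed as in the configuration example earlier):

```c
/* Sketch: verbose debugging for a producer that is not delivering. */
rd_kafka_conf_set(conf, "debug", "broker,topic,msg",
                  errstr, sizeof(errstr));
```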
- - - -### Producer API - -After setting up the `rd_kafka_t` object with type `RD_KAFKA_PRODUCER` and one -or more `rd_kafka_topic_t` objects librdkafka is ready for accepting messages -to be produced and sent to brokers. - -The `rd_kafka_produce()` function takes the following arguments: - - * `rkt` - the topic to produce to, previously created with - `rd_kafka_topic_new()` - * `partition` - partition to produce to. If this is set to - `RD_KAFKA_PARTITION_UA` (UnAssigned) then the configured partitioner - function will be used to select a target partition. - * `msgflags` - 0, or one of: - * `RD_KAFKA_MSG_F_COPY` - librdkafka will immediately make a copy of - the payload. Use this when the payload is in non-persistent - memory, such as the stack. - * `RD_KAFKA_MSG_F_FREE` - let librdkafka free the payload using - `free(3)` when it is done with it. - - These two flags are mutually exclusive and neither need to be set in - which case the payload is neither copied nor freed by librdkafka. - - If `RD_KAFKA_MSG_F_COPY` flag is not set no data copying will be - performed and librdkafka will hold on the payload pointer until - the message has been delivered or fails. - The delivery report callback will be called when librdkafka is done - with the message to let the application regain ownership of the - payload memory. - The application must not free the payload in the delivery report - callback if `RD_KAFKA_MSG_F_FREE is set`. - * `payload`,`len` - the message payload - * `key`,`keylen` - an optional message key which can be used for partitioning. - It will be passed to the topic partitioner callback, if any, and - will be attached to the message when sending to the broker. - * `msg_opaque` - an optional application-provided per-message opaque pointer - that will be provided in the message delivery callback to let - the application reference a specific message. - - -`rd_kafka_produce()` is a non-blocking API, it will enqueue the message -on an internal queue and return immediately. -If the number of queued messages would exceed the `queue.buffering.max.messages` -configuration property then `rd_kafka_produce()` returns -1 and sets errno -to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus -providing a backpressure mechanism. - - -`rd_kafka_producev()` provides an alternative produce API that does not -require a topic `rkt` object and also provides support for extended -message fields, such as timestamp and headers. - - -**Note**: See `examples/rdkafka_performance.c` for a producer implementation. - - -### Simple Consumer API (legacy) - -NOTE: For the high-level KafkaConsumer interface see rd_kafka_subscribe (rdkafka.h) or KafkaConsumer (rdkafkacpp.h) - -The consumer API is a bit more stateful than the producer API. -After creating `rd_kafka_t` with type `RD_KAFKA_CONSUMER` and -`rd_kafka_topic_t` instances the application must also start the consumer -for a given partition by calling `rd_kafka_consume_start()`. - -`rd_kafka_consume_start()` arguments: - - * `rkt` - the topic to start consuming from, previously created with - `rd_kafka_topic_new()`. - * `partition` - partition to consume from. - * `offset` - message offset to start consuming from. 
This may either be an - absolute message offset or one of the three special offsets: - `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning - of the partition's queue (oldest message), or - `RD_KAFKA_OFFSET_END` to start consuming at the next message to be - produced to the partition, or - `RD_KAFKA_OFFSET_STORED` to use the offset store. - -After a topic+partition consumer has been started, librdkafka will attempt -to keep `queued.min.messages` messages in the local queue by repeatedly -fetching batches of messages from the broker. librdkafka will fetch all -consumed partitions for which that broker is a leader, through a single -request. - -This local message queue is then served to the application through three -different consume APIs: - - * `rd_kafka_consume()` - consumes a single message - * `rd_kafka_consume_batch()` - consumes one or more messages - * `rd_kafka_consume_callback()` - consumes all messages in the local - queue and calls a callback function for each one. - -These three APIs are listed above in ascending order of performance, with -`rd_kafka_consume()` being the slowest and `rd_kafka_consume_callback()` -the fastest. The different consume variants are provided to cater for different -application needs. - -A consumed message, as provided or returned by each of the consume functions, -is represented by the `rd_kafka_message_t` type. - -`rd_kafka_message_t` members: - - * `err` - Error signaling back to the application. If this field is non-zero - the `payload` field should be considered an error message and - `err` is an error code (`rd_kafka_resp_err_t`). - If `err` is zero then the message is a proper fetched message - and `payload` et al. contain message payload data. - * `rkt`,`partition` - Topic and partition for this message or error. - * `payload`,`len` - Message payload data or error message (err!=0). - * `key`,`key_len` - Optional message key as specified by the producer. - * `offset` - Message offset - -Both the `payload` and `key` memory, as well as the message as a whole, are -owned by librdkafka and must not be used after an `rd_kafka_message_destroy()` -call. librdkafka will share the same messageset receive buffer memory for all -message payloads of that messageset to avoid excessive copying, which means -that if the application decides to hang on to a single `rd_kafka_message_t` -it will prevent the backing memory from being released for all other messages -from the same messageset. - -When the application is done consuming messages from a topic+partition it -should call `rd_kafka_consume_stop()` to stop the consumer. This will also -purge any messages currently in the local queue. - - -**Note**: See `examples/rdkafka_performance.c` for a consumer implementation. - - -#### Offset management - -Broker-based offset management is available for broker version >= 0.9.0 -in conjunction with using the high-level KafkaConsumer interface (see -rdkafka.h or rdkafkacpp.h). - -Offset management is also available through a deprecated local offset file, -where the offset is periodically written to a local file for each -topic+partition according to the following topic configuration properties: - - * `enable.auto.commit` - * `auto.commit.interval.ms` - * `offset.store.path` - * `offset.store.sync.interval.ms` - -The legacy `auto.commit.enable` topic configuration property is only to be used -with the legacy low-level consumer. -Use `enable.auto.commit` with the modern KafkaConsumer.
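Putting the legacy consumer calls described above together, a minimal sketch might look like this (`rkt` is assumed to have been created with `rd_kafka_topic_new()`, and `running` is an application-provided flag):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: start a legacy consumer on partition 0, poll messages,
 * then stop (setup and error handling abbreviated). */
rd_kafka_consume_start(rkt, 0 /* partition */, RD_KAFKA_OFFSET_BEGINNING);

while (running) {
        rd_kafka_message_t *msg =
                rd_kafka_consume(rkt, 0, 1000 /* timeout in ms */);
        if (!msg)
                continue;          /* timed out waiting for a message */
        if (msg->err)
                fprintf(stderr, "%s\n", rd_kafka_message_errstr(msg));
        else
                printf("offset %ld: %.*s\n", (long)msg->offset,
                       (int)msg->len, (const char *)msg->payload);
        rd_kafka_message_destroy(msg); /* releases librdkafka-owned memory */
}

rd_kafka_consume_stop(rkt, 0);
```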
-
-
-#### Offset management
-
-Broker based offset management is available for broker version >= 0.9.0
-in conjunction with using the high-level KafkaConsumer interface (see
-rdkafka.h or rdkafkacpp.h)
-
-Offset management is also available through a deprecated local offset file,
-where the offset is periodically written to a local file for each
-topic+partition according to the following topic configuration properties:
-
- * `enable.auto.commit`
- * `auto.commit.interval.ms`
- * `offset.store.path`
- * `offset.store.sync.interval.ms`
-
-The legacy `auto.commit.enable` topic configuration property is only to be used
-with the legacy low-level consumer.
-Use `enable.auto.commit` with the modern KafkaConsumer.
-
-
-##### Auto offset commit
-
-The consumer will automatically commit offsets every `auto.commit.interval.ms`
-when `enable.auto.commit` is enabled (default).
-
-Offsets to be committed are kept in a local in-memory offset store.
-This offset store is updated by `consumer_poll()` (et al.) to
-store the offset of the last message passed to the application
-(per topic+partition).
-
-##### At-least-once processing
-Since auto commits are performed in a background thread this may result in
-the offset for the latest message being committed before the application has
-finished processing the message. If the application were to crash or exit
-prior to finishing processing, and the offset had been auto committed,
-the next incarnation of the consumer application would start at the next
-message, effectively missing the message that was being processed when the
-application crashed.
-To avoid this scenario the application can disable the automatic
-offset **store** by setting `enable.auto.offset.store` to false
-and manually **storing** offsets after processing by calling
-`rd_kafka_offsets_store()`.
-This gives an application fine-grained control over when a message
-is eligible for committing without having to perform the commit itself.
-`enable.auto.commit` should be set to true when using manual offset storing.
-The latest stored offset will be automatically committed every
-`auto.commit.interval.ms`.
-
-**Note**: Only greater offsets are committed, e.g., if the latest committed
-          offset was 10 and the application performs an offsets_store()
-          with offset 9, that offset will not be committed.
-
-
-##### Auto offset reset
-
-The consumer will by default try to acquire the last committed offsets for
-each topic+partition it is assigned, using its configured `group.id`.
-If there is no committed offset available, or the consumer is unable to
-fetch the committed offsets, the policy of `auto.offset.reset` will kick in.
-This configuration property may be set to one of the following values:
-
- * `earliest` - start consuming from the earliest message of the partition.
- * `latest` - start consuming from the next message to be produced to the partition.
- * `error` - don't start consuming but instead raise a consumer error
-   with error code `RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET` for
-   the topic+partition. This allows the application to decide what
-   to do in case there is no committed start offset.
-
-
-### Consumer groups
-
-Broker based consumer groups (requires Apache Kafka broker >=0.9) are supported,
-see KafkaConsumer in rdkafka.h or rdkafkacpp.h
-
-The following diagram visualizes the high-level balanced consumer group state
-flow and synchronization between the application, librdkafka consumer,
-group coordinator, and partition leader(s).
-
-![Consumer group state diagram](src/librdkafka_cgrp_synch.png)
-
-
-#### Static consumer groups
-
-By default Kafka consumers are rebalanced each time a new consumer joins
-the group or an existing member leaves. This is what is known as dynamic
-membership. Apache Kafka >= 2.3.0 introduces static membership.
-Unlike dynamic membership, static members can leave and rejoin a group
-within the `session.timeout.ms` without triggering a rebalance, retaining
-their existing partition assignments.
-
-To enable static group membership configure each consumer instance
-in the group with a unique `group.instance.id`.
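-
-For example (a hedged sketch; the group and instance names are hypothetical,
-and each instance of the application would use its own stable id):
-
-```
-group.id=my-consumer-group
-group.instance.id=host1-worker1
-session.timeout.ms=60000
-```
-
-A member that restarts and rejoins within `session.timeout.ms` then gets its
-previous partition assignment back without triggering a rebalance.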
-
-Consumers with `group.instance.id` set will not send a leave group request on
-close; session timeout, change of subscription, or a new group member joining
-the group are the only mechanisms that will trigger a group rebalance for
-static consumer groups.
-
-If a new consumer joins the group with the same `group.instance.id` as an
-existing consumer, the existing consumer will be fenced and raise a fatal error.
-The fatal error is propagated as a consumer error with error code
-`RD_KAFKA_RESP_ERR__FATAL`; use `rd_kafka_fatal_error()` to retrieve
-the original fatal error code and reason.
-
-To read more about static group membership, see [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances).
-
-
-### Topics
-
-#### Unknown or unauthorized topics
-
-If a consumer application subscribes to non-existent or unauthorized topics,
-a consumer error will be propagated for each unavailable topic with the
-error code set to either `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` or a
-broker-specific error code, such as
-`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
-
-As the topic metadata is refreshed every `topic.metadata.refresh.interval.ms`
-the unavailable topics are re-checked for availability, but the same error
-will not be raised again for the same topic.
-
-If a consumer has Describe (ACL) permissions for a topic but not Read, it will
-be able to join a consumer group and start consuming the topic, but the Fetch
-requests to retrieve messages from the broker will fail with
-`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
-This error will be raised to the application once per partition and
-assign()/seek(), and the fetcher will back off the next fetch to 10 times
-`fetch.error.backoff.ms` (but at least 1 second).
-It is recommended that the application take appropriate action when this
-occurs, for instance adjusting its subscription or assignment to exclude the
-unauthorized topic.
-
-
-#### Topic metadata propagation for newly created topics
-
-Due to the asynchronous nature of topic creation in Apache Kafka it may
-take some time for a newly created topic to be known by all brokers in the
-cluster.
-If a client tries to use a topic after topic creation but before the topic
-has been fully propagated in the cluster it will seem as if the topic does not
-exist, which would raise `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC` (et al.)
-errors to the application.
-To avoid these temporary errors being raised, the client will not flag
-a topic as non-existent until a propagation time has elapsed; this propagation
-time defaults to 30 seconds and can be configured with
-`topic.metadata.propagation.max.ms`.
-The per-topic max propagation time starts ticking as soon as the topic is
-referenced (e.g., by produce()).
-
-If messages are produced to unknown topics during the propagation time, the
-messages will be queued for later delivery to the broker when the topic
-metadata has propagated.
-Should the topic propagation time expire without the topic being seen, the
-produced messages will fail with `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC`.
-
-**Note**: The propagation time will not take effect if a topic is known to
-          the client and then deleted; in this case the topic will immediately
-          be marked as non-existent and remain non-existent until a topic
-          metadata refresh sees the topic again (after the topic has been
-          re-created).
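-
-As a hedged sketch of how an application might work with this window (the
-60000 ms value and the error handling are illustrative assumptions, not
-recommendations), the propagation time can be raised for slow clusters and
-expired messages detected in the delivery report callback:
-
-```c
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-/* Allow up to 60s for newly created topics to propagate. */
-static void configure_propagation(rd_kafka_conf_t *conf) {
-        char errstr[512];
-        if (rd_kafka_conf_set(conf, "topic.metadata.propagation.max.ms",
-                              "60000", errstr, sizeof(errstr)) !=
-            RD_KAFKA_CONF_OK)
-                fprintf(stderr, "%s\n", errstr);
-}
-
-/* Delivery report callback (registered with rd_kafka_conf_set_dr_msg_cb()):
- * messages that outlived the propagation window surface here with
- * err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC. */
-static void dr_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
-                  void *opaque) {
-        if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                fprintf(stderr, "topic never appeared: %s\n",
-                        rd_kafka_err2str(rkmessage->err));
-}
-```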
-
-
-#### Topic auto creation
-
-Topic auto creation is supported by librdkafka: if a non-existent topic is
-referenced by the client (by producing to, or consuming from, the topic, etc.)
-the broker will automatically create the topic (with default partition counts
-and replication factor) if the broker configuration property
-`auto.create.topics.enable=true` is set.
-
-*Note*: A topic that is undergoing automatic creation may be reported as
-unavailable, with e.g., `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART`, during the
-time the topic is being created and partition leaders are elected.
-
-While topic auto creation may be useful for producer applications, it is not
-particularly valuable for consumer applications since even if the topic
-to consume is auto created there is nothing writing messages to the topic.
-To avoid consumers automatically creating topics the
-`allow.auto.create.topics` consumer configuration property is set to
-`false` by default, preventing the consumer from triggering automatic topic
-creation on the broker. This requires broker version v0.11.0.0 or later.
-The `allow.auto.create.topics` property may be set to `true` to allow
-auto topic creation, which also requires `auto.create.topics.enable=true` to
-be configured on the broker.
-
-
-
-### Metadata
-
-#### < 0.9.3
-Prior to the 0.9.3 release librdkafka's metadata handling
-was chatty and excessive, which usually isn't a problem in small
-to medium-sized clusters, but in large clusters with a large number
-of librdkafka clients the metadata requests could hog broker CPU and bandwidth.
-
-#### >= 0.9.3
-
-The remaining Metadata sections describe the current behaviour.
-
-**Note:** "Known topics" in the following section means topics for
-          locally created `rd_kafka_topic_t` objects.
-
-
-#### Query reasons
-
-There are four reasons to query metadata:
-
- * brokers - update/populate cluster broker list, so the client can
-   find and connect to any new brokers added.
-
- * specific topic - find leader or partition count for specific topic
-
- * known topics - same, but for all locally known topics.
-
- * all topics - get topic names for consumer group wildcard subscription
-   matching
-
-The above list is sorted so that the subsequent entries contain the
-information above, e.g., 'known topics' contains enough information to
-also satisfy 'specific topic' and 'brokers'.
-
-
-#### Caching strategy
-
-The prevalent cache timeout is `metadata.max.age.ms`; any cached entry
-will remain authoritative for this long or until a relevant broker error
-is returned.
-
-
- * brokers - eternally cached, the broker list is additive.
-
- * topics - cached for `metadata.max.age.ms`
-
-
-
-### Fatal errors
-
-If an unrecoverable error occurs, a fatal error is triggered in one
-or more of the following ways depending on what APIs the application is utilizing:
-
- * C: the `error_cb` is triggered with error code `RD_KAFKA_RESP_ERR__FATAL`,
-   the application should call `rd_kafka_fatal_error()` to retrieve the
-   underlying fatal error code and error string.
- * C: an `RD_KAFKA_EVENT_ERROR` event is triggered and
-   `rd_kafka_event_error_is_fatal()` returns true: the fatal error code
-   and string are available through `rd_kafka_event_error()`, and `.._string()`.
- * C and C++: any API call may return `RD_KAFKA_RESP_ERR__FATAL`, use
-   `rd_kafka_fatal_error()` to retrieve the underlying fatal error code
-   and error string.
- * C++: an `EVENT_ERROR` event is triggered and `event.fatal()` returns true:
-   the fatal error code and string are available through `event.err()` and
-   `event.str()`.
-
-
-An application may call `rd_kafka_fatal_error()` at any time to check if
-a fatal error has been raised.
-
-
-#### Fatal producer errors
-
-The idempotent producer guarantees of ordering and no duplicates also
-require a way for the client to fail gracefully when these guarantees
-can't be satisfied.
-
-If a fatal error has been raised, subsequent use of the following API calls
-will fail:
-
- * `rd_kafka_produce()`
- * `rd_kafka_producev()`
- * `rd_kafka_produce_batch()`
-
-The underlying fatal error code will be returned, depending on the error
-reporting scheme for each of those APIs.
-
-
-When a fatal error has occurred the application should call `rd_kafka_flush()`
-to wait for all outstanding and queued messages to drain before terminating
-the application.
-`rd_kafka_purge(RD_KAFKA_PURGE_F_QUEUE)` is automatically called by the client
-when a producer fatal error has occurred; messages in-flight are not purged
-automatically, to allow waiting for the proper acknowledgement from the broker.
-The purged messages in queue will fail with error code set to
-`RD_KAFKA_RESP_ERR__PURGE_QUEUE`.
-
-
-#### Fatal consumer errors
-
-A consumer configured for static group membership (`group.instance.id`) may
-raise a fatal error if a new consumer instance is started with the same
-instance id, causing the existing consumer to be fenced by the new consumer.
-
-This fatal error is propagated on the fenced existing consumer in multiple ways:
- * `error_cb` (if configured) is triggered.
- * `rd_kafka_consumer_poll()` (et al.) will return a message object
-   with the `err` field set to `RD_KAFKA_RESP_ERR__FATAL`.
- * any subsequent calls to state-changing consumer calls will
-   return `RD_KAFKA_RESP_ERR__FATAL`.
-   This includes `rd_kafka_subscribe()`, `rd_kafka_assign()`,
-   `rd_kafka_consumer_close()`, `rd_kafka_commit*()`, etc.
-
-The consumer will automatically stop consuming when a fatal error has occurred
-and no further subscription, assignment, consumption or offset committing
-will be possible. At this point the application should simply destroy the
-consumer instance and terminate the application since it has been replaced
-by a newer instance.
-
-
-## Compatibility
-
-### Broker version compatibility
-
-librdkafka supports all released Apache Kafka broker versions since 0.8.0.0,
-but not all features may be available on all broker versions since some
-features rely on newer broker functionality.
-
-**Current defaults:**
- * `api.version.request=true`
- * `broker.version.fallback=0.10.0`
- * `api.version.fallback.ms=0` (never revert to `broker.version.fallback`)
-
-Depending on what broker version you are using, please configure your
-librdkafka based client as follows:
-
-#### Broker version >= 0.10.0.0 (or trunk)
-
-For librdkafka >= v1.0.0 there is no need to set any api.version-related
-configuration parameters; the defaults are tailored for broker version 0.10.0.0
-or later.
-
-For librdkafka < v1.0.0, please specify:
-```
-api.version.request=true
-api.version.fallback.ms=0
-```
-
-
-#### Broker versions 0.9.0.x
-
-```
-api.version.request=false
-broker.version.fallback=0.9.0.x  (the exact 0.9.0.x version you are using)
-```
-
-#### Broker versions 0.8.x.y
-
-```
-api.version.request=false
-broker.version.fallback=0.8.x.y  (your exact 0.8.x.y broker version)
-```
-
-#### Detailed description
-
-Apache Kafka version 0.10.0.0 added support for
-[KIP-35](https://cwiki.apache.org/confluence/display/KAFKA/KIP-35+-+Retrieving+protocol+version) -
-querying the broker for supported API request types and versions -
-allowing the client to figure out what features it can use.
-But for older broker versions there is no way for the client to reliably know
-what protocol features the broker supports.
-
-To alleviate this situation librdkafka has three configuration properties:
- * `api.version.request=true|false` - enables the API version request;
-   this requires a >= 0.10.0.0 broker and will cause a disconnect on
-   brokers 0.8.x - this disconnect is recognized by librdkafka and on the next
-   connection attempt (which is immediate) it will disable the API version
-   request and use `broker.version.fallback` as a basis of available features.
-   **NOTE**: Due to a bug in broker versions 0.9.0.0 & 0.9.0.1 the broker will
-   not close the connection when receiving the API version request; instead
-   the request will time out in librdkafka after 10 seconds and it will fall
-   back to `broker.version.fallback` on the next immediate connection attempt.
- * `broker.version.fallback=X.Y.Z.N` - if the API version request fails
-   (if `api.version.request=true`) or API version requests are disabled
-   (`api.version.request=false`) then this tells librdkafka what version the
-   broker is running and adapts its feature set accordingly.
- * `api.version.fallback.ms=MS` - In the case where `api.version.request=true`
-   and the API version request fails, this property dictates for how long
-   librdkafka will use `broker.version.fallback` instead of
-   `api.version.request=true`. After `MS` has passed the API version request
-   will be sent on any new connections made for the broker in question.
-   This allows upgrading the Kafka broker to a new version with an extended
-   feature set without needing to restart or reconfigure the client
-   (given that `api.version.request=true`).
-
-*Note: These properties apply per broker.*
-
-The API version query was disabled by default (`api.version.request=false`) in
-librdkafka up to and including v0.9.5 due to the aforementioned bug in
-broker versions 0.9.0.0 & 0.9.0.1, but was changed to `true` in
-librdkafka v0.11.0.
-
-
-### Supported KIPs
-
-The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) supported by librdkafka.
-
-
-| KIP | Kafka release | Status |
-|-----|---------------|--------|
-| KIP-1 - Stop accepting request.required.acks > 1 | 0.9.0.0 | Not enforced on client (due to backwards compat with brokers <0.8.3) |
-| KIP-4 - Metadata protocol changes | 0.9.0.0, 0.10.0.0, 0.10.1.0 | Supported |
-| KIP-8 - Producer flush() | 0.9.0.0 | Supported |
-| KIP-12 - SASL Kerberos | 0.9.0.0 | Supported (uses SSPI/logged-on-user on Windows, full KRB5 keytabs on Unix) |
-| KIP-13 - Protocol request throttling (enforced on broker) | 0.9.0.0 | Supported |
-| KIP-15 - Producer close with timeout | 0.9.0.0 | Supported (through flush() + destroy()) |
-| KIP-19 - Request timeouts | 0.9.0.0 | Supported |
-| KIP-22 - Producer pluggable partitioner | 0.9.0.0 | Supported (not supported by Go, .NET and Python) |
-| KIP-31 - Relative offsets in messagesets | 0.10.0.0 | Supported |
-| KIP-35 - ApiVersionRequest | 0.10.0.0 | Supported |
-| KIP-40 - ListGroups and DescribeGroups | 0.9.0.0 | Supported |
-| KIP-41 - max.poll.records | 0.10.0.0 | Supported through batch consumption interface (not supported by .NET and Go) |
-| KIP-42 - Producer and Consumer interceptors | 0.10.0.0 | Supported (not supported by Go, .NET and Python) |
-| KIP-43 - SASL PLAIN and handshake | 0.10.0.0 | Supported |
-| KIP-48 - Delegation tokens | 1.1.0 | Not supported |
-| KIP-54 - Sticky partition assignment strategy | 0.11.0.0 | Supported but not available, use KIP-429 instead. |
-| KIP-57 - Interoperable LZ4 framing | 0.10.0.0 | Supported |
-| KIP-62 - max.poll.interval and background heartbeats | 0.10.1.0 | Supported |
-| KIP-70 - Proper client rebalance event on unsubscribe/subscribe | 0.10.1.0 | Supported |
-| KIP-74 - max.partition.fetch.bytes | 0.10.1.0 | Supported |
-| KIP-78 - Retrieve Cluster Id | 0.10.1.0 | Supported (not supported by .NET) |
-| KIP-79 - OffsetsForTimes | 0.10.1.0 | Supported |
-| KIP-81 - Consumer pre-fetch buffer size | 2.4.0 (WIP) | Supported |
-| KIP-82 - Record Headers | 0.11.0.0 | Supported |
-| KIP-84 - SASL SCRAM | 0.10.2.0 | Supported |
-| KIP-85 - SASL config properties | 0.10.2.0 | Supported |
-| KIP-86 - Configurable SASL callbacks | 2.0.0 | Not supported |
-| KIP-88 - AdminAPI: ListGroupOffsets | 0.10.2.0 | Not supported |
-| KIP-91 - Intuitive timeouts in Producer | 2.1.0 | Supported |
-| KIP-92 - Per-partition lag metrics in Consumer | 0.10.2.0 | Supported |
-| KIP-97 - Backwards compatibility with older brokers | 0.10.2.0 | Supported |
-| KIP-98 - EOS | 0.11.0.0 | Supported |
-| KIP-102 - Close with timeout in consumer | 0.10.2.0 | Not supported |
-| KIP-107 - AdminAPI: DeleteRecordsBefore | 0.11.0.0 | Supported |
-| KIP-110 - ZStd compression | 2.1.0 | Supported |
-| KIP-117 - AdminClient | 0.11.0.0 | Supported |
-| KIP-124 - Request rate quotas | 0.11.0.0 | Partially supported (depending on protocol request) |
-| KIP-126 - Producer ensure proper batch size after compression | 0.11.0.0 | Supported |
-| KIP-133 - AdminAPI: DescribeConfigs and AlterConfigs | 0.11.0.0 | Supported |
-| KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Not supported |
-| KIP-144 - Broker reconnect backoff | 0.11.0.0 | Supported |
-| KIP-152 - Improved SASL auth error messages | 1.0.0 | Supported |
-| KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported |
-| KIP-195 - AdminAPI: CreatePartitions | 1.0.0 | Supported |
-| KIP-204 - AdminAPI: DeleteRecords | 1.1.0 | Supported |
-| KIP-219 - Client-side throttling | 2.0.0 | Not supported |
-| KIP-222 - AdminAPI: Consumer group operations | 2.0.0 | Not supported (but some APIs available outside Admin client) |
-| KIP-223 - Consumer partition lead metric | 2.0.0 | Not supported |
-| KIP-226 - AdminAPI: Dynamic broker config | 1.1.0 | Supported |
-| KIP-227 - Consumer Incremental Fetch | 1.1.0 | Not supported |
-| KIP-229 - AdminAPI: DeleteGroups | 1.1.0 | Supported |
-| KIP-235 - DNS alias for secure connections | 2.1.0 | Not supported |
-| KIP-249 - AdminAPI: Delegation Tokens | 2.0.0 | Not supported |
-| KIP-255 - SASL OAUTHBEARER | 2.0.0 | Supported |
-| KIP-266 - Fix indefinite consumer timeouts | 2.0.0 | Supported (bound by session.timeout.ms and max.poll.interval.ms) |
-| KIP-289 - Consumer group.id default to NULL | 2.2.0 | Supported |
-| KIP-294 - SSL endpoint verification | 2.0.0 | Supported |
-| KIP-302 - Use all addresses for resolved broker hostname | 2.1.0 | Supported |
-| KIP-320 - Consumer: handle log truncation | 2.1.0, 2.2.0 | Not supported |
-| KIP-322 - DeleteTopics disabled error code | 2.1.0 | Supported |
-| KIP-339 - AdminAPI: incrementalAlterConfigs | 2.3.0 | Not supported |
-| KIP-341 - Update Sticky partition assignment data | 2.3.0 | Not supported (superseded by KIP-429) |
-| KIP-342 - Custom SASL OAUTHBEARER extensions | 2.1.0 | Supported |
-| KIP-345 - Consumer: Static membership | 2.4.0 | Supported |
-| KIP-357 - AdminAPI: list ACLs per principal | 2.1.0 | Not supported |
-| KIP-359 - Producer: use EpochLeaderId | 2.4.0 | Not supported |
-| KIP-360 - Improve handling of unknown Idempotent Producer | 2.5.0 | Supported |
-| KIP-361 - Consumer: add config to disable auto topic creation | 2.3.0 | Supported |
-| KIP-368 - SASL period reauth | 2.2.0 | Not supported |
-| KIP-369 - Always roundRobin partitioner | 2.4.0 | Not supported |
-| KIP-389 - Consumer group max size | 2.2.0 | Supported (error is propagated to application, but the consumer does not raise a fatal error) |
-| KIP-392 - Allow consumers to fetch from closest replica | 2.4.0 | Supported |
-| KIP-394 - Consumer: require member.id in JoinGroupRequest | 2.2.0 | Supported |
-| KIP-396 - AdminAPI: commit/list offsets | 2.4.0 | Not supported (but some APIs available outside Admin client) |
-| KIP-412 - AdminAPI: adjust log levels | 2.4.0 | Not supported |
-| KIP-421 - Variables in client config files | 2.3.0 | Not applicable (librdkafka, et al., does not provide a config file interface, and shouldn't) |
-| KIP-429 - Consumer: incremental rebalance protocol | 2.4.0 | Supported |
-| KIP-430 - AdminAPI: return authorized operations in Describe.. responses | 2.3.0 | Not supported |
-| KIP-436 - Start time in stats | 2.3.0 | Supported |
-| KIP-447 - Producer scalability for EOS | 2.5.0 | Supported |
-| KIP-455 - AdminAPI: Replica assignment | 2.4.0 (WIP) | Not supported |
-| KIP-460 - AdminAPI: electPreferredLeader | 2.4.0 | Not supported |
-| KIP-464 - AdminAPI: defaults for createTopics | 2.4.0 | Supported |
-| KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 (WIP) | Not supported |
-| KIP-480 - Sticky partitioner | 2.4.0 | Not supported |
-| KIP-482 - Optional fields in Kafka protocol | 2.4.0 | Partially supported (ApiVersionRequest) |
-| KIP-496 - AdminAPI: delete offsets | 2.4.0 | Supported |
-| KIP-511 - Collect Client's Name and Version | 2.4.0 | Supported |
-| KIP-514 - Bounded flush() | 2.4.0 | Supported |
-| KIP-517 - Consumer poll() metrics | 2.4.0 | Not supported |
-| KIP-518 - Allow listing consumer groups per state | 2.6.0 | Not supported |
-| KIP-519 - Make SSL engine configurable | 2.6.0 | Not supported |
-| KIP-525 - Return topic metadata and configs in CreateTopics response | 2.4.0 | Not supported |
-| KIP-526 - Reduce Producer Metadata Lookups for Large Number of Topics | 2.5.0 | Not supported |
-| KIP-533 - Add default API timeout to AdminClient | 2.5.0 | Not supported |
-| KIP-546 - Add Client Quota APIs to AdminClient | 2.6.0 | Not supported |
-| KIP-559 - Make the Kafka Protocol Friendlier with L7 Proxies | 2.5.0 | Not supported |
-| KIP-568 - Explicit rebalance triggering on the Consumer | 2.6.0 | Not supported |
-| KIP-659 - Add metadata to DescribeConfigsResponse | 2.6.0 | Not supported |
-| KIP-580 - Exponential backoff for Kafka clients | WIP | Partially supported |
-| KIP-584 - Versioning scheme for features | WIP | Not supported |
-| KIP-588 - Allow producers to recover gracefully from txn timeouts | 2.8.0 (WIP) | Not supported |
-| KIP-602 - Use all resolved addresses by default | 2.6.0 | Supported |
-| KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported |
-| KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported |
-| KIP-735 - Increase default consumer session timeout | TBA | Supported |
-
-
-
-
-### Supported protocol versions
-
-"Kafka max" is the maximum ApiVersion supported in Apache Kafka 2.4.0, while
-"librdkafka max" is the maximum ApiVersion supported in the latest
-release of librdkafka.
-
-
-| ApiKey | Request name | Kafka max | librdkafka max |
-| ------ | ------------ | --------- | -------------- |
-| 0 | Produce | 7 | 7 |
-| 1 | Fetch | 11 | 11 |
-| 2 | ListOffsets | 5 | 1 |
-| 3 | Metadata | 8 | 2 |
-| 8 | OffsetCommit | 7 | 7 |
-| 9 | OffsetFetch | 5 | 1 |
-| 10 | FindCoordinator | 2 | 2 |
-| 11 | JoinGroup | 5 | 5 |
-| 12 | Heartbeat | 3 | 3 |
-| 13 | LeaveGroup | 3 | 1 |
-| 14 | SyncGroup | 3 | 3 |
-| 15 | DescribeGroups | 4 | 0 |
-| 16 | ListGroups | 2 | 0 |
-| 17 | SaslHandshake | 1 | 1 |
-| 18 | ApiVersions | 3 | 3 |
-| 19 | CreateTopics | 5 | 4 |
-| 20 | DeleteTopics | 3 | 1 |
-| 21 | DeleteRecords | 2 | 1 |
-| 22 | InitProducerId | 4 | 4 |
-| 24 | AddPartitionsToTxn | 1 | 0 |
-| 25 | AddOffsetsToTxn | 1 | 0 |
-| 26 | EndTxn | 1 | 1 |
-| 28 | TxnOffsetCommit | 2 | 0 |
-| 32 | DescribeConfigs | 2 | 1 |
-| 33 | AlterConfigs | 1 | 0 |
-| 36 | SaslAuthenticate | 1 | 0 |
-| 37 | CreatePartitions | 1 | 0 |
-| 42 | DeleteGroups | 2 | 1 |
-| 47 | OffsetDelete | 0 | 0 |
-
-
-
-# Recommendations for language binding developers
-
-These recommendations are targeted at developers that wrap librdkafka
-with their high-level languages, such as confluent-kafka-go or node-rdkafka.
-
-## Expose the configuration interface pass-thru
-
-librdkafka's string-based key=value configuration property interface controls
-most runtime behaviour and evolves over time.
-Most features are also only configuration-based, meaning they do not require a
-new API (SSL and SASL are two good examples which are purely enabled through
-configuration properties) and thus no changes are needed to the binding/application
-code.
-
-If your language binding/application allows configuration properties to be set
-in a pass-through fashion without any pre-checking done by your binding code, it
-means that a simple upgrade of the underlying librdkafka library (but not your
-bindings) will provide new features to the user.
-
-## Error constants
-
-The error constants, both the official (value >= 0) errors as well as the
-internal (value < 0) errors, evolve constantly.
-To avoid hard-coding them to expose to your users, librdkafka provides an API
-to extract the full list programmatically at runtime or for
-code generation; see `rd_kafka_get_err_descs()`.
-
-## Reporting client software name and version to broker
-
-[KIP-511](https://cwiki.apache.org/confluence/display/KAFKA/KIP-511%3A+Collect+and+Expose+Client%27s+Name+and+Version+in+the+Brokers) introduces a means for a
-Kafka client to report its implementation name and version to the broker; the
-broker then exposes this as metrics (e.g., through JMX) to help Kafka operators
-troubleshoot problematic clients, understand the impact of broker and client
-upgrades, etc.
-This requires broker version 2.4.0 or later (metrics added in 2.5.0).
-
-librdkafka will send its name (`librdkafka`) and version (e.g., `v1.3.0`)
-upon connect to a supporting broker.
-To help distinguish high-level client bindings on top of librdkafka, a client
-binding should configure the following two properties:
- * `client.software.name` - set to the binding name, e.g.,
-   `confluent-kafka-go` or `node-rdkafka`.
- * `client.software.version` - the version of the binding and the version
-   of librdkafka, e.g., `v1.3.0-librdkafka-v1.3.0` or
-   `1.2.0-librdkafka-v1.3.0`.
-   It is **highly recommended** to include the librdkafka version in this
-   version string.
-
-These configuration properties are hidden (from CONFIGURATION.md et al.) as
-they should typically not be modified by the user.
-
-## Documentation reuse
-
-You are free to reuse the librdkafka API and CONFIGURATION documentation in
-your project, but please do return any documentation improvements back to
-librdkafka (file a GitHub pull request).
-
-## Community support
-
-You are welcome to direct your users to
-[librdkafka's Gitter chat room](http://gitter.im/edenhill/librdkafka) as long as
-you monitor the conversations in there to pick up questions specific to your
-bindings.
-But for the most part user questions are usually generic enough to apply to all
-librdkafka bindings.
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSE b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSE
deleted file mode 100755
index 193ffaae..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012-2020, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSES.txt b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSES.txt
deleted file mode 100755
index f2aa57d0..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/LICENSES.txt
+++ /dev/null
@@ -1,366 +0,0 @@
-LICENSE
---------------------------------------------------------------
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012-2020, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1.
Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - - -LICENSE.crc32c --------------------------------------------------------------- -# For src/crc32c.c copied (with modifications) from -# http://stackoverflow.com/a/17646775/1821055 - -/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction - * Copyright (C) 2013 Mark Adler - * Version 1.1 1 Aug 2013 Mark Adler - */ - -/* - This software is provided 'as-is', without any express or implied - warranty. In no event will the author be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Mark Adler - madler@alumni.caltech.edu - */ - - -LICENSE.fnv1a --------------------------------------------------------------- -parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c - - -Please do not copyright this code. This code is in the public domain. - -LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, -INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF -USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - -By: - chongo /\oo/\ - http://www.isthe.com/chongo/ - -Share and Enjoy! 
:-) - - -LICENSE.hdrhistogram --------------------------------------------------------------- -This license covers src/rdhdrhistogram.c which is a C port of -Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram -at revision 3a0bb77429bd3a61596f5e8a3172445844342120 - ------------------------------------------------------------------------------ - -The MIT License (MIT) - -Copyright (c) 2014 Coda Hale - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE - - -LICENSE.lz4 --------------------------------------------------------------- -src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3 - -LZ4 Library -Copyright (c) 2011-2016, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -LICENSE.murmur2 --------------------------------------------------------------- -parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git - - -MurMurHash2 Library -//----------------------------------------------------------------------------- -// MurmurHash2 was written by Austin Appleby, and is placed in the public -// domain. The author hereby disclaims copyright to this source code. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -LICENSE.pycrc --------------------------------------------------------------- -The following license applies to the files rdcrc32.c and rdcrc32.h which -have been generated by the pycrc tool. -============================================================================ - -Copyright (c) 2006-2012, Thomas Pircher - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -LICENSE.queue --------------------------------------------------------------- -For sys/queue.h: - - * Copyright (c) 1991, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)queue.h 8.5 (Berkeley) 8/20/94 - * $FreeBSD$ - -LICENSE.regexp --------------------------------------------------------------- -regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684 - -" -These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution. -" - - -LICENSE.snappy --------------------------------------------------------------- -###################################################################### -# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h # -# originally retrieved from http://github.com/andikleen/snappy-c # -# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219 # -###################################################################### - -The snappy-c code is under the same license as the original snappy source - -Copyright 2011 Intel Corporation All Rights Reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Intel Corporation nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - - -LICENSE.tinycthread --------------------------------------------------------------- -From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9 - -License -------- - -Copyright (c) 2012 Marcus Geelnard - 2013-2014 Evan Nemerson - -This software is provided 'as-is', without any express or implied -warranty. In no event will the authors be held liable for any damages -arising from the use of this software. - -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it -freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - - 3. This notice may not be removed or altered from any source - distribution. - - -LICENSE.wingetopt --------------------------------------------------------------- -For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt - -/* - * Copyright (c) 2002 Todd C. Miller - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - * Sponsored in part by the Defense Advanced Research Projects - * Agency (DARPA) and Air Force Research Laboratory, Air Force - * Materiel Command, USAF, under agreement number F39502-99-1-0512. - */ -/*- - * Copyright (c) 2000 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dieter Baron and Thomas Klausner. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/README.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/README.md deleted file mode 100755 index cc6200d6..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/README.md +++ /dev/null @@ -1,194 +0,0 @@ -librdkafka - the Apache Kafka C/C++ client library -================================================== - -Copyright (c) 2012-2020, [Magnus Edenhill](http://www.edenhill.se/). - -[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka) - -**librdkafka** is a C library implementation of the -[Apache Kafka](https://kafka.apache.org/) protocol, providing Producer, Consumer -and Admin clients. It was designed with message delivery reliability -and high performance in mind, current figures exceed 1 million msgs/second for -the producer and 3 million msgs/second for the consumer. - -**librdkafka** is licensed under the 2-clause BSD license. - -KAFKA is a registered trademark of The Apache Software Foundation and -has been licensed for use by librdkafka. librdkafka has no -affiliation with and is not endorsed by The Apache Software Foundation. - - -# Features # - * Full Exactly-Once-Semantics (EOS) support - * High-level producer, including Idempotent and Transactional producers - * High-level balanced KafkaConsumer (requires broker >= 0.9) - * Simple (legacy) consumer - * Admin client - * Compression: snappy, gzip, lz4, zstd - * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support - * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM, OAUTHBEARER) support - * Full list of [supported KIPs](INTRODUCTION.md#supported-kips) - * Broker version support: >=0.8 (see [Broker version compatibility](INTRODUCTION.md#broker-version-compatibility)) - * Guaranteed API stability for C & C++ APIs (ABI safety guaranteed for C) - * [Statistics](STATISTICS.md) metrics - * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu - * RPM package: librdkafka and librdkafka-devel - * Gentoo package: dev-libs/librdkafka - * Portable: runs on Linux, MacOS X, Windows, Solaris, FreeBSD, AIX, ... - -# Documentation - - * Public API in [C header](src/rdkafka.h) and [C++ header](src-cpp/rdkafkacpp.h). - * Introduction and manual in [INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md). - * Configuration properties in -[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). - * Statistics metrics in [STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md). 
- * [Frequently asked questions](https://github.com/edenhill/librdkafka/wiki).
-
-**NOTE**: The `master` branch is actively developed, use the latest [release](https://github.com/edenhill/librdkafka/releases) for production use.
-
-
-# Installation
-
-## Installing prebuilt packages
-
-On Mac OSX, install librdkafka with homebrew:
-
-```bash
-$ brew install librdkafka
-```
-
-On Debian and Ubuntu, install librdkafka from the Confluent APT repositories,
-see instructions [here](https://docs.confluent.io/current/installation/installing_cp/deb-ubuntu.html#get-the-software) and then install librdkafka:
-
-```bash
-$ apt install librdkafka-dev
-```
-
-On RedHat, CentOS and Fedora, install librdkafka from the Confluent YUM repositories,
-following the instructions [here](https://docs.confluent.io/current/installation/installing_cp/rhel-centos.html#get-the-software), and then install librdkafka:
-
-```bash
-$ yum install librdkafka-devel
-```
-
-On Windows, reference the [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) NuGet package in your Visual Studio project.
-
-
-For other platforms, follow the source building instructions below.
-
-
-## Installing librdkafka using vcpkg
-
-You can download and install librdkafka using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
-
-```bash
-# Install vcpkg if not already installed
-$ git clone https://github.com/Microsoft/vcpkg.git
-$ cd vcpkg
-$ ./bootstrap-vcpkg.sh
-$ ./vcpkg integrate install
-
-# Install librdkafka
-$ vcpkg install librdkafka
-```
-
-The librdkafka package in vcpkg is kept up to date by Microsoft team members and community contributors.
-If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
-
-
-## Build from source
-
-### Requirements
-      The GNU toolchain
-      GNU make
-      pthreads
-      zlib-dev (optional, for gzip compression support)
-      libssl-dev (optional, for SSL and SASL SCRAM support)
-      libsasl2-dev (optional, for SASL GSSAPI support)
-      libzstd-dev (optional, for ZStd compression support)
-
-**NOTE**: Static linking of ZStd (requires zstd >= 1.2.1) in the producer
-          enables encoding the original size in the compression frame header,
-          which will speed up the consumer.
-          Use `STATIC_LIB_libzstd=/path/to/libzstd.a ./configure --enable-static`
-          to enable static ZStd linking.
-          MacOSX example:
-          `STATIC_LIB_libzstd=$(brew ls -v zstd | grep libzstd.a$) ./configure --enable-static`
-
-
-### Building
-
-      ./configure
-      # Or, to automatically install dependencies using the system's package manager:
-      # ./configure --install-deps
-      # Or, build dependencies from source:
-      # ./configure --install-deps --source-deps-only
-
-      make
-      sudo make install
-
-
-**NOTE**: See [README.win32](README.win32) for instructions on how to build
-          on Windows with Microsoft Visual Studio.
-
-**NOTE**: See [CMake instructions](packaging/cmake/README.md) for the experimental
-          CMake build (unsupported).
-
-
-## Usage in code
-
-1. Refer to the [examples directory](examples/) for code using:
-
-* Producers: basic producers, idempotent producers, transactional producers.
-* Consumers: basic consumers, reading batches of messages.
-* Performance and latency testing tools.
-
-2. Refer to the [examples GitHub repo](https://github.com/confluentinc/examples/tree/master/clients/cloud/c) for code connecting to a cloud streaming data service based on Apache Kafka.
-
-3. Link your program with `-lrdkafka` (C) or `-lrdkafka++` (C++).
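-
-For a quick start, here is a minimal, hedged sketch of a complete producer
-program (not one of the bundled examples; the broker address `localhost:9092`
-and topic name `test` are placeholders):
-
-```c
-#include <stdio.h>
-#include <string.h>
-#include <librdkafka/rdkafka.h>
-
-int main(void) {
-        char errstr[512];
-        rd_kafka_conf_t *conf = rd_kafka_conf_new();
-
-        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
-                fprintf(stderr, "%s\n", errstr);
-                return 1;
-        }
-
-        /* On success rd_kafka_new() takes ownership of `conf`. */
-        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-                                      errstr, sizeof(errstr));
-        if (!rk) {
-                fprintf(stderr, "%s\n", errstr);
-                return 1;
-        }
-
-        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "test", NULL);
-        const char *msg = "hello librdkafka";
-
-        rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
-                         (void *)msg, strlen(msg), NULL, 0, NULL);
-
-        rd_kafka_flush(rk, 10 * 1000); /* wait up to 10s for delivery */
-        rd_kafka_topic_destroy(rkt);
-        rd_kafka_destroy(rk);
-        return 0;
-}
-```
-
-Compile and link it with `cc example.c -lrdkafka`, per step 3 above.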
-
-
-## Commercial support
-
-Commercial support is available from [Confluent Inc](https://www.confluent.io/).
-
-
-## Community support
-
-**Only the [last official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.**
-
-File bug reports, feature requests and questions using
-[GitHub Issues](https://github.com/edenhill/librdkafka/issues).
-
-Questions and discussions are also welcome on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel.
-
-
-# Language bindings #
-
- * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet))
- * C++: [cppkafka](https://github.com/mfontanini/cppkafka)
- * C++: [modern-cpp-kafka](https://github.com/Morgan-Stanley/modern-cpp-kafka)
- * Common Lisp: [cl-rdkafka](https://github.com/SahilKang/cl-rdkafka)
- * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/)
- * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d)
- * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf)
- * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go)
- * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka)
- * Lua: [luardkafka](https://github.com/mistsv/luardkafka)
- * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka)
- * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka)
- * Perl: [Net::Kafka](https://github.com/bookingcom/perl-Net-Kafka)
- * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka)
- * PHP: [php-simple-kafka-client](https://github.com/php-kafka/php-simple-kafka-client)
- * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python)
- * Python: [PyKafka](https://github.com/Parsely/pykafka)
- * Ruby: [Hermann](https://github.com/reiseburo/hermann)
- * Ruby: [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby)
- * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka)
- * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl)
- * Shell: [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka command line tool
- * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka)
-
-
-See [Powered by librdkafka](https://github.com/edenhill/librdkafka/wiki/Powered-by-librdkafka) for an incomplete list of librdkafka users.
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/STATISTICS.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/STATISTICS.md
deleted file mode 100755
index 0a21ee08..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/deps/share/doc/librdkafka/STATISTICS.md
+++ /dev/null
@@ -1,621 +0,0 @@
-# Statistics
-
-librdkafka may be configured to emit internal metrics at a fixed interval
-by setting the `statistics.interval.ms` configuration property to a value > 0
-and registering a `stats_cb` (or similar, depending on language).
-
-The stats are provided as a JSON object string.
-
-**Note**: The metrics returned may not be completely consistent between
-          brokers, toppars and totals, due to the internal asynchronous
-          nature of librdkafka.
-          E.g., the top level `tx` total may be less than the sum of
-          the broker `tx` values which it represents.
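-
-As a hedged sketch of wiring this up with the C API (the 5000 ms interval and
-the callback body are illustrative assumptions):
-
-```c
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-/* Illustrative stats callback: real applications would parse the JSON.
- * Returning 0 lets librdkafka free `json`; returning 1 would instead
- * transfer ownership of `json` to the application. */
-static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
-                    void *opaque) {
-        (void)rk; (void)opaque;
-        printf("stats: %.*s\n", (int)json_len, json);
-        return 0;
-}
-
-static void setup_stats(rd_kafka_conf_t *conf) {
-        char errstr[512];
-        /* Emit statistics every 5 seconds (an arbitrary example value). */
-        rd_kafka_conf_set(conf, "statistics.interval.ms", "5000",
-                          errstr, sizeof(errstr));
-        rd_kafka_conf_set_stats_cb(conf, stats_cb);
-}
-```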
- E.g., the top level `tx` total may be less than the sum of
- the broker `tx` values which it represents.
-
-
-## General structure
-
-All fields that contain sizes are in bytes unless otherwise noted.
-
-```
-{
- <top-level fields>
- "brokers": {
-    <broker fields>,
-    "toppars": { <toppar fields> }
- },
- "topics": {
-    <topic fields>,
-    "partitions": {
-       <partition fields>
-    }
- }
-[, "cgrp": { <consumer group fields> } ]
-[, "eos": { <EOS fields> } ]
-}
-```
-
-## Field type
-
-Fields are represented as follows:
- * string - UTF8 string.
- * int - Integer counter (64 bits wide). Ever increasing.
- * int gauge - Integer gauge (64 bits wide). Will be reset to 0 on each stats emit.
- * object - Nested JSON object.
- * bool - `true` or `false`.
-
-
-## Top-level
-
-Field | Type | Example | Description
------ | ---- | ------- | -----------
-name | string | `"rdkafka#producer-1"` | Handle instance name
-client_id | string | `"rdkafka"` | The configured (or default) `client.id`
-type | string | `"producer"` | Instance type (producer or consumer)
-ts | int | 12345678912345 | librdkafka's internal monotonic clock (microseconds)
-time | int | | Wall clock time in seconds since the epoch
-age | int | | Time since this client instance was created (microseconds)
-replyq | int gauge | | Number of ops (callbacks, events, etc) waiting in queue for application to serve with rd_kafka_poll()
-msg_cnt | int gauge | | Current number of messages in producer queues
-msg_size | int gauge | | Current total size of messages in producer queues
-msg_max | int | | Threshold: maximum number of messages allowed on the producer queues
-msg_size_max | int | | Threshold: maximum total size of messages allowed on the producer queues
-tx | int | | Total number of requests sent to Kafka brokers
-tx_bytes | int | | Total number of bytes transmitted to Kafka brokers
-rx | int | | Total number of responses received from Kafka brokers
-rx_bytes | int | | Total number of bytes received from Kafka brokers
-txmsgs | int | | Total number of messages transmitted (produced) to Kafka brokers
-txmsg_bytes | int | | Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers
-rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.
-rxmsg_bytes | int | | Total number of message bytes (including framing) received from Kafka brokers
-simple_cnt | int gauge | | Internal tracking of legacy vs new consumer API state
-metadata_cache_cnt | int gauge | | Number of topics in the metadata cache.
-brokers | object | | Dict of brokers, key is broker name, value is object. See **brokers** below
-topics | object | | Dict of topics, key is topic name, value is object. See **topics** below
-cgrp | object | | Consumer group metrics. See **cgrp** below
-eos | object | | EOS / Idempotent producer state and metrics. See **eos** below
-
-## brokers
-
-Per broker statistics.
- -Field | Type | Example | Description ------ | ---- | ------- | ----------- -name | string | `"example.com:9092/13"` | Broker hostname, port and broker id -nodeid | int | 13 | Broker id (-1 for bootstraps) -nodename | string | `"example.com:9092"` | Broker hostname -source | string | `"configured"` | Broker source (learned, configured, internal, logical) -state | string | `"UP"` | Broker state (INIT, DOWN, CONNECT, AUTH, APIVERSION_QUERY, AUTH_HANDSHAKE, UP, UPDATE) -stateage | int gauge | | Time since last broker state change (microseconds) -outbuf_cnt | int gauge | | Number of requests awaiting transmission to broker -outbuf_msg_cnt | int gauge | | Number of messages awaiting transmission to broker -waitresp_cnt | int gauge | | Number of requests in-flight to broker awaiting response -waitresp_msg_cnt | int gauge | | Number of messages in-flight to broker awaiting response -tx | int | | Total number of requests sent -txbytes | int | | Total number of bytes sent -txerrs | int | | Total number of transmission errors -txretries | int | | Total number of request retries -txidle | int | | Microseconds since last socket send (or -1 if no sends yet for current connection). -req_timeouts | int | | Total number of requests timed out -rx | int | | Total number of responses received -rxbytes | int | | Total number of bytes received -rxerrs | int | | Total number of receive errors -rxcorriderrs | int | | Total number of unmatched correlation ids in response (typically for timed out requests) -rxpartial | int | | Total number of partial MessageSets received. The broker may return partial responses if the full MessageSet could not fit in the remaining Fetch response size. -rxidle | int | | Microseconds since last socket receive (or -1 if no receives yet for current connection). -req | object | | Request type counters. Object key is the request name, value is the number of requests sent. -zbuf_grow | int | | Total number of decompression buffer size increases -buf_grow | int | | Total number of buffer size increases (deprecated, unused) -wakeups | int | | Broker thread poll wakeups -connects | int | | Number of connection attempts, including successful and failed, and name resolution failures. -disconnects | int | | Number of disconnects (triggered by broker, network, load-balancer, etc.). -int_latency | object | | Internal producer queue latency in microseconds. See *Window stats* below -outbuf_latency | object | | Internal request queue latency in microseconds. This is the time between a request is enqueued on the transmit (outbuf) queue and the time the request is written to the TCP socket. Additional buffering and latency may be incurred by the TCP stack and network. See *Window stats* below -rtt | object | | Broker latency / round-trip time in microseconds. See *Window stats* below -throttle | object | | Broker throttling time in milliseconds. See *Window stats* below -toppars | object | | Partitions handled by this broker handle. Key is "topic-partition". See *brokers.toppars* below - - -## Window stats - -Rolling window statistics. The values are in microseconds unless otherwise stated. 
-
-Field | Type | Example | Description
------ | ---- | ------- | -----------
-min | int gauge | | Smallest value
-max | int gauge | | Largest value
-avg | int gauge | | Average value
-sum | int gauge | | Sum of values
-cnt | int gauge | | Number of values sampled
-stddev | int gauge | | Standard deviation (based on histogram)
-hdrsize | int gauge | | Memory size of Hdr Histogram
-p50 | int gauge | | 50th percentile
-p75 | int gauge | | 75th percentile
-p90 | int gauge | | 90th percentile
-p95 | int gauge | | 95th percentile
-p99 | int gauge | | 99th percentile
-p99_99 | int gauge | | 99.99th percentile
-outofrange | int gauge | | Values skipped due to out of histogram range
-
-
-## brokers.toppars
-
-Topic partition assigned to broker.
-
-Field | Type | Example | Description
------ | ---- | ------- | -----------
-topic | string | `"mytopic"` | Topic name
-partition | int | 3 | Partition id
-
-## topics
-
-Field | Type | Example | Description
------ | ---- | ------- | -----------
-topic | string | `"mytopic"` | Topic name
-age | int gauge | | Age of client's topic object (milliseconds)
-metadata_age | int gauge | | Age of metadata from broker for this topic (milliseconds)
-batchsize | object | | Batch sizes in bytes. See *Window stats*
-batchcnt | object | | Batch message counts. See *Window stats*
-partitions | object | | Partitions dict, key is partition id. See **partitions** below.
-
-
-## partitions
-
-Field | Type | Example | Description
------ | ---- | ------- | -----------
-partition | int | 3 | Partition Id (-1 for internal UA/UnAssigned partition)
-broker | int | | The id of the broker that messages are currently being fetched from
-leader | int | | Current leader broker id
-desired | bool | | Partition is explicitly desired by application
-unknown | bool | | Partition not seen in topic metadata from broker
-msgq_cnt | int gauge | | Number of messages waiting to be produced in first-level queue
-msgq_bytes | int gauge | | Number of bytes in msgq_cnt
-xmit_msgq_cnt | int gauge | | Number of messages ready to be produced in transmit queue
-xmit_msgq_bytes | int gauge | | Number of bytes in xmit_msgq
-fetchq_cnt | int gauge | | Number of pre-fetched messages in fetch queue
-fetchq_size | int gauge | | Bytes in fetchq
-fetch_state | string | `"active"` | Consumer fetch state for this partition (none, stopping, stopped, offset-query, offset-wait, active).
-query_offset | int gauge | | Current/Last logical offset query
-next_offset | int gauge | | Next offset to fetch
-app_offset | int gauge | | Offset of last message passed to application + 1
-stored_offset | int gauge | | Offset to be committed
-committed_offset | int gauge | | Last committed offset
-eof_offset | int gauge | | Last PARTITION_EOF signaled offset
-lo_offset | int gauge | | Partition's low watermark offset on broker
-hi_offset | int gauge | | Partition's high watermark offset on broker
-ls_offset | int gauge | | Partition's last stable offset on broker, or the same as hi_offset if the broker version is less than 0.11.0.0.
-consumer_lag | int gauge | | Difference between (hi_offset or ls_offset) and committed_offset. hi_offset is used when isolation.level=read_uncommitted, otherwise ls_offset.
-consumer_lag_stored | int gauge | | Difference between (hi_offset or ls_offset) and stored_offset. See consumer_lag and stored_offset.
-txmsgs | int | | Total number of messages transmitted (produced) -txbytes | int | | Total number of bytes transmitted for txmsgs -rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc). -rxbytes | int | | Total number of bytes received for rxmsgs -msgs | int | | Total number of messages received (consumer, same as rxmsgs), or total number of messages produced (possibly not yet transmitted) (producer). -rx_ver_drops | int | | Dropped outdated messages -msgs_inflight | int gauge | | Current number of messages in-flight to/from broker -next_ack_seq | int gauge | | Next expected acked sequence (idempotent producer) -next_err_seq | int gauge | | Next expected errored sequence (idempotent producer) -acked_msgid | int | | Last acked internal message id (idempotent producer) - -## cgrp - -Field | Type | Example | Description ------ | ---- | ------- | ----------- -state | string | "up" | Local consumer group handler's state. -stateage | int gauge | | Time elapsed since last state change (milliseconds). -join_state | string | "assigned" | Local consumer group handler's join state. -rebalance_age | int gauge | | Time elapsed since last rebalance (assign or revoke) (milliseconds). -rebalance_cnt | int | | Total number of rebalances (assign or revoke). -rebalance_reason | string | | Last rebalance reason, or empty string. -assignment_size | int gauge | | Current assignment's partition count. - - -## eos - -Field | Type | Example | Description ------ | ---- | ------- | ----------- -idemp_state | string | "Assigned" | Current idempotent producer id state. -idemp_stateage | int gauge | | Time elapsed since last idemp_state change (milliseconds). -txn_state | string | "InTransaction" | Current transactional producer state. -txn_stateage | int gauge | | Time elapsed since last txn_state change (milliseconds). -txn_may_enq | bool | | Transactional state allows enqueuing (producing) new messages. -producer_id | int gauge | | The currently assigned Producer ID (or -1). -producer_epoch | int gauge | | The current epoch (or -1). -epoch_cnt | int | | The number of Producer ID assignments since start. - - -# Example output - -This (prettified) example output is from a short-lived producer using the following command: -`rdkafka_performance -b localhost -P -t test -T 1000 -Y 'cat >> stats.json'`. - -Note: this output is prettified using `jq .`, the JSON object emitted by librdkafka does not contain line breaks. 
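-
-For the node-rdkafka binding these files ship with, such snapshots can be observed
-by setting `statistics.interval.ms` and listening for the `event.stats` event; the
-sketch below assumes the event payload carries the raw JSON string in its `message`
-field:
-
-```js
-const Kafka = require('node-rdkafka');
-
-const producer = new Kafka.Producer({
-  'metadata.broker.list': 'localhost:9092', // placeholder broker list
-  'statistics.interval.ms': 1000,           // emit one stats object per second
-});
-
-producer.on('event.stats', (stats) => {
-  // Assumption: stats.message holds the JSON document described above.
-  const snapshot = JSON.parse(stats.message);
-  console.log(snapshot.name, 'tx =', snapshot.tx, 'txmsgs =', snapshot.txmsgs);
-});
-
-producer.connect();
-```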
- -```json -{ - "name": "rdkafka#producer-1", - "client_id": "rdkafka", - "type": "producer", - "ts": 5016483227792, - "time": 1527060869, - "replyq": 0, - "msg_cnt": 22710, - "msg_size": 704010, - "msg_max": 500000, - "msg_size_max": 1073741824, - "simple_cnt": 0, - "metadata_cache_cnt": 1, - "brokers": { - "localhost:9092/2": { - "name": "localhost:9092/2", - "nodeid": 2, - "nodename": "localhost:9092", - "source": "learned", - "state": "UP", - "stateage": 9057234, - "outbuf_cnt": 0, - "outbuf_msg_cnt": 0, - "waitresp_cnt": 0, - "waitresp_msg_cnt": 0, - "tx": 320, - "txbytes": 84283332, - "txerrs": 0, - "txretries": 0, - "req_timeouts": 0, - "rx": 320, - "rxbytes": 15708, - "rxerrs": 0, - "rxcorriderrs": 0, - "rxpartial": 0, - "zbuf_grow": 0, - "buf_grow": 0, - "wakeups": 591067, - "int_latency": { - "min": 86, - "max": 59375, - "avg": 23726, - "sum": 5694616664, - "stddev": 13982, - "p50": 28031, - "p75": 36095, - "p90": 39679, - "p95": 43263, - "p99": 48639, - "p99_99": 59391, - "outofrange": 0, - "hdrsize": 11376, - "cnt": 240012 - }, - "rtt": { - "min": 1580, - "max": 3389, - "avg": 2349, - "sum": 79868, - "stddev": 474, - "p50": 2319, - "p75": 2543, - "p90": 3183, - "p95": 3199, - "p99": 3391, - "p99_99": 3391, - "outofrange": 0, - "hdrsize": 13424, - "cnt": 34 - }, - "throttle": { - "min": 0, - "max": 0, - "avg": 0, - "sum": 0, - "stddev": 0, - "p50": 0, - "p75": 0, - "p90": 0, - "p95": 0, - "p99": 0, - "p99_99": 0, - "outofrange": 0, - "hdrsize": 17520, - "cnt": 34 - }, - "toppars": { - "test-1": { - "topic": "test", - "partition": 1 - } - } - }, - "localhost:9093/3": { - "name": "localhost:9093/3", - "nodeid": 3, - "nodename": "localhost:9093", - "source": "learned", - "state": "UP", - "stateage": 9057209, - "outbuf_cnt": 0, - "outbuf_msg_cnt": 0, - "waitresp_cnt": 0, - "waitresp_msg_cnt": 0, - "tx": 310, - "txbytes": 84301122, - "txerrs": 0, - "txretries": 0, - "req_timeouts": 0, - "rx": 310, - "rxbytes": 15104, - "rxerrs": 0, - "rxcorriderrs": 0, - "rxpartial": 0, - "zbuf_grow": 0, - "buf_grow": 0, - "wakeups": 607956, - "int_latency": { - "min": 82, - "max": 58069, - "avg": 23404, - "sum": 5617432101, - "stddev": 14021, - "p50": 27391, - "p75": 35839, - "p90": 39679, - "p95": 42751, - "p99": 48639, - "p99_99": 58111, - "outofrange": 0, - "hdrsize": 11376, - "cnt": 240016 - }, - "rtt": { - "min": 1704, - "max": 3572, - "avg": 2493, - "sum": 87289, - "stddev": 559, - "p50": 2447, - "p75": 2895, - "p90": 3375, - "p95": 3407, - "p99": 3583, - "p99_99": 3583, - "outofrange": 0, - "hdrsize": 13424, - "cnt": 35 - }, - "throttle": { - "min": 0, - "max": 0, - "avg": 0, - "sum": 0, - "stddev": 0, - "p50": 0, - "p75": 0, - "p90": 0, - "p95": 0, - "p99": 0, - "p99_99": 0, - "outofrange": 0, - "hdrsize": 17520, - "cnt": 35 - }, - "toppars": { - "test-0": { - "topic": "test", - "partition": 0 - } - } - }, - "localhost:9094/4": { - "name": "localhost:9094/4", - "nodeid": 4, - "nodename": "localhost:9094", - "source": "learned", - "state": "UP", - "stateage": 9057207, - "outbuf_cnt": 0, - "outbuf_msg_cnt": 0, - "waitresp_cnt": 0, - "waitresp_msg_cnt": 0, - "tx": 1, - "txbytes": 25, - "txerrs": 0, - "txretries": 0, - "req_timeouts": 0, - "rx": 1, - "rxbytes": 272, - "rxerrs": 0, - "rxcorriderrs": 0, - "rxpartial": 0, - "zbuf_grow": 0, - "buf_grow": 0, - "wakeups": 4, - "int_latency": { - "min": 0, - "max": 0, - "avg": 0, - "sum": 0, - "stddev": 0, - "p50": 0, - "p75": 0, - "p90": 0, - "p95": 0, - "p99": 0, - "p99_99": 0, - "outofrange": 0, - "hdrsize": 11376, - "cnt": 0 - }, - "rtt": { - 
"min": 0, - "max": 0, - "avg": 0, - "sum": 0, - "stddev": 0, - "p50": 0, - "p75": 0, - "p90": 0, - "p95": 0, - "p99": 0, - "p99_99": 0, - "outofrange": 0, - "hdrsize": 13424, - "cnt": 0 - }, - "throttle": { - "min": 0, - "max": 0, - "avg": 0, - "sum": 0, - "stddev": 0, - "p50": 0, - "p75": 0, - "p90": 0, - "p95": 0, - "p99": 0, - "p99_99": 0, - "outofrange": 0, - "hdrsize": 17520, - "cnt": 0 - }, - "toppars": {} - } - }, - "topics": { - "test": { - "topic": "test", - "metadata_age": 9060, - "batchsize": { - "min": 99, - "max": 391805, - "avg": 272593, - "sum": 18808985, - "stddev": 180408, - "p50": 393215, - "p75": 393215, - "p90": 393215, - "p95": 393215, - "p99": 393215, - "p99_99": 393215, - "outofrange": 0, - "hdrsize": 14448, - "cnt": 69 - }, - "batchcnt": { - "min": 1, - "max": 10000, - "avg": 6956, - "sum": 480028, - "stddev": 4608, - "p50": 10047, - "p75": 10047, - "p90": 10047, - "p95": 10047, - "p99": 10047, - "p99_99": 10047, - "outofrange": 0, - "hdrsize": 8304, - "cnt": 69 - }, - "partitions": { - "0": { - "partition": 0, - "broker": 3, - "leader": 3, - "desired": false, - "unknown": false, - "msgq_cnt": 1, - "msgq_bytes": 31, - "xmit_msgq_cnt": 0, - "xmit_msgq_bytes": 0, - "fetchq_cnt": 0, - "fetchq_size": 0, - "fetch_state": "none", - "query_offset": 0, - "next_offset": 0, - "app_offset": -1001, - "stored_offset": -1001, - "commited_offset": -1001, - "committed_offset": -1001, - "eof_offset": -1001, - "lo_offset": -1001, - "hi_offset": -1001, - "consumer_lag": -1, - "txmsgs": 2150617, - "txbytes": 66669127, - "rxmsgs": 0, - "rxbytes": 0, - "msgs": 2160510, - "rx_ver_drops": 0 - }, - "1": { - "partition": 1, - "broker": 2, - "leader": 2, - "desired": false, - "unknown": false, - "msgq_cnt": 0, - "msgq_bytes": 0, - "xmit_msgq_cnt": 0, - "xmit_msgq_bytes": 0, - "fetchq_cnt": 0, - "fetchq_size": 0, - "fetch_state": "none", - "query_offset": 0, - "next_offset": 0, - "app_offset": -1001, - "stored_offset": -1001, - "commited_offset": -1001, - "committed_offset": -1001, - "eof_offset": -1001, - "lo_offset": -1001, - "hi_offset": -1001, - "consumer_lag": -1, - "txmsgs": 2150136, - "txbytes": 66654216, - "rxmsgs": 0, - "rxbytes": 0, - "msgs": 2159735, - "rx_ver_drops": 0 - }, - "-1": { - "partition": -1, - "broker": -1, - "leader": -1, - "desired": false, - "unknown": false, - "msgq_cnt": 0, - "msgq_bytes": 0, - "xmit_msgq_cnt": 0, - "xmit_msgq_bytes": 0, - "fetchq_cnt": 0, - "fetchq_size": 0, - "fetch_state": "none", - "query_offset": 0, - "next_offset": 0, - "app_offset": -1001, - "stored_offset": -1001, - "commited_offset": -1001, - "committed_offset": -1001, - "eof_offset": -1001, - "lo_offset": -1001, - "hi_offset": -1001, - "consumer_lag": -1, - "txmsgs": 0, - "txbytes": 0, - "rxmsgs": 0, - "rxbytes": 0, - "msgs": 1177, - "rx_ver_drops": 0 - } - } - } - }, - "tx": 631, - "tx_bytes": 168584479, - "rx": 631, - "rx_bytes": 31084, - "txmsgs": 4300753, - "txmsg_bytes": 133323343, - "rxmsgs": 0, - "rxmsg_bytes": 0 -} -``` diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/node-librdkafka.target.mk b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/node-librdkafka.target.mk deleted file mode 100644 index 3960e163..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/build/node-librdkafka.target.mk +++ /dev/null @@ -1,181 +0,0 @@ -# This file is generated by 
gyp; do not edit. - -TOOLSET := target -TARGET := node-librdkafka -DEFS_Debug := \ - '-DNODE_GYP_MODULE_NAME=node-librdkafka' \ - '-DUSING_UV_SHARED=1' \ - '-DUSING_V8_SHARED=1' \ - '-DV8_DEPRECATION_WARNINGS=1' \ - '-DV8_DEPRECATION_WARNINGS' \ - '-DV8_IMMINENT_DEPRECATION_WARNINGS' \ - '-D_LARGEFILE_SOURCE' \ - '-D_FILE_OFFSET_BITS=64' \ - '-D__STDC_FORMAT_MACROS' \ - '-DOPENSSL_NO_PINSHARED' \ - '-DOPENSSL_THREADS' \ - '-DBUILDING_NODE_EXTENSION' \ - '-DDEBUG' \ - '-D_DEBUG' \ - '-DV8_ENABLE_CHECKS' - -# Flags passed to all source files. -CFLAGS_Debug := \ - -fPIC \ - -pthread \ - -Wall \ - -Wextra \ - -Wno-unused-parameter \ - -m64 \ - -g \ - -O0 - -# Flags passed to only C files. -CFLAGS_C_Debug := - -# Flags passed to only C++ files. -CFLAGS_CC_Debug := \ - -fno-exceptions \ - -std=gnu++1y \ - -std=c++14 - -INCS_Debug := \ - -I/tmp/.cache/node-gyp/14.18.1/include/node \ - -I/tmp/.cache/node-gyp/14.18.1/src \ - -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config \ - -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include \ - -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include \ - -I/tmp/.cache/node-gyp/14.18.1/deps/zlib \ - -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include \ - -I$(srcdir)/../nan \ - -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka \ - -I$(srcdir)/deps/librdkafka/src \ - -I$(srcdir)/deps/librdkafka/src-cpp - -DEFS_Release := \ - '-DNODE_GYP_MODULE_NAME=node-librdkafka' \ - '-DUSING_UV_SHARED=1' \ - '-DUSING_V8_SHARED=1' \ - '-DV8_DEPRECATION_WARNINGS=1' \ - '-DV8_DEPRECATION_WARNINGS' \ - '-DV8_IMMINENT_DEPRECATION_WARNINGS' \ - '-D_LARGEFILE_SOURCE' \ - '-D_FILE_OFFSET_BITS=64' \ - '-D__STDC_FORMAT_MACROS' \ - '-DOPENSSL_NO_PINSHARED' \ - '-DOPENSSL_THREADS' \ - '-DBUILDING_NODE_EXTENSION' - -# Flags passed to all source files. -CFLAGS_Release := \ - -fPIC \ - -pthread \ - -Wall \ - -Wextra \ - -Wno-unused-parameter \ - -m64 \ - -O3 \ - -fno-omit-frame-pointer - -# Flags passed to only C files. -CFLAGS_C_Release := - -# Flags passed to only C++ files. -CFLAGS_CC_Release := \ - -fno-exceptions \ - -std=gnu++1y \ - -std=c++14 - -INCS_Release := \ - -I/tmp/.cache/node-gyp/14.18.1/include/node \ - -I/tmp/.cache/node-gyp/14.18.1/src \ - -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/config \ - -I/tmp/.cache/node-gyp/14.18.1/deps/openssl/openssl/include \ - -I/tmp/.cache/node-gyp/14.18.1/deps/uv/include \ - -I/tmp/.cache/node-gyp/14.18.1/deps/zlib \ - -I/tmp/.cache/node-gyp/14.18.1/deps/v8/include \ - -I$(srcdir)/../nan \ - -I/tmp/1431999136518149/cd6bbd60-ccfb-4157-8a94-407cf273a6e0-1431999136518149/nodejs/node_modules/node-rdkafka \ - -I$(srcdir)/deps/librdkafka/src \ - -I$(srcdir)/deps/librdkafka/src-cpp - -OBJS := \ - $(obj).target/$(TARGET)/src/binding.o \ - $(obj).target/$(TARGET)/src/callbacks.o \ - $(obj).target/$(TARGET)/src/common.o \ - $(obj).target/$(TARGET)/src/config.o \ - $(obj).target/$(TARGET)/src/connection.o \ - $(obj).target/$(TARGET)/src/errors.o \ - $(obj).target/$(TARGET)/src/kafka-consumer.o \ - $(obj).target/$(TARGET)/src/producer.o \ - $(obj).target/$(TARGET)/src/topic.o \ - $(obj).target/$(TARGET)/src/workers.o \ - $(obj).target/$(TARGET)/src/admin.o - -# Add to the list of files we specially track dependencies for. -all_deps += $(OBJS) - -# Make sure our dependencies are built before any of us. -$(OBJS): | $(obj).target/deps/librdkafka.stamp - -# CFLAGS et al overrides must be target-local. -# See "Target-specific Variable Values" in the GNU Make manual. 
-$(OBJS): TOOLSET := $(TOOLSET) -$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE)) -$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE)) - -# Suffix rules, putting all outputs into $(obj). - -$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) - -# Try building from generated source, too. - -$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) - -$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD - @$(call do_cmd,cxx,1) - -# End of this set of suffix rules -### Rules for final target. -LDFLAGS_Debug := \ - -pthread \ - -rdynamic \ - -m64 - -LDFLAGS_Release := \ - -pthread \ - -rdynamic \ - -m64 - -LIBS := \ - ../build/deps/librdkafka.so \ - ../build/deps/librdkafka++.so \ - -Wl,-rpath='$$ORIGIN/../deps' - -$(obj).target/node-librdkafka.node: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE)) -$(obj).target/node-librdkafka.node: LIBS := $(LIBS) -$(obj).target/node-librdkafka.node: TOOLSET := $(TOOLSET) -$(obj).target/node-librdkafka.node: $(OBJS) FORCE_DO_CMD - $(call do_cmd,solink_module) - -all_deps += $(obj).target/node-librdkafka.node -# Add target alias -.PHONY: node-librdkafka -node-librdkafka: $(builddir)/node-librdkafka.node - -# Copy this to the executable output path. -$(builddir)/node-librdkafka.node: TOOLSET := $(TOOLSET) -$(builddir)/node-librdkafka.node: $(obj).target/node-librdkafka.node FORCE_DO_CMD - $(call do_cmd,copy) - -all_deps += $(builddir)/node-librdkafka.node -# Short alias for building this executable. -.PHONY: node-librdkafka.node -node-librdkafka.node: $(obj).target/node-librdkafka.node $(builddir)/node-librdkafka.node - -# Add executable to "all" target. 
-.PHONY: all -all: $(builddir)/node-librdkafka.node - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-correct-version.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-correct-version.js deleted file mode 100644 index 5b2506e5..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-correct-version.js +++ /dev/null @@ -1,67 +0,0 @@ -const path = require('path'); -const fs = require('fs'); - -const root = path.resolve(__dirname, '..', '..'); -const pjsPath = path.join(root, 'package.json'); - -const librdkafkaPath = path.resolve(root, 'deps', 'librdkafka'); -const pjs = require(pjsPath); - -const majorMask = 0xff000000; -const minorMask = 0x00ff0000; -const patchMask = 0x0000ff00; -const revMask = 0x000000ff; - -// Read the header file -const headerFileLines = fs.readFileSync(path.resolve(librdkafkaPath, 'src', 'rdkafka.h')).toString().split('\n'); -const precompilerDefinitions = headerFileLines.filter((line) => line.startsWith('#def')); -const definedLines = precompilerDefinitions.map(definedLine => { - const content = definedLine.split(' ').filter(v => v != ''); - - return { - command: content[0], - key: content[1], - value: content[2] - }; -}); - -const defines = {}; - -for (let item of definedLines) { - if (item.command == '#define') { - defines[item.key] = item.value; - } -} - -function parseLibrdkafkaVersion(version) { - const intRepresentation = parseInt(version); - - const major = (intRepresentation & majorMask) >> (8 * 3); - const minor = (intRepresentation & minorMask) >> (8 * 2); - const patch = (intRepresentation & patchMask) >> (8 * 1); - const rev = (intRepresentation & revMask) >> (8 * 0); - - return { - major, - minor, - patch, - rev - }; -} - -function versionAsString(version) { - return [ - version.major, - version.minor, - version.patch, - version.rev === 255 ? 
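-    // A rev byte of 0xff (255) marks a final release, so the fourth component
-    // is dropped in that case; e.g. RD_KAFKA_VERSION 0x010802ff parses to
-    // major 1, minor 8, patch 2, rev 255 and renders as "1.8.2".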
null : version.rev, - ].filter(v => v != null).join('.'); -} - -const librdkafkaVersion = parseLibrdkafkaVersion(defines.RD_KAFKA_VERSION); -const versionString = versionAsString(librdkafkaVersion); - -if (pjs.librdkafka !== versionString) { - console.error(`Librdkafka version of ${versionString} does not match package json: ${pjs.librdkafka}`); - process.exit(1); -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-exists.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-exists.js deleted file mode 100644 index c1564764..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/checks/librdkafka-exists.js +++ /dev/null @@ -1,13 +0,0 @@ -const path = require('path'); -const fs = require('fs'); - -const root = path.resolve(__dirname, '..', '..'); -const librdkafkaPath = path.resolve(root, 'deps', 'librdkafka'); - -// Ensure librdkafka is in the deps directory - this makes sure we don't accidentally -// publish on a non recursive clone :) - -if (!fs.existsSync(librdkafkaPath)) { - console.error(`Could not find librdkafka at path ${librdkafkaPath}`); - process.exit(1); -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/librdkafka-defs-generator.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/librdkafka-defs-generator.js deleted file mode 100644 index 2a1364a5..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/librdkafka-defs-generator.js +++ /dev/null @@ -1,193 +0,0 @@ -const fs = require('fs'); -const path = require('path'); - -const LIBRDKAFKA_VERSION = require('../package.json').librdkafka; -const LIBRDKAFKA_DIR = path.resolve(__dirname, '../deps/librdkafka/'); - -function getHeader(file) { - return `// ====== Generated from librdkafka ${LIBRDKAFKA_VERSION} file ${file} ======`; -} - -function readLibRDKafkaFile(file) { - return fs.readFileSync(path.resolve(LIBRDKAFKA_DIR, file)).toString(); -} - -function extractConfigItems(configStr) { - const [_header, config] = configStr.split(/-{5,}\|.*/); - - const re = /(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*)/g; - - const configItems = []; - - let m; - do { - m = re.exec(config); - if (m) { - const [ - _fullString, - property, - consumerOrProducer, - range, - defaultValue, - importance, - descriptionWithType, - ] = m.map(el => (typeof el === 'string' ? el.trim() : el)); - - const splitDescriptionRe = /(.*?)\s*?
.*?:\s.*?(.*?)\*/; - const [_, description, rawType] = splitDescriptionRe.exec(descriptionWithType); - - configItems.push({ - property, - consumerOrProducer, - range, - defaultValue, - importance, - description, - rawType, - }); - } - } while (m); - - return configItems.map(processItem); -} - -function processItem(configItem) { - // These items are overwritten by node-rdkafka - switch (configItem.property) { - case 'dr_msg_cb': - return { ...configItem, type: 'boolean' }; - case 'dr_cb': - return { ...configItem, type: 'boolean | Function' }; - case 'rebalance_cb': - return { ...configItem, type: 'boolean | Function' }; - case 'offset_commit_cb': - return { ...configItem, type: 'boolean | Function' }; - } - - switch (configItem.rawType) { - case 'integer': - return { ...configItem, type: 'number' }; - case 'boolean': - return { ...configItem, type: 'boolean' }; - case 'string': - case 'CSV flags': - return { ...configItem, type: 'string' }; - case 'enum value': - return { - ...configItem, - type: configItem.range - .split(',') - .map(str => `'${str.trim()}'`) - .join(' | '), - }; - default: - return { ...configItem, type: 'any' }; - } -} - -function generateInterface(interfaceDef, configItems) { - const fields = configItems - .map(item => - [ - `/**`, - ` * ${item.description}`, - ...(item.defaultValue ? [` *`, ` * @default ${item.defaultValue}`] : []), - ` */`, - `"${item.property}"?: ${item.type};`, - ] - .map(row => ` ${row}`) - .join('\n') - ) - .join('\n\n'); - - return `export interface ` + interfaceDef + ' {\n' + fields + '\n}'; -} - -function addSpecialGlobalProps(globalProps) { - globalProps.push({ - "property": "event_cb", - "consumerOrProducer": "*", - "range": "", - "defaultValue": "true", - "importance": "low", - "description": "Enables or disables `event.*` emitting.", - "rawType": "boolean", - "type": "boolean" - }); -} - -function generateConfigDTS(file) { - const configuration = readLibRDKafkaFile(file); - const [globalStr, topicStr] = configuration.split('Topic configuration properties'); - - const [globalProps, topicProps] = [extractConfigItems(globalStr), extractConfigItems(topicStr)]; - - addSpecialGlobalProps(globalProps); - - const [globalSharedProps, producerGlobalProps, consumerGlobalProps] = [ - globalProps.filter(i => i.consumerOrProducer === '*'), - globalProps.filter(i => i.consumerOrProducer === 'P'), - globalProps.filter(i => i.consumerOrProducer === 'C'), - ]; - - const [topicSharedProps, producerTopicProps, consumerTopicProps] = [ - topicProps.filter(i => i.consumerOrProducer === '*'), - topicProps.filter(i => i.consumerOrProducer === 'P'), - topicProps.filter(i => i.consumerOrProducer === 'C'), - ]; - - let output = `${getHeader(file)} -// Code that generated this is a derivative work of the code from Nam Nguyen -// https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb - -`; - - output += [ - generateInterface('GlobalConfig', globalSharedProps), - generateInterface('ProducerGlobalConfig extends GlobalConfig', producerGlobalProps), - generateInterface('ConsumerGlobalConfig extends GlobalConfig', consumerGlobalProps), - generateInterface('TopicConfig', topicSharedProps), - generateInterface('ProducerTopicConfig extends TopicConfig', producerTopicProps), - generateInterface('ConsumerTopicConfig extends TopicConfig', consumerTopicProps), - ].join('\n\n'); - - fs.writeFileSync(path.resolve(__dirname, '../config.d.ts'), output); -} - -function updateErrorDefinitions(file) { - const rdkafkacpp_h = readLibRDKafkaFile(file); - const m = /enum ErrorCode 
{([^}]+)}/g.exec(rdkafkacpp_h); - if (!m) { - throw new Error(`Can't read rdkafkacpp.h file`) - } - const body = m[1] - .replace(/(\t)|( +)/g, ' ') - .replace(/\n\n/g, '\n') - .replace(/\s+=\s+/g, ': ') - .replace(/[\t ]*#define +(\w+) +(\w+)/g, (_, define, original) => { - const value = new RegExp(`${original}\\s+=\\s+(\\d+)`).exec(m[1])[1]; - return ` ${define}: ${value},`; - }) - - // validate body - const emptyCheck = body - .replace(/(( \/\*)|( ?\*)).*/g, '') - .replace(/ ERR_\w+: -?\d+,?\r?\n/g, '') - .trim() - if (emptyCheck !== '') { - throw new Error(`Fail to parse ${file}. It contains these extra details:\n${emptyCheck}`); - } - - const error_js_file = path.resolve(__dirname, '../lib/error.js'); - const error_js = fs.readFileSync(error_js_file) - .toString() - .replace(/(\/\/.*\r?\n)?LibrdKafkaError.codes = {[^}]+/g, `${getHeader(file)}\nLibrdKafkaError.codes = {\n${body}`) - - fs.writeFileSync(error_js_file, error_js); - fs.writeFileSync(path.resolve(__dirname, '../errors.d.ts'), `${getHeader(file)}\nexport const CODES: { ERRORS: {${body.replace(/[ \.]*(\*\/\r?\n \w+: )(-?\d+),?/g, ' (**$2**) $1number,')}}}`) -} - -(async function updateTypeDefs() { - generateConfigDTS('CONFIGURATION.md'); - updateErrorDefinitions('src-cpp/rdkafkacpp.h'); -})() diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/prepublish.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/prepublish.js deleted file mode 100644 index 3526be85..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/prepublish.js +++ /dev/null @@ -1,4 +0,0 @@ -require('./checks/librdkafka-exists'); -require('./checks/librdkafka-correct-version'); -require('./librdkafka-defs-generator.js'); -require('./update-version'); diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/update-version.js b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/update-version.js deleted file mode 100644 index d14cdfc2..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/ci/update-version.js +++ /dev/null @@ -1,119 +0,0 @@ -const path = require('path'); -const semver = require('semver'); -const { spawn } = require('child_process'); -const fs = require('fs'); - -const root = path.resolve(__dirname, '..'); -const pjsPath = path.resolve(root, 'package.json'); -const pjs = require(pjsPath); - -function parseVersion(tag) { - const { major, minor, prerelease, patch } = semver.parse(tag); - - // Describe will give is commits since last tag - const [ commitsSinceTag, hash ] = prerelease[0] ? 
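-  // With a git-describe style tag such as "v1.6.1-15-gdeadbee", semver.parse()
-  // yields prerelease ["15-gdeadbee"]; splitting on "-" separates the number of
-  // commits since the tag from the "g"-prefixed abbreviated commit hash.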
prerelease[0].split('-') : [ - 1, - process.env.TRAVIS_COMMIT || '' - ]; - - return { - major, - minor, - prerelease, - patch, - commit: commitsSinceTag - 1, - hash - }; -} - -function getCommandOutput(command, args, cb) { - let output = ''; - - const cmd = spawn(command, args); - - cmd.stdout.on('data', (data) => { - output += data; - }); - - cmd.on('close', (code) => { - if (code != 0) { - cb(new Error(`Command returned unsuccessful code: ${code}`)); - return; - } - - cb(null, output.trim()); - }); -} - -function getVersion(cb) { - // https://docs.travis-ci.com/user/environment-variables/ - if (process.env.TRAVIS_TAG) { - setImmediate(() => cb(null, parseVersion(process.env.TRAVIS_TAG.trim()))); - return; - } - - getCommandOutput('git', ['describe', '--tags'], (err, result) => { - if (err) { - cb(err); - return; - } - - cb(null, parseVersion(result.trim())); - }); -} - -function getBranch(cb) { - if (process.env.TRAVIS_TAG) { - // TRAVIS_BRANCH matches TRAVIS_TAG when TRAVIS_TAG is set - // "git branch --contains tags/TRAVIS_TAG" doesn't work on travis so we have to assume 'master' - setImmediate(() => cb(null, 'master')); - return; - } else if (process.env.TRAVIS_BRANCH) { - setImmediate(() => cb(null, process.env.TRAVIS_BRANCH.trim())); - return; - } - - getCommandOutput('git', ['rev-parse', '--abbrev-ref', 'HEAD'], (err, result) => { - if (err) { - cb(err); - return; - } - - cb(null, result.trim()); - }); -} - -function getPackageVersion(tag, branch) { - let baseVersion = `v${tag.major}.${tag.minor}.${tag.patch}`; - - if (tag.commit === 0 && branch === 'master') { - return baseVersion; - } - - baseVersion += '-'; - - if (branch !== 'master') { - baseVersion += (tag.commit + 1 + '.' + branch); - } else { - baseVersion += (tag.commit + 1); - } - - return baseVersion; -} - -getVersion((err, tag) => { - if (err) { - throw err; - } - - getBranch((err, branch) => { - if (err) { - throw err; - } - - pjs.version = getPackageVersion(tag, branch); - - fs.writeFileSync(pjsPath, JSON.stringify(pjs, null, 2)); - }) - -}); diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/config.d.ts b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/config.d.ts deleted file mode 100644 index e939f995..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/config.d.ts +++ /dev/null @@ -1,1043 +0,0 @@ -// ====== Generated from librdkafka 1.8.2 file CONFIGURATION.md ====== -// Code that generated this is a derivative work of the code from Nam Nguyen -// https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb - -export interface GlobalConfig { - /** - * Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. - * - * @default gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer - */ - "builtin.features"?: string; - - /** - * Client identifier. - * - * @default rdkafka - */ - "client.id"?: string; - - /** - * Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. - */ - "metadata.broker.list"?: string; - - /** - * Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. 
The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. - */ - "bootstrap.servers"?: string; - - /** - * Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the the topic's `max.message.bytes` limit (see Apache Kafka documentation). - * - * @default 1000000 - */ - "message.max.bytes"?: number; - - /** - * Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. - * - * @default 65535 - */ - "message.copy.max.bytes"?: number; - - /** - * Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set. - * - * @default 100000000 - */ - "receive.message.max.bytes"?: number; - - /** - * Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. - * - * @default 1000000 - */ - "max.in.flight.requests.per.connection"?: number; - - /** - * Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. - * - * @default 1000000 - */ - "max.in.flight"?: number; - - /** - * Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s. - * - * @default 300000 - */ - "topic.metadata.refresh.interval.ms"?: number; - - /** - * Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3 - * - * @default 900000 - */ - "metadata.max.age.ms"?: number; - - /** - * When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. - * - * @default 250 - */ - "topic.metadata.refresh.fast.interval.ms"?: number; - - /** - * **DEPRECATED** No longer used. - * - * @default 10 - */ - "topic.metadata.refresh.fast.cnt"?: number; - - /** - * Sparse metadata requests (consumes less network bandwidth) - * - * @default true - */ - "topic.metadata.refresh.sparse"?: boolean; - - /** - * Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. 
If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce(). - * - * @default 30000 - */ - "topic.metadata.propagation.max.ms"?: number; - - /** - * Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. - */ - "topic.blacklist"?: any; - - /** - * A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch - */ - "debug"?: string; - - /** - * Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value. - * - * @default 60000 - */ - "socket.timeout.ms"?: number; - - /** - * **DEPRECATED** No longer used. - * - * @default 1000 - */ - "socket.blocking.max.ms"?: number; - - /** - * Broker socket send buffer size. System default is used if 0. - * - * @default 0 - */ - "socket.send.buffer.bytes"?: number; - - /** - * Broker socket receive buffer size. System default is used if 0. - * - * @default 0 - */ - "socket.receive.buffer.bytes"?: number; - - /** - * Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets - * - * @default false - */ - "socket.keepalive.enable"?: boolean; - - /** - * Disable the Nagle algorithm (TCP_NODELAY) on broker sockets. - * - * @default false - */ - "socket.nagle.disable"?: boolean; - - /** - * Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established. - * - * @default 1 - */ - "socket.max.fails"?: number; - - /** - * How long to cache the broker address resolving results (milliseconds). - * - * @default 1000 - */ - "broker.address.ttl"?: number; - - /** - * Allowed broker IP address families: any, v4, v6 - * - * @default any - */ - "broker.address.family"?: 'any' | 'v4' | 'v6'; - - /** - * Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info). - * - * @default 0 - */ - "connections.max.idle.ms"?: number; - - /** - * **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`. - * - * @default 0 - */ - "reconnect.backoff.jitter.ms"?: number; - - /** - * The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. 
-25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately. - * - * @default 100 - */ - "reconnect.backoff.ms"?: number; - - /** - * The maximum time to wait before reconnecting to a broker after the connection has been closed. - * - * @default 10000 - */ - "reconnect.backoff.max.ms"?: number; - - /** - * librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. - * - * @default 0 - */ - "statistics.interval.ms"?: number; - - /** - * See `rd_kafka_conf_set_events()` - * - * @default 0 - */ - "enabled_events"?: number; - - /** - * Error callback (set with rd_kafka_conf_set_error_cb()) - */ - "error_cb"?: any; - - /** - * Throttle callback (set with rd_kafka_conf_set_throttle_cb()) - */ - "throttle_cb"?: any; - - /** - * Statistics callback (set with rd_kafka_conf_set_stats_cb()) - */ - "stats_cb"?: any; - - /** - * Log callback (set with rd_kafka_conf_set_log_cb()) - */ - "log_cb"?: any; - - /** - * Logging level (syslog(3) levels) - * - * @default 6 - */ - "log_level"?: number; - - /** - * Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. - * - * @default false - */ - "log.queue"?: boolean; - - /** - * Print internal thread name in log messages (useful for debugging librdkafka internals) - * - * @default true - */ - "log.thread.name"?: boolean; - - /** - * If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new(). - * - * @default true - */ - "enable.random.seed"?: boolean; - - /** - * Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value. - * - * @default true - */ - "log.connection.close"?: boolean; - - /** - * Background queue event callback (set with rd_kafka_conf_set_background_event_cb()) - */ - "background_event_cb"?: any; - - /** - * Socket creation callback to provide race-free CLOEXEC - */ - "socket_cb"?: any; - - /** - * Socket connect callback - */ - "connect_cb"?: any; - - /** - * Socket close callback - */ - "closesocket_cb"?: any; - - /** - * File open callback to provide race-free CLOEXEC - */ - "open_cb"?: any; - - /** - * Application opaque (set with rd_kafka_conf_set_opaque()) - */ - "opaque"?: any; - - /** - * Default topic configuration for automatically subscribed topics - */ - "default_topic_conf"?: any; - - /** - * Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. - * - * @default 0 - */ - "internal.termination.signal"?: number; - - /** - * Request broker's supported API versions to adjust functionality to available protocol features. 
If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. - * - * @default true - */ - "api.version.request"?: boolean; - - /** - * Timeout for broker API version requests. - * - * @default 10000 - */ - "api.version.request.timeout.ms"?: number; - - /** - * Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). - * - * @default 0 - */ - "api.version.fallback.ms"?: number; - - /** - * Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests. - * - * @default 0.10.0 - */ - "broker.version.fallback"?: string; - - /** - * Protocol used to communicate with brokers. - * - * @default plaintext - */ - "security.protocol"?: 'plaintext' | 'ssl' | 'sasl_plaintext' | 'sasl_ssl'; - - /** - * A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3). - */ - "ssl.cipher.suites"?: string; - - /** - * The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required. - */ - "ssl.curves.list"?: string; - - /** - * The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required. - */ - "ssl.sigalgs.list"?: string; - - /** - * Path to client's private key (PEM) used for authentication. - */ - "ssl.key.location"?: string; - - /** - * Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`) - */ - "ssl.key.password"?: string; - - /** - * Client's private key string (PEM format) used for authentication. - */ - "ssl.key.pem"?: string; - - /** - * Client's private key as set by rd_kafka_conf_set_ssl_cert() - */ - "ssl_key"?: any; - - /** - * Path to client's public key (PEM) used for authentication. - */ - "ssl.certificate.location"?: string; - - /** - * Client's public key string (PEM format) used for authentication. - */ - "ssl.certificate.pem"?: string; - - /** - * Client's public key as set by rd_kafka_conf_set_ssl_cert() - */ - "ssl_certificate"?: any; - - /** - * File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. 
It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
- */
- "ssl.ca.location"?: string;
-
- /**
- * CA certificate string (PEM format) for verifying the broker's key.
- */
- "ssl.ca.pem"?: string;
-
- /**
- * CA certificate as set by rd_kafka_conf_set_ssl_cert()
- */
- "ssl_ca"?: any;
-
- /**
- * Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
- *
- * @default Root
- */
- "ssl.ca.certificate.stores"?: string;
-
- /**
- * Path to CRL for verifying broker's certificate validity.
- */
- "ssl.crl.location"?: string;
-
- /**
- * Path to client's keystore (PKCS#12) used for authentication.
- */
- "ssl.keystore.location"?: string;
-
- /**
- * Client's keystore (PKCS#12) password.
- */
- "ssl.keystore.password"?: string;
-
- /**
- * Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.
- */
- "ssl.engine.location"?: string;
-
- /**
- * OpenSSL engine id is the name used for loading engine.
- *
- * @default dynamic
- */
- "ssl.engine.id"?: string;
-
- /**
- * OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
- */
- "ssl_engine_callback_data"?: any;
-
- /**
- * Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
- *
- * @default true
- */
- "enable.ssl.certificate.verification"?: boolean;
-
- /**
- * Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
- *
- * @default none
- */
- "ssl.endpoint.identification.algorithm"?: 'none' | 'https';
-
- /**
- * Callback to verify the broker certificate chain.
- */
- "ssl.certificate.verify_cb"?: any;
-
- /**
- * SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
- *
- * @default GSSAPI
- */
- "sasl.mechanisms"?: string;
-
- /**
- * Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
- *
- * @default GSSAPI
- */
- "sasl.mechanism"?: string;
-
- /**
- * Kerberos principal name that Kafka runs as, not including /hostname@REALM
- *
- * @default kafka
- */
- "sasl.kerberos.service.name"?: string;
-
- /**
- * This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
- *
- * @default kafkaclient
- */
- "sasl.kerberos.principal"?: string;
-
- /**
- * Shell command to refresh or acquire the client's Kerberos ticket.
This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value. - * - * @default kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \ - */ - "sasl.kerberos.kinit.cmd"?: string; - - /** - * Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`. - */ - "sasl.kerberos.keytab"?: string; - - /** - * Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0. - * - * @default 60000 - */ - "sasl.kerberos.min.time.before.relogin"?: number; - - /** - * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms - */ - "sasl.username"?: string; - - /** - * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism - */ - "sasl.password"?: string; - - /** - * SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123` - */ - "sasl.oauthbearer.config"?: string; - - /** - * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production. - * - * @default false - */ - "enable.sasl.oauthbearer.unsecure.jwt"?: boolean; - - /** - * SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. - */ - "oauthbearer_token_refresh_cb"?: any; - - /** - * List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. - */ - "plugin.library.paths"?: string; - - /** - * Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. - */ - "interceptors"?: any; - - /** - * A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`. - */ - "client.rack"?: string; - - /** - * Enables or disables `event.*` emitting. - * - * @default true - */ - "event_cb"?: boolean; -} - -export interface ProducerGlobalConfig extends GlobalConfig { - /** - * Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. 
It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0. - */ - "transactional.id"?: string; - - /** - * The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods. - * - * @default 60000 - */ - "transaction.timeout.ms"?: number; - - /** - * When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible. - * - * @default false - */ - "enable.idempotence"?: boolean; - - /** - * **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`. - * - * @default false - */ - "enable.gapless.guarantee"?: boolean; - - /** - * Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. - * - * @default 100000 - */ - "queue.buffering.max.messages"?: number; - - /** - * Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages. - * - * @default 1048576 - */ - "queue.buffering.max.kbytes"?: number; - - /** - * Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. - * - * @default 5 - */ - "queue.buffering.max.ms"?: any; - - /** - * Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. - * - * @default 5 - */ - "linger.ms"?: any; - - /** - * How many times to retry sending a failing Message.
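`transactional.id` and `enable.idempotence` are likewise ordinary keys on the producer's global config. A sketch with a made-up transactional id; note that driving the transactional API itself requires a node-rdkafka release that exposes those methods:

```typescript
import * as Kafka from 'node-rdkafka';

// 'orders-producer-1' is a hypothetical id; keep it stable across process
// restarts so earlier "zombie" instances of the producer can be fenced off.
const producer = new Kafka.Producer({
  'metadata.broker.list': 'broker.example.com:9092', // placeholder address
  'enable.idempotence': true, // auto-adjusts acks, retries, and in-flight limit
  'transactional.id': 'orders-producer-1',
  'transaction.timeout.ms': 60000,
});
```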
**Note:** retrying may cause reordering unless `enable.idempotence` is set to true. - * - * @default 2147483647 - */ - "message.send.max.retries"?: number; - - /** - * Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true. - * - * @default 2147483647 - */ - "retries"?: number; - - /** - * The backoff time in milliseconds before retrying a protocol request. - * - * @default 100 - */ - "retry.backoff.ms"?: number; - - /** - * The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines. - * - * @default 1 - */ - "queue.buffering.backpressure.threshold"?: number; - - /** - * compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. - * - * @default none - */ - "compression.codec"?: 'none' | 'gzip' | 'snappy' | 'lz4' | 'zstd'; - - /** - * Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. - * - * @default none - */ - "compression.type"?: 'none' | 'gzip' | 'snappy' | 'lz4' | 'zstd'; - - /** - * Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes. - * - * @default 10000 - */ - "batch.num.messages"?: number; - - /** - * Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes. - * - * @default 1000000 - */ - "batch.size"?: number; - - /** - * Only provide delivery reports for failed messages. - * - * @default false - */ - "delivery.report.only.error"?: boolean; - - /** - * Delivery report callback (set with rd_kafka_conf_set_dr_cb()) - */ - "dr_cb"?: boolean | Function; - - /** - * Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) - */ - "dr_msg_cb"?: boolean; - - /** - * Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages. - * - * @default 10 - */ - "sticky.partitioning.linger.ms"?: number; -} - -export interface ConsumerGlobalConfig extends GlobalConfig { - /** - * Client group id string. All clients sharing the same group.id belong to the same group. - */ - "group.id"?: string; - - /** - * Enable static group membership. 
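The batching and reporting knobs above (`linger.ms`, `compression.codec`, `dr_cb`) are what produce the `delivery-report` events shown in this sample's logs. A sketch against the sample's HelloTopic, with a placeholder broker address:

```typescript
import * as Kafka from 'node-rdkafka';

const producer = new Kafka.Producer({
  'metadata.broker.list': 'broker.example.com:9092', // placeholder address
  'linger.ms': 50,            // wait up to 50 ms to accumulate larger batches
  'compression.codec': 'lz4',
  'dr_cb': true,              // emit a 'delivery-report' event per message
});

producer.on('ready', () => {
  producer.produce('HelloTopic', null, Buffer.from('hello'), null, Date.now());
});
producer.on('delivery-report', (err, report) => {
  // Carries topic, partition, and offset, as in the log excerpts above.
  console.log('delivery-report:', err, report);
});
producer.setPollInterval(100); // poll regularly so delivery events are dispatched
producer.connect();
```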
Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0. - */ - "group.instance.id"?: string; - - /** - * The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky. - * - * @default range,roundrobin - */ - "partition.assignment.strategy"?: string; - - /** - * Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`. - * - * @default 45000 - */ - "session.timeout.ms"?: number; - - /** - * Group session keepalive heartbeat interval. - * - * @default 3000 - */ - "heartbeat.interval.ms"?: number; - - /** - * Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`. - * - * @default consumer - */ - "group.protocol.type"?: string; - - /** - * How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. - * - * @default 600000 - */ - "coordinator.query.interval.ms"?: number; - - /** - * Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information. - * - * @default 300000 - */ - "max.poll.interval.ms"?: number; - - /** - * Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign(). - * - * @default true - */ - "enable.auto.commit"?: boolean; - - /** - * The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. - * - * @default 5000 - */ - "auto.commit.interval.ms"?: number; - - /** - * Automatically store offset of last message provided to application.
The offset store is an in-memory store of the next offset to (auto-)commit for each partition. - * - * @default true - */ - "enable.auto.offset.store"?: boolean; - - /** - * Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. - * - * @default 100000 - */ - "queued.min.messages"?: number; - - /** - * Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. - * - * @default 65536 - */ - "queued.max.messages.kbytes"?: number; - - /** - * Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages. - * - * @default 500 - */ - "fetch.wait.max.ms"?: number; - - /** - * Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. - * - * @default 1048576 - */ - "fetch.message.max.bytes"?: number; - - /** - * Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. - * - * @default 1048576 - */ - "max.partition.fetch.bytes"?: number; - - /** - * Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config). - * - * @default 52428800 - */ - "fetch.max.bytes"?: number; - - /** - * Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. - * - * @default 1 - */ - "fetch.min.bytes"?: number; - - /** - * How long to postpone the next fetch request for a topic+partition in case of a fetch error. - * - * @default 500 - */ - "fetch.error.backoff.ms"?: number; - - /** - * **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). - * - * @default broker - */ - "offset.store.method"?: 'none' | 'file' | 'broker'; - - /** - * Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted. 
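On the consumer side, the group, commit, and fetch properties above make up the first constructor argument of `KafkaConsumer`; per-topic defaults go in the second. A sketch with a hypothetical group id:

```typescript
import * as Kafka from 'node-rdkafka';

const consumer = new Kafka.KafkaConsumer(
  {
    'metadata.broker.list': 'broker.example.com:9092', // placeholder address
    'group.id': 'hello-consumer-group',                // hypothetical group id
    'session.timeout.ms': 45000,
    'enable.auto.commit': true,
    'isolation.level': 'read_committed',
  },
  { 'auto.offset.reset': 'earliest' } // topic-level: start from the beginning
);

consumer.on('ready', () => {
  consumer.subscribe(['HelloTopic']);
  consumer.consume(); // flowing mode: messages arrive as 'data' events
});
consumer.on('data', (msg) => {
  console.log(`${msg.topic}[${msg.partition}]@${msg.offset}:`, msg.value?.toString());
});
consumer.connect();
```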
- * - * @default read_committed - */ - "isolation.level"?: 'read_uncommitted' | 'read_committed'; - - /** - * Message consume callback (set with rd_kafka_conf_set_consume_cb()) - */ - "consume_cb"?: any; - - /** - * Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) - */ - "rebalance_cb"?: boolean | Function; - - /** - * Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) - */ - "offset_commit_cb"?: boolean | Function; - - /** - * Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. - * - * @default false - */ - "enable.partition.eof"?: boolean; - - /** - * Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage. - * - * @default false - */ - "check.crcs"?: boolean; - - /** - * Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies. - * - * @default false - */ - "allow.auto.create.topics"?: boolean; -} - -export interface TopicConfig { - /** - * Application opaque (set with rd_kafka_topic_conf_set_opaque()) - */ - "opaque"?: any; -} - -export interface ProducerTopicConfig extends TopicConfig { - /** - * This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are fewer than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail. - * - * @default -1 - */ - "request.required.acks"?: number; - - /** - * Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are fewer than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail. - * - * @default -1 - */ - "acks"?: number; - - /** - * The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. - * - * @default 30000 - */ - "request.timeout.ms"?: number; - - /** - * Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured. - * - * @default 300000 - */ - "message.timeout.ms"?: number; - - /** - * Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries).
Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured. - * - * @default 300000 - */ - "delivery.timeout.ms"?: number; - - /** - * **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. - * - * @default fifo - */ - "queuing.strategy"?: 'fifo' | 'lifo'; - - /** - * **DEPRECATED** No longer used. - * - * @default false - */ - "produce.offset.report"?: boolean; - - /** - * Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned). - * - * @default consistent_random - */ - "partitioner"?: string; - - /** - * Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) - */ - "partitioner_cb"?: any; - - /** - * **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`. - */ - "msg_order_cmp"?: any; - - /** - * Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration. - * - * @default inherit - */ - "compression.codec"?: 'none' | 'gzip' | 'snappy' | 'lz4' | 'zstd' | 'inherit'; - - /** - * Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. - * - * @default none - */ - "compression.type"?: 'none' | 'gzip' | 'snappy' | 'lz4' | 'zstd'; - - /** - * Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. - * - * @default -1 - */ - "compression.level"?: number; -} - -export interface ConsumerTopicConfig extends TopicConfig { - /** - * **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method. - * - * @default true - */ - "auto.commit.enable"?: boolean; - - /** - * **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. 
If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method. - * - * @default true - */ - "enable.auto.commit"?: boolean; - - /** - * [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. - * - * @default 60000 - */ - "auto.commit.interval.ms"?: number; - - /** - * Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'. - * - * @default largest - */ - "auto.offset.reset"?: 'smallest' | 'earliest' | 'beginning' | 'largest' | 'latest' | 'end' | 'error'; - - /** - * **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version. - * - * @default . - */ - "offset.store.path"?: string; - - /** - * **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version. - * - * @default -1 - */ - "offset.store.sync.interval.ms"?: number; - - /** - * **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.). 
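These topic-level interfaces type the second constructor argument, kept separate from the global config. For instance, acks and the partitioner are per-topic producer settings (broker address again a placeholder):

```typescript
import * as Kafka from 'node-rdkafka';

const producer = new Kafka.Producer(
  { 'metadata.broker.list': 'broker.example.com:9092' }, // global config
  {
    'acks': -1,                      // block until all in-sync replicas commit
    'request.timeout.ms': 30000,
    'message.timeout.ms': 300000,    // total local delivery budget, incl. retries
    'partitioner': 'murmur2_random', // Java-producer-compatible key hashing
  }
);
```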
- * - * @default broker - */ - "offset.store.method"?: 'file' | 'broker'; - - /** - * Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) - * - * @default 0 - */ - "consume.callback.max.messages"?: number; -} \ No newline at end of file diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/configure b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/configure deleted file mode 100755 index 8d25db32..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/configure +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -# This file is intended to be run on unix systems to configure librdkafka -# inside the submodules - -# This does not get run on windows which uses the build in solutions file - -# Get script directory -scriptdir=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd) - -pushd ./deps/librdkafka &> /dev/null - -./configure --prefix="${scriptdir}/build/deps" --libdir="${scriptdir}/build/deps" $* - -popd &> /dev/null diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/cpplint.py b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/cpplint.py deleted file mode 100644 index 44726248..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/cpplint.py +++ /dev/null @@ -1,6325 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Does google-lint on c++ files. - -The goal of this script is to identify places in the code that *may* -be in non-compliance with google style. It does not attempt to fix -up these problems -- the point is to educate. 
It does also not -attempt to find all problems, or to ensure that everything it does -find is legitimately a problem. - -In particular, we can get very confused by /* and // inside strings! -We do a small hack, which is to ignore //'s with "'s after them on the -same line, but it is far from perfect (in either direction). -""" - -import codecs -import copy -import getopt -import math # for log -import os -import re -import sre_compile -import string -import sys -import unicodedata - - -_USAGE = """ -Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] - [--counting=total|toplevel|detailed] [--root=subdir] - [--linelength=digits] - [file] ... - - The style guidelines this tries to follow are those in - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml - - Every problem is given a confidence score from 1-5, with 5 meaning we are - certain of the problem, and 1 meaning it could be a legitimate construct. - This will miss some errors, and is not a substitute for a code review. - - To suppress false-positive errors of a certain category, add a - 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) - suppresses errors of all categories on that line. - - The files passed in will be linted; at least one file must be provided. - Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the - extensions with the --extensions flag. - - Flags: - - output=vs7 - By default, the output is formatted to ease emacs parsing. Visual Studio - compatible output (vs7) may also be used. Other formats are unsupported. - - verbose=# - Specify a number 0-5 to restrict errors to certain verbosity levels. - - filter=-x,+y,... - Specify a comma-separated list of category-filters to apply: only - error messages whose category names pass the filters will be printed. - (Category names are printed with the message and look like - "[whitespace/indent]".) Filters are evaluated left to right. - "-FOO" and "FOO" means "do not print categories that start with FOO". - "+FOO" means "do print categories that start with FOO". - - Examples: --filter=-whitespace,+whitespace/braces - --filter=whitespace,runtime/printf,+runtime/printf_format - --filter=-,+build/include_what_you_use - - To see a list of all the categories used in cpplint, pass no arg: - --filter= - - counting=total|toplevel|detailed - The total number of errors found is always printed. If - 'toplevel' is provided, then the count of errors in each of - the top-level categories like 'build' and 'whitespace' will - also be printed. If 'detailed' is provided, then a count - is provided for each category like 'build/class'. - - root=subdir - The root directory used for deriving header guard CPP variable. - By default, the header guard CPP variable is calculated as the relative - path to the directory that contains .git, .hg, or .svn. When this flag - is specified, the relative path is calculated from the specified - directory. If the specified directory does not exist, this flag is - ignored. - - Examples: - Assuming that src/.git exists, the header guard CPP variables for - src/chrome/browser/ui/browser.h are: - - No flag => CHROME_BROWSER_UI_BROWSER_H_ - --root=chrome => BROWSER_UI_BROWSER_H_ - --root=chrome/browser => UI_BROWSER_H_ - - linelength=digits - This is the allowed line length for the project. The default value is - 80 characters. - - Examples: - --linelength=120 - - extensions=extension,extension,... 
- The allowed file extensions that cpplint will check - - Examples: - --extensions=hpp,cpp - - cpplint.py supports per-directory configurations specified in CPPLINT.cfg - files. A CPPLINT.cfg file can contain a number of key=value pairs. - Currently the following options are supported: - - set noparent - filter=+filter1,-filter2,... - exclude_files=regex - linelength=80 - - The "set noparent" option prevents cpplint from traversing the directory tree - upwards looking for more .cfg files in parent directories. This option - is usually placed in the top-level project directory. - - The "filter" option is similar in function to the --filter flag. It specifies - message filters in addition to the |_DEFAULT_FILTERS| and those specified - through the --filter command-line flag. - - "exclude_files" allows specifying a regular expression to be matched against - a file name. If the expression matches, the file is skipped and not run - through the linter. - - "linelength" allows specifying the allowed line length for the project. - - CPPLINT.cfg has an effect on files in the same directory and all - sub-directories, unless overridden by a nested configuration file. - - Example file: - filter=-build/include_order,+build/include_alpha - exclude_files=.*\.cc - - The above example disables the build/include_order warning and enables - build/include_alpha, as well as excluding all .cc files from being - processed by the linter, in the current directory (where the .cfg - file is located) and all sub-directories. -""" - -# We categorize each error message we print. Here are the categories. -# We want an explicit list so we can list them all in cpplint --filter=. -# If you add a new error message with a new category, add it to the list -# here! cpplint_unittest.py should tell you if you forget to do this. -_ERROR_CATEGORIES = [ - 'build/class', - 'build/c++11', - 'build/deprecated', - 'build/endif_comment', - 'build/explicit_make_pair', - 'build/forward_decl', - 'build/header_guard', - 'build/include', - 'build/include_alpha', - 'build/include_order', - 'build/include_what_you_use', - 'build/namespaces', - 'build/printf_format', - 'build/storage_class', - 'legal/copyright', - 'readability/alt_tokens', - 'readability/braces', - 'readability/casting', - 'readability/check', - 'readability/constructors', - 'readability/fn_size', - 'readability/function', - 'readability/inheritance', - 'readability/multiline_comment', - 'readability/multiline_string', - 'readability/namespace', - 'readability/nolint', - 'readability/nul', - 'readability/strings', - 'readability/todo', - 'readability/utf8', - 'runtime/arrays', - 'runtime/casting', - 'runtime/explicit', - 'runtime/int', - 'runtime/init', - 'runtime/invalid_increment', - 'runtime/member_string_references', - 'runtime/memset', - 'runtime/indentation_namespace', - 'runtime/operator', - 'runtime/printf', - 'runtime/printf_format', - 'runtime/references', - 'runtime/string', - 'runtime/threadsafe_fn', - 'runtime/vlog', - 'whitespace/blank_line', - 'whitespace/braces', - 'whitespace/comma', - 'whitespace/comments', - 'whitespace/empty_conditional_body', - 'whitespace/empty_loop_body', - 'whitespace/end_of_line', - 'whitespace/ending_newline', - 'whitespace/forcolon', - 'whitespace/indent', - 'whitespace/line_length', - 'whitespace/newline', - 'whitespace/operators', - 'whitespace/parens', - 'whitespace/semicolon', - 'whitespace/tab', - 'whitespace/todo', - ] - -# These error categories are no longer enforced by cpplint, but for backwards- -# compatibility they may still appear in NOLINT comments.
-_LEGACY_ERROR_CATEGORIES = [ - 'readability/streams', - ] - -# The default state of the category filter. This is overridden by the --filter= -# flag. By default all errors are on, so only add here categories that should be -# off by default (i.e., categories that must be enabled by the --filter= flags). -# All entries here should start with a '-' or '+', as in the --filter= flag. -_DEFAULT_FILTERS = ['-build/include_alpha'] - -# We used to check for high-bit characters, but after much discussion we -# decided those were OK, as long as they were in UTF-8 and didn't represent -# hard-coded international strings, which belong in a separate i18n file. - -# C++ headers -_CPP_HEADERS = frozenset([ - # Legacy - 'algobase.h', - 'algo.h', - 'alloc.h', - 'builtinbuf.h', - 'bvector.h', - 'complex.h', - 'defalloc.h', - 'deque.h', - 'editbuf.h', - 'fstream.h', - 'function.h', - 'hash_map', - 'hash_map.h', - 'hash_set', - 'hash_set.h', - 'hashtable.h', - 'heap.h', - 'indstream.h', - 'iomanip.h', - 'iostream.h', - 'istream.h', - 'iterator.h', - 'list.h', - 'map.h', - 'multimap.h', - 'multiset.h', - 'ostream.h', - 'pair.h', - 'parsestream.h', - 'pfstream.h', - 'procbuf.h', - 'pthread_alloc', - 'pthread_alloc.h', - 'rope', - 'rope.h', - 'ropeimpl.h', - 'set.h', - 'slist', - 'slist.h', - 'stack.h', - 'stdiostream.h', - 'stl_alloc.h', - 'stl_relops.h', - 'streambuf.h', - 'stream.h', - 'strfile.h', - 'strstream.h', - 'tempbuf.h', - 'tree.h', - 'type_traits.h', - 'vector.h', - # 17.6.1.2 C++ library headers - 'algorithm', - 'array', - 'atomic', - 'bitset', - 'chrono', - 'codecvt', - 'complex', - 'condition_variable', - 'deque', - 'exception', - 'forward_list', - 'fstream', - 'functional', - 'future', - 'initializer_list', - 'iomanip', - 'ios', - 'iosfwd', - 'iostream', - 'istream', - 'iterator', - 'limits', - 'list', - 'locale', - 'map', - 'memory', - 'mutex', - 'new', - 'numeric', - 'ostream', - 'queue', - 'random', - 'ratio', - 'regex', - 'set', - 'sstream', - 'stack', - 'stdexcept', - 'streambuf', - 'string', - 'strstream', - 'system_error', - 'thread', - 'tuple', - 'typeindex', - 'typeinfo', - 'type_traits', - 'unordered_map', - 'unordered_set', - 'utility', - 'valarray', - 'vector', - # 17.6.1.2 C++ headers for C library facilities - 'cassert', - 'ccomplex', - 'cctype', - 'cerrno', - 'cfenv', - 'cfloat', - 'cinttypes', - 'ciso646', - 'climits', - 'clocale', - 'cmath', - 'csetjmp', - 'csignal', - 'cstdalign', - 'cstdarg', - 'cstdbool', - 'cstddef', - 'cstdint', - 'cstdio', - 'cstdlib', - 'cstring', - 'ctgmath', - 'ctime', - 'cuchar', - 'cwchar', - 'cwctype', - ]) - - -# These headers are excluded from [build/include] and [build/include_order] -# checks: -# - Anything not following google file name conventions (containing an -# uppercase character, such as Python.h or nsStringAPI.h, for example). -# - Lua headers. -# - rdkafka.cpp header, because it would be located in different directories depending -# on whether it's pulled from librdkafka sources or librdkafka-dev package. -_THIRD_PARTY_HEADERS_PATTERN = re.compile( - r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h|rdkafkacpp\.h)$') - - -# Assertion macros. These are defined in base/logging.h and -# testing/base/gunit.h. Note that the _M versions need to come first -# for substring matching to work. 
-_CHECK_MACROS = [ - 'DCHECK', 'CHECK', - 'EXPECT_TRUE_M', 'EXPECT_TRUE', - 'ASSERT_TRUE_M', 'ASSERT_TRUE', - 'EXPECT_FALSE_M', 'EXPECT_FALSE', - 'ASSERT_FALSE_M', 'ASSERT_FALSE', - ] - -# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE -_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) - -for op, replacement in [('==', 'EQ'), ('!=', 'NE'), - ('>=', 'GE'), ('>', 'GT'), - ('<=', 'LE'), ('<', 'LT')]: - _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement - _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement - -for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), - ('>=', 'LT'), ('>', 'LE'), - ('<=', 'GT'), ('<', 'GE')]: - _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement - _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement - -# Alternative tokens and their replacements. For full list, see section 2.5 -# Alternative tokens [lex.digraph] in the C++ standard. -# -# Digraphs (such as '%:') are not included here since it's a mess to -# match those on a word boundary. -_ALT_TOKEN_REPLACEMENT = { - 'and': '&&', - 'bitor': '|', - 'or': '||', - 'xor': '^', - 'compl': '~', - 'bitand': '&', - 'and_eq': '&=', - 'or_eq': '|=', - 'xor_eq': '^=', - 'not': '!', - 'not_eq': '!=' - } - -# Compile regular expression that matches all the above keywords. The "[ =()]" -# bit is meant to avoid matching these keywords outside of boolean expressions. -# -# False positives include C-style multi-line comments and multi-line strings -# but those have always been troublesome for cpplint. -_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( - r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') - - -# These constants define types of headers for use with -# _IncludeState.CheckNextIncludeOrder(). -_C_SYS_HEADER = 1 -_CPP_SYS_HEADER = 2 -_LIKELY_MY_HEADER = 3 -_POSSIBLE_MY_HEADER = 4 -_OTHER_HEADER = 5 - -# These constants define the current inline assembly state -_NO_ASM = 0 # Outside of inline assembly block -_INSIDE_ASM = 1 # Inside inline assembly block -_END_ASM = 2 # Last line of inline assembly block -_BLOCK_ASM = 3 # The whole block is an inline assembly block - -# Match start of assembly blocks -_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' - r'(?:\s+(volatile|__volatile__))?' - r'\s*[{(]') - - -_regexp_compile_cache = {} - -# {str, set(int)}: a map from error categories to sets of linenumbers -# on which those errors are expected and should be suppressed. -_error_suppressions = {} - -# The root directory used for deriving header guard CPP variable. -# This is set by --root flag. -_root = None - -# The allowed line length of files. -# This is set by --linelength flag. -_line_length = 80 - -# The allowed extensions for file names -# This is set by --extensions flag. -_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) - -def ParseNolintSuppressions(filename, raw_line, linenum, error): - """Updates the global list of error-suppressions. - - Parses any NOLINT comments on the current line, updating the global - error_suppressions store. 
Reports an error if the NOLINT comment - was malformed. - - Args: - filename: str, the name of the input file. - raw_line: str, the line of input text, with comments. - linenum: int, the number of the current line. - error: function, an error handler. - """ - matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) - if matched: - if matched.group(1): - suppressed_line = linenum + 1 - else: - suppressed_line = linenum - category = matched.group(2) - if category in (None, '(*)'): # => "suppress all" - _error_suppressions.setdefault(None, set()).add(suppressed_line) - else: - if category.startswith('(') and category.endswith(')'): - category = category[1:-1] - if category in _ERROR_CATEGORIES: - _error_suppressions.setdefault(category, set()).add(suppressed_line) - elif category not in _LEGACY_ERROR_CATEGORIES: - error(filename, linenum, 'readability/nolint', 5, - 'Unknown NOLINT error category: %s' % category) - - -def ResetNolintSuppressions(): - """Resets the set of NOLINT suppressions to empty.""" - _error_suppressions.clear() - - -def IsErrorSuppressedByNolint(category, linenum): - """Returns true if the specified error category is suppressed on this line. - - Consults the global error_suppressions map populated by - ParseNolintSuppressions/ResetNolintSuppressions. - - Args: - category: str, the category of the error. - linenum: int, the current line number. - Returns: - bool, True iff the error should be suppressed due to a NOLINT comment. - """ - return (linenum in _error_suppressions.get(category, set()) or - linenum in _error_suppressions.get(None, set())) - - -def Match(pattern, s): - """Matches the string with the pattern, caching the compiled regexp.""" - # The regexp compilation caching is inlined in both Match and Search for - # performance reasons; factoring it out into a separate function turns out - # to be noticeably expensive. - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].match(s) - - -def ReplaceAll(pattern, rep, s): - """Replaces instances of pattern in a string with a replacement. - - The compiled regex is kept in a cache shared by Match and Search. - - Args: - pattern: regex pattern - rep: replacement text - s: search string - - Returns: - string with replacements made (or original string if no replacements) - """ - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].sub(rep, s) - - -def Search(pattern, s): - """Searches the string for the pattern, caching the compiled regexp.""" - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].search(s) - - -class _IncludeState(object): - """Tracks line numbers for includes, and the order in which includes appear. - - include_list contains list of lists of (header, line number) pairs. - It's a lists of lists rather than just one flat list to make it - easier to update across preprocessor boundaries. - - Call CheckNextIncludeOrder() once for each header in the file, passing - in the type constants defined above. Calls in an illegal order will - raise an _IncludeError with an appropriate error message. - - """ - # self._section will move monotonically through this set. If it ever - # needs to move backwards, CheckNextIncludeOrder will raise an error. 
- _INITIAL_SECTION = 0 - _MY_H_SECTION = 1 - _C_SECTION = 2 - _CPP_SECTION = 3 - _OTHER_H_SECTION = 4 - - _TYPE_NAMES = { - _C_SYS_HEADER: 'C system header', - _CPP_SYS_HEADER: 'C++ system header', - _LIKELY_MY_HEADER: 'header this file implements', - _POSSIBLE_MY_HEADER: 'header this file may implement', - _OTHER_HEADER: 'other header', - } - _SECTION_NAMES = { - _INITIAL_SECTION: "... nothing. (This can't be an error.)", - _MY_H_SECTION: 'a header this file implements', - _C_SECTION: 'C system header', - _CPP_SECTION: 'C++ system header', - _OTHER_H_SECTION: 'other header', - } - - def __init__(self): - self.include_list = [[]] - self.ResetSection('') - - def FindHeader(self, header): - """Check if a header has already been included. - - Args: - header: header to check. - Returns: - Line number of previous occurrence, or -1 if the header has not - been seen before. - """ - for section_list in self.include_list: - for f in section_list: - if f[0] == header: - return f[1] - return -1 - - def ResetSection(self, directive): - """Reset section checking for preprocessor directive. - - Args: - directive: preprocessor directive (e.g. "if", "else"). - """ - # The name of the current section. - self._section = self._INITIAL_SECTION - # The path of last found header. - self._last_header = '' - - # Update list of includes. Note that we never pop from the - # include list. - if directive in ('if', 'ifdef', 'ifndef'): - self.include_list.append([]) - elif directive in ('else', 'elif'): - self.include_list[-1] = [] - - def SetLastHeader(self, header_path): - self._last_header = header_path - - def CanonicalizeAlphabeticalOrder(self, header_path): - """Returns a path canonicalized for alphabetical comparison. - - - replaces "-" with "_" so they both cmp the same. - - removes '-inl' since we don't require them to be after the main header. - - lowercase everything, just in case. - - Args: - header_path: Path to be canonicalized. - - Returns: - Canonicalized path. - """ - return header_path.replace('-inl.h', '.h').replace('-', '_').lower() - - def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): - """Check if a header is in alphabetical order with the previous header. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - header_path: Canonicalized header to be checked. - - Returns: - Returns true if the header is in alphabetical order. - """ - # If previous section is different from current section, _last_header will - # be reset to empty string, so it's always less than current header. - # - # If previous line was a blank line, assume that the headers are - # intentionally sorted the way they are. - if (self._last_header > header_path and - Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): - return False - return True - - def CheckNextIncludeOrder(self, header_type): - """Returns a non-empty error message if the next header is out of order. - - This function also updates the internal state to be ready to check - the next include. - - Args: - header_type: One of the _XXX_HEADER constants defined above. - - Returns: - The empty string if the header is in the right order, or an - error message describing what's wrong. 
- - """ - error_message = ('Found %s after %s' % - (self._TYPE_NAMES[header_type], - self._SECTION_NAMES[self._section])) - - last_section = self._section - - if header_type == _C_SYS_HEADER: - if self._section <= self._C_SECTION: - self._section = self._C_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _CPP_SYS_HEADER: - if self._section <= self._CPP_SECTION: - self._section = self._CPP_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _LIKELY_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - self._section = self._OTHER_H_SECTION - elif header_type == _POSSIBLE_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - # This will always be the fallback because we're not sure - # enough that the header is associated with this file. - self._section = self._OTHER_H_SECTION - else: - assert header_type == _OTHER_HEADER - self._section = self._OTHER_H_SECTION - - if last_section != self._section: - self._last_header = '' - - return '' - - -class _CppLintState(object): - """Maintains module-wide state..""" - - def __init__(self): - self.verbose_level = 1 # global setting. - self.error_count = 0 # global count of reported errors - # filters to apply when emitting error messages - self.filters = _DEFAULT_FILTERS[:] - # backup of filter list. Used to restore the state after each file. - self._filters_backup = self.filters[:] - self.counting = 'total' # In what way are we counting errors? - self.errors_by_category = {} # string to int dict storing error counts - - # output format: - # "emacs" - format that emacs can parse (default) - # "vs7" - format that Microsoft Visual Studio 7 can parse - self.output_format = 'emacs' - - def SetOutputFormat(self, output_format): - """Sets the output format for errors.""" - self.output_format = output_format - - def SetVerboseLevel(self, level): - """Sets the module's verbosity, and returns the previous setting.""" - last_verbose_level = self.verbose_level - self.verbose_level = level - return last_verbose_level - - def SetCountingStyle(self, counting_style): - """Sets the module's counting options.""" - self.counting = counting_style - - def SetFilters(self, filters): - """Sets the error-message filters. - - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "+whitespace/indent"). - Each filter should start with + or -; else we die. - - Raises: - ValueError: The comma-separated filters did not all start with '+' or '-'. - E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" - """ - # Default filters always have less priority than the flag ones. - self.filters = _DEFAULT_FILTERS[:] - self.AddFilters(filters) - - def AddFilters(self, filters): - """ Adds more filters to the existing list of error-message filters. 
""" - for filt in filters.split(','): - clean_filt = filt.strip() - if clean_filt: - self.filters.append(clean_filt) - for filt in self.filters: - if not (filt.startswith('+') or filt.startswith('-')): - raise ValueError('Every filter in --filters must start with + or -' - ' (%s does not)' % filt) - - def BackupFilters(self): - """ Saves the current filter list to backup storage.""" - self._filters_backup = self.filters[:] - - def RestoreFilters(self): - """ Restores filters previously backed up.""" - self.filters = self._filters_backup[:] - - def ResetErrorCounts(self): - """Sets the module's error statistic back to zero.""" - self.error_count = 0 - self.errors_by_category = {} - - def IncrementErrorCount(self, category): - """Bumps the module's error statistic.""" - self.error_count += 1 - if self.counting in ('toplevel', 'detailed'): - if self.counting != 'detailed': - category = category.split('/')[0] - if category not in self.errors_by_category: - self.errors_by_category[category] = 0 - self.errors_by_category[category] += 1 - - def PrintErrorCounts(self): - """Print a summary of errors by category, and the total.""" - for category, count in self.errors_by_category.iteritems(): - sys.stderr.write('Category \'%s\' errors found: %d\n' % - (category, count)) - sys.stderr.write('Total errors found: %d\n' % self.error_count) - -_cpplint_state = _CppLintState() - - -def _OutputFormat(): - """Gets the module's output format.""" - return _cpplint_state.output_format - - -def _SetOutputFormat(output_format): - """Sets the module's output format.""" - _cpplint_state.SetOutputFormat(output_format) - - -def _VerboseLevel(): - """Returns the module's verbosity setting.""" - return _cpplint_state.verbose_level - - -def _SetVerboseLevel(level): - """Sets the module's verbosity, and returns the previous setting.""" - return _cpplint_state.SetVerboseLevel(level) - - -def _SetCountingStyle(level): - """Sets the module's counting options.""" - _cpplint_state.SetCountingStyle(level) - - -def _Filters(): - """Returns the module's list of output filters, as a list.""" - return _cpplint_state.filters - - -def _SetFilters(filters): - """Sets the module's error-message filters. - - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. - """ - _cpplint_state.SetFilters(filters) - -def _AddFilters(filters): - """Adds more filter overrides. - - Unlike _SetFilters, this function does not reset the current list of filters - available. - - Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. - """ - _cpplint_state.AddFilters(filters) - -def _BackupFilters(): - """ Saves the current filter list to backup storage.""" - _cpplint_state.BackupFilters() - -def _RestoreFilters(): - """ Restores filters previously backed up.""" - _cpplint_state.RestoreFilters() - -class _FunctionState(object): - """Tracks current function name and the number of lines in its body.""" - - _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. - _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. - - def __init__(self): - self.in_a_function = False - self.lines_in_function = 0 - self.current_function = '' - - def Begin(self, function_name): - """Start analyzing function body. - - Args: - function_name: The name of the function being tracked. 
- """ - self.in_a_function = True - self.lines_in_function = 0 - self.current_function = function_name - - def Count(self): - """Count line in current function body.""" - if self.in_a_function: - self.lines_in_function += 1 - - def Check(self, error, filename, linenum): - """Report if too many lines in function body. - - Args: - error: The function to call with any errors found. - filename: The name of the current file. - linenum: The number of the line to check. - """ - if Match(r'T(EST|est)', self.current_function): - base_trigger = self._TEST_TRIGGER - else: - base_trigger = self._NORMAL_TRIGGER - trigger = base_trigger * 2**_VerboseLevel() - - if self.lines_in_function > trigger: - error_level = int(math.log(self.lines_in_function / base_trigger, 2)) - # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... - if error_level > 5: - error_level = 5 - error(filename, linenum, 'readability/fn_size', error_level, - 'Small and focused functions are preferred:' - ' %s has %d non-comment lines' - ' (error triggered by exceeding %d lines).' % ( - self.current_function, self.lines_in_function, trigger)) - - def End(self): - """Stop analyzing function body.""" - self.in_a_function = False - - -class _IncludeError(Exception): - """Indicates a problem with the include order in a file.""" - pass - - -class FileInfo(object): - """Provides utility functions for filenames. - - FileInfo provides easy access to the components of a file's path - relative to the project root. - """ - - def __init__(self, filename): - self._filename = filename - - def FullName(self): - """Make Windows paths like Unix.""" - return os.path.abspath(self._filename).replace('\\', '/') - - def RepositoryName(self): - """FullName after removing the local path to the repository. - - If we have a real absolute path name here we can try to do something smart: - detecting the root of the checkout and truncating /path/to/checkout from - the name so that we get header guards that don't include things like - "C:\Documents and Settings\..." or "/home/username/..." in them and thus - people on different computers who have checked the source out to different - locations won't see bogus errors. - """ - fullname = self.FullName() - - if os.path.exists(fullname): - project_dir = os.path.dirname(fullname) - - if os.path.exists(os.path.join(project_dir, ".svn")): - # If there's a .svn file in the current directory, we recursively look - # up the directory tree for the top of the SVN checkout - root_dir = project_dir - one_up_dir = os.path.dirname(root_dir) - while os.path.exists(os.path.join(one_up_dir, ".svn")): - root_dir = os.path.dirname(root_dir) - one_up_dir = os.path.dirname(one_up_dir) - - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by - # searching up from the current path. - root_dir = os.path.dirname(fullname) - while (root_dir != os.path.dirname(root_dir) and - not os.path.exists(os.path.join(root_dir, ".git")) and - not os.path.exists(os.path.join(root_dir, ".hg")) and - not os.path.exists(os.path.join(root_dir, ".svn"))): - root_dir = os.path.dirname(root_dir) - - if (os.path.exists(os.path.join(root_dir, ".git")) or - os.path.exists(os.path.join(root_dir, ".hg")) or - os.path.exists(os.path.join(root_dir, ".svn"))): - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Don't know what to do; header guard warnings may be wrong... 
- return fullname - - def Split(self): - """Splits the file into the directory, basename, and extension. - - For 'chrome/browser/browser.cc', Split() would - return ('chrome/browser', 'browser', '.cc') - - Returns: - A tuple of (directory, basename, extension). - """ - - googlename = self.RepositoryName() - project, rest = os.path.split(googlename) - return (project,) + os.path.splitext(rest) - - def BaseName(self): - """File base name - text after the final slash, before the final period.""" - return self.Split()[1] - - def Extension(self): - """File extension - text following the final period.""" - return self.Split()[2] - - def NoExtension(self): - """File has no source file extension.""" - return '/'.join(self.Split()[0:2]) - - def IsSource(self): - """File has a source file extension.""" - return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') - - -def _ShouldPrintError(category, confidence, linenum): - """If confidence >= verbose, category passes filter and is not suppressed.""" - - # There are three ways we might decide not to print an error message: - # a "NOLINT(category)" comment appears in the source, - # the verbosity level isn't high enough, or the filters filter it out. - if IsErrorSuppressedByNolint(category, linenum): - return False - - if confidence < _cpplint_state.verbose_level: - return False - - is_filtered = False - for one_filter in _Filters(): - if one_filter.startswith('-'): - if category.startswith(one_filter[1:]): - is_filtered = True - elif one_filter.startswith('+'): - if category.startswith(one_filter[1:]): - is_filtered = False - else: - assert False # should have been checked for in SetFilter. - if is_filtered: - return False - - return True - - -def Error(filename, linenum, category, confidence, message): - """Logs the fact we've found a lint error. - - We log where the error was found, and also our confidence in the error, - that is, how certain we are this is a legitimate style regression, and - not a misidentification or a use that's sometimes justified. - - False positives can be suppressed by the use of - "cpplint(category)" comments on the offending line. These are - parsed into _error_suppressions. - - Args: - filename: The name of the file containing the error. - linenum: The number of the line containing the error. - category: A string used to describe the "category" this bug - falls under: "whitespace", say, or "runtime". Categories - may have a hierarchy separated by slashes: "whitespace/indent". - confidence: A number from 1-5 representing a confidence score for - the error, with 5 meaning that we are certain of the problem, - and 1 meaning that it could be a legitimate construct. - message: The error message. - """ - if _ShouldPrintError(category, confidence, linenum): - _cpplint_state.IncrementErrorCount(category) - if _cpplint_state.output_format == 'vs7': - sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - elif _cpplint_state.output_format == 'eclipse': - sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - else: - sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - - -# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. -_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( - r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') -# Match a single C style comment on the same line. -_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' -# Matches multi-line C style comments. 
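The filter walk in _ShouldPrintError above is order-sensitive: rules apply in sequence and the last '+'/'-' entry whose category prefix matches wins. A standalone sketch of just that decision (not cpplint's public API):

```python
def is_filtered(category, filters):
    # Later filters override earlier ones; matching is by category prefix,
    # as in the loop over _Filters() above.
    filtered = False
    for one_filter in filters:
        if one_filter.startswith('-') and category.startswith(one_filter[1:]):
            filtered = True
        elif one_filter.startswith('+') and category.startswith(one_filter[1:]):
            filtered = False
    return filtered

print(is_filtered('whitespace/indent', ['-whitespace', '+whitespace/indent']))  # False
print(is_filtered('whitespace/braces', ['-whitespace', '+whitespace/indent']))  # True
```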
-# This RE is a little bit more complicated than one might expect, because we -# have to take care of space removals tools so we can handle comments inside -# statements better. -# The current rule is: We only clear spaces from both sides when we're at the -# end of the line. Otherwise, we try to remove spaces from the right side, -# if this doesn't work we try on left side but only if there's a non-character -# on the right. -_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( - r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + - _RE_PATTERN_C_COMMENTS + r'\s+|' + - r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + - _RE_PATTERN_C_COMMENTS + r')') - - -def IsCppString(line): - """Does line terminate so, that the next symbol is in string constant. - - This function does not consider single-line nor multi-line comments. - - Args: - line: is a partial line of code starting from the 0..n. - - Returns: - True, if next character appended to 'line' is inside a - string constant. - """ - - line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" - return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 - - -def CleanseRawStrings(raw_lines): - """Removes C++11 raw strings from lines. - - Before: - static const char kData[] = R"( - multi-line string - )"; - - After: - static const char kData[] = "" - (replaced by blank line) - ""; - - Args: - raw_lines: list of raw lines. - - Returns: - list of lines with C++11 raw strings replaced by empty strings. - """ - - delimiter = None - lines_without_raw_strings = [] - for line in raw_lines: - if delimiter: - # Inside a raw string, look for the end - end = line.find(delimiter) - if end >= 0: - # Found the end of the string, match leading space for this - # line and resume copying the original lines, and also insert - # a "" on the last line. - leading_space = Match(r'^(\s*)\S', line) - line = leading_space.group(1) + '""' + line[end + len(delimiter):] - delimiter = None - else: - # Haven't found the end yet, append a blank line. - line = '""' - - # Look for beginning of a raw string, and replace them with - # empty strings. This is done in a loop to handle multiple raw - # strings on the same line. - while delimiter is None: - # Look for beginning of a raw string. - # See 2.14.15 [lex.string] for syntax. - matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) - if matched: - delimiter = ')' + matched.group(2) + '"' - - end = matched.group(3).find(delimiter) - if end >= 0: - # Raw string ended on same line - line = (matched.group(1) + '""' + - matched.group(3)[end + len(delimiter):]) - delimiter = None - else: - # Start of a multi-line raw string - line = matched.group(1) + '""' - else: - break - - lines_without_raw_strings.append(line) - - # TODO(unknown): if delimiter is not None here, we might want to - # emit a warning for unterminated string. 
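To make the docstring's before/after concrete, here is a deliberately simplified single-line variant of the raw-string collapse (a hypothetical helper; the real function above also carries the delimiter state across lines):

```python
import re

def collapse_raw_string(line):
    # R"delim( ... )delim" collapses to "", per the docstring above; the
    # backreference \1 forces the closing delimiter to match the opener.
    return re.sub(r'\bR"([^\s\\()]*)\(.*?\)\1"', '""', line)

print(collapse_raw_string('static const char kData[] = R"(one line only)";'))
# static const char kData[] = "";
```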
- return lines_without_raw_strings - - -def FindNextMultiLineCommentStart(lines, lineix): - """Find the beginning marker for a multiline comment.""" - while lineix < len(lines): - if lines[lineix].strip().startswith('/*'): - # Only return this marker if the comment goes beyond this line - if lines[lineix].strip().find('*/', 2) < 0: - return lineix - lineix += 1 - return len(lines) - - -def FindNextMultiLineCommentEnd(lines, lineix): - """We are inside a comment, find the end marker.""" - while lineix < len(lines): - if lines[lineix].strip().endswith('*/'): - return lineix - lineix += 1 - return len(lines) - - -def RemoveMultiLineCommentsFromRange(lines, begin, end): - """Clears a range of lines for multi-line comments.""" - # Having // dummy comments makes the lines non-empty, so we will not get - # unnecessary blank line warnings later in the code. - for i in range(begin, end): - lines[i] = '/**/' - - -def RemoveMultiLineComments(filename, lines, error): - """Removes multiline (c-style) comments from lines.""" - lineix = 0 - while lineix < len(lines): - lineix_begin = FindNextMultiLineCommentStart(lines, lineix) - if lineix_begin >= len(lines): - return - lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) - if lineix_end >= len(lines): - error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, - 'Could not find end of multi-line comment') - return - RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) - lineix = lineix_end + 1 - - -def CleanseComments(line): - """Removes //-comments and single-line C-style /* */ comments. - - Args: - line: A line of C++ source. - - Returns: - The line with single-line comments removed. - """ - commentpos = line.find('//') - if commentpos != -1 and not IsCppString(line[:commentpos]): - line = line[:commentpos].rstrip() - # get rid of /* ... */ - return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) - - -class CleansedLines(object): - """Holds 4 copies of all lines with different preprocessing applied to them. - - 1) elided member contains lines without strings and comments. - 2) lines member contains lines without comments. - 3) raw_lines member contains all the lines without processing. - 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw - strings removed. - All these members are of <type 'list'>, and of the same length. - """ - - def __init__(self, lines): - self.elided = [] - self.lines = [] - self.raw_lines = lines - self.num_lines = len(lines) - self.lines_without_raw_strings = CleanseRawStrings(lines) - for linenum in range(len(self.lines_without_raw_strings)): - self.lines.append(CleanseComments( - self.lines_without_raw_strings[linenum])) - elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) - self.elided.append(CleanseComments(elided)) - - def NumLines(self): - """Returns the number of lines represented.""" - return self.num_lines - - @staticmethod - def _CollapseStrings(elided): - """Collapses strings and chars on a line to simple "" or '' blocks. - - We nix strings first so we're not fooled by text like '"http://"' - - Args: - elided: The line being processed. - - Returns: - The line with collapsed strings. - """ - if _RE_PATTERN_INCLUDE.match(elided): - return elided - - # Remove escaped characters first to make quote/single quote collapsing - # basic. Things that look like escaped characters shouldn't occur - # outside of strings and chars. - elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) - - # Replace quoted strings and digit separators.
Both single quotes - # and double quotes are processed in the same loop, otherwise - # nested quotes wouldn't work. - collapsed = '' - while True: - # Find the first quote character - match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) - if not match: - collapsed += elided - break - head, quote, tail = match.groups() - - if quote == '"': - # Collapse double quoted strings - second_quote = tail.find('"') - if second_quote >= 0: - collapsed += head + '""' - elided = tail[second_quote + 1:] - else: - # Unmatched double quote, don't bother processing the rest - # of the line since this is probably a multiline string. - collapsed += elided - break - else: - # Found single quote, check nearby text to eliminate digit separators. - # - # There is no special handling for floating point here, because - # the integer/fractional/exponent parts would all be parsed - # correctly as long as there are digits on both sides of the - # separator. So we are fine as long as we don't see something - # like "0.'3" (gcc 4.9.0 will not allow this literal). - if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): - match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) - collapsed += head + match_literal.group(1).replace("'", '') - elided = match_literal.group(2) - else: - second_quote = tail.find('\'') - if second_quote >= 0: - collapsed += head + "''" - elided = tail[second_quote + 1:] - else: - # Unmatched single quote - collapsed += elided - break - - return collapsed - - -def FindEndOfExpressionInLine(line, startpos, stack): - """Find the position just after the end of current parenthesized expression. - - Args: - line: a CleansedLines line. - startpos: start searching at this position. - stack: nesting stack at startpos. - - Returns: - On finding matching end: (index just after matching end, None) - On finding an unclosed expression: (-1, None) - Otherwise: (-1, new stack at end of this line) - """ - for i in xrange(startpos, len(line)): - char = line[i] - if char in '([{': - # Found start of parenthesized expression, push to expression stack - stack.append(char) - elif char == '<': - # Found potential start of template argument list - if i > 0 and line[i - 1] == '<': - # Left shift operator - if stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - elif i > 0 and Search(r'\boperator\s*$', line[0:i]): - # operator<, don't add to stack - continue - else: - # Tentative start of template argument list - stack.append('<') - elif char in ')]}': - # Found end of parenthesized expression. - # - # If we are currently expecting a matching '>', the pending '<' - # must have been an operator. Remove them from expression stack. - while stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - if ((stack[-1] == '(' and char == ')') or - (stack[-1] == '[' and char == ']') or - (stack[-1] == '{' and char == '}')): - stack.pop() - if not stack: - return (i + 1, None) - else: - # Mismatched parentheses - return (-1, None) - elif char == '>': - # Found potential end of template argument list. - - # Ignore "->" and operator functions - if (i > 0 and - (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): - continue - - # Pop the stack if there is a matching '<'. Otherwise, ignore - # this '>' since it must be an operator. - if stack: - if stack[-1] == '<': - stack.pop() - if not stack: - return (i + 1, None) - elif char == ';': - # Found something that look like end of statements. 
If we are currently - # expecting a '>', the matching '<' must have been an operator, since - # template argument list should not contain statements. - while stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - - # Did not find end of expression or unbalanced parentheses on this line - return (-1, stack) - - -def CloseExpression(clean_lines, linenum, pos): - """If input points to ( or { or [ or <, finds the position that closes it. - - If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the - linenum/pos that correspond to the closing of the expression. - - TODO(unknown): cpplint spends a fair bit of time matching parentheses. - Ideally we would want to index all opening and closing parentheses once - and have CloseExpression be just a simple lookup, but due to preprocessor - tricks, this is not so easy. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *past* the closing brace, or - (line, len(lines), -1) if we never find a close. Note we ignore - strings and comments when matching; and the line we return is the - 'cleansed' line at linenum. - """ - - line = clean_lines.elided[linenum] - if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): - return (line, clean_lines.NumLines(), -1) - - # Check first line - (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) - if end_pos > -1: - return (line, linenum, end_pos) - - # Continue scanning forward - while stack and linenum < clean_lines.NumLines() - 1: - linenum += 1 - line = clean_lines.elided[linenum] - (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) - if end_pos > -1: - return (line, linenum, end_pos) - - # Did not find end of expression before end of file, give up - return (line, clean_lines.NumLines(), -1) - - -def FindStartOfExpressionInLine(line, endpos, stack): - """Find position at the matching start of current expression. - - This is almost the reverse of FindEndOfExpressionInLine, but note - that the input position and returned position differs by 1. - - Args: - line: a CleansedLines line. - endpos: start searching at this position. - stack: nesting stack at endpos. - - Returns: - On finding matching start: (index at matching start, None) - On finding an unclosed expression: (-1, None) - Otherwise: (-1, new stack at beginning of this line) - """ - i = endpos - while i >= 0: - char = line[i] - if char in ')]}': - # Found end of expression, push to expression stack - stack.append(char) - elif char == '>': - # Found potential end of template argument list. - # - # Ignore it if it's a "->" or ">=" or "operator>" - if (i > 0 and - (line[i - 1] == '-' or - Match(r'\s>=\s', line[i - 1:]) or - Search(r'\boperator\s*$', line[0:i]))): - i -= 1 - else: - stack.append('>') - elif char == '<': - # Found potential start of template argument list - if i > 0 and line[i - 1] == '<': - # Left shift operator - i -= 1 - else: - # If there is a matching '>', we can pop the expression stack. - # Otherwise, ignore this '<' since it must be an operator. - if stack and stack[-1] == '>': - stack.pop() - if not stack: - return (i, None) - elif char in '([{': - # Found start of expression. - # - # If there are any unmatched '>' on the stack, they must be - # operators. Remove those. 
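Stripped of the template-argument special cases handled above, the forward scan reduces to a classic bracket matcher. A minimal single-line sketch under that simplification (our own helper, not cpplint's):

```python
def close_expression(line, pos):
    # Returns the index just past the bracket matching line[pos],
    # or -1 on a mismatch; (), [] and {} only -- no '<>' handling.
    pairs = {'(': ')', '[': ']', '{': '}'}
    if pos >= len(line) or line[pos] not in pairs:
        return -1
    stack = []
    for i in range(pos, len(line)):
        ch = line[i]
        if ch in pairs:
            stack.append(pairs[ch])
        elif ch in ')]}':
            if not stack or ch != stack.pop():
                return -1  # mismatched closer
            if not stack:
                return i + 1
    return -1  # ran off the end of the line

print(close_expression('foo(bar(1, 2), baz[3])', 3))  # 22
```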
- while stack and stack[-1] == '>': - stack.pop() - if not stack: - return (-1, None) - if ((char == '(' and stack[-1] == ')') or - (char == '[' and stack[-1] == ']') or - (char == '{' and stack[-1] == '}')): - stack.pop() - if not stack: - return (i, None) - else: - # Mismatched parentheses - return (-1, None) - elif char == ';': - # Found something that look like end of statements. If we are currently - # expecting a '<', the matching '>' must have been an operator, since - # template argument list should not contain statements. - while stack and stack[-1] == '>': - stack.pop() - if not stack: - return (-1, None) - - i -= 1 - - return (-1, stack) - - -def ReverseCloseExpression(clean_lines, linenum, pos): - """If input points to ) or } or ] or >, finds the position that opens it. - - If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the - linenum/pos that correspond to the opening of the expression. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *at* the opening brace, or - (line, 0, -1) if we never find the matching opening brace. Note - we ignore strings and comments when matching; and the line we - return is the 'cleansed' line at linenum. - """ - line = clean_lines.elided[linenum] - if line[pos] not in ')}]>': - return (line, 0, -1) - - # Check last line - (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) - if start_pos > -1: - return (line, linenum, start_pos) - - # Continue scanning backward - while stack and linenum > 0: - linenum -= 1 - line = clean_lines.elided[linenum] - (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) - if start_pos > -1: - return (line, linenum, start_pos) - - # Did not find start of expression before beginning of file, give up - return (line, 0, -1) - - -def CheckForCopyright(filename, lines, error): - """Logs an error if no Copyright message appears at the top of the file.""" - - # We'll say it should occur by line 10. Don't forget there's a - # dummy line at the front. - for line in xrange(1, min(len(lines), 11)): - if re.search(r'Copyright', lines[line], re.I): break - else: # means no copyright line was found - error(filename, 0, 'legal/copyright', 5, - 'No copyright message found. ' - 'You should have a line: "Copyright [year] <Copyright Owner>"') - - -def GetIndentLevel(line): - """Return the number of leading spaces in line. - - Args: - line: A string to check. - - Returns: - An integer count of leading spaces, possibly zero. - """ - indent = Match(r'^( *)\S', line) - if indent: - return len(indent.group(1)) - else: - return 0 - - -def GetHeaderGuardCPPVariable(filename): - """Returns the CPP variable that should be used as a header guard. - - Args: - filename: The name of a C++ header file. - - Returns: - The CPP variable that should be used as a header guard in the - named file. - - """ - - # Restores original filename in case that cpplint is invoked from Emacs's - # flymake. - filename = re.sub(r'_flymake\.h$', '.h', filename) - filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) - # Replace 'c++' with 'cpp'.
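The guard variable this function ultimately derives (via the re.sub just below) follows a simple rule: the path from the repository root, with every non-alphanumeric character turned into '_', uppercased, plus a trailing underscore. A quick illustration of that final transformation:

```python
import re

def header_guard_variable(path_from_root):
    # Same final step as GetHeaderGuardCPPVariable: non-alphanumerics
    # become '_', the result is uppercased, and a trailing '_' is added.
    return re.sub(r'[^a-zA-Z0-9]', '_', path_from_root).upper() + '_'

print(header_guard_variable('chrome/browser/browser.h'))
# CHROME_BROWSER_BROWSER_H_
```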
- filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') - - fileinfo = FileInfo(filename) - file_path_from_root = fileinfo.RepositoryName() - if _root: - file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) - return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' - - -def CheckForHeaderGuard(filename, clean_lines, error): - """Checks that the file contains a header guard. - - Logs an error if no #ifndef header guard is present. For other - headers, checks that the full pathname is used. - - Args: - filename: The name of the C++ header file. - clean_lines: A CleansedLines instance containing the file. - error: The function to call with any errors found. - """ - - # Don't check for header guards if there are error suppression - # comments somewhere in this file. - # - # Because this is silencing a warning for a nonexistent line, we - # only support the very specific NOLINT(build/header_guard) syntax, - # and not the general NOLINT or NOLINT(*) syntax. - raw_lines = clean_lines.lines_without_raw_strings - for i in raw_lines: - if Search(r'//\s*NOLINT\(build/header_guard\)', i): - return - - cppvar = GetHeaderGuardCPPVariable(filename) - - ifndef = '' - ifndef_linenum = 0 - define = '' - endif = '' - endif_linenum = 0 - for linenum, line in enumerate(raw_lines): - linesplit = line.split() - if len(linesplit) >= 2: - # find the first occurrence of #ifndef and #define, save arg - if not ifndef and linesplit[0] == '#ifndef': - # set ifndef to the header guard presented on the #ifndef line. - ifndef = linesplit[1] - ifndef_linenum = linenum - if not define and linesplit[0] == '#define': - define = linesplit[1] - # find the last occurrence of #endif, save entire line - if line.startswith('#endif'): - endif = line - endif_linenum = linenum - - if not ifndef or not define or ifndef != define: - error(filename, 0, 'build/header_guard', 5, - 'No #ifndef header guard found, suggested CPP variable is: %s' % - cppvar) - return - - # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ - # for backward compatibility. - if ifndef != cppvar: - error_level = 0 - if ifndef != cppvar + '_': - error_level = 5 - - ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, - error) - error(filename, ifndef_linenum, 'build/header_guard', error_level, - '#ifndef header guard has wrong style, please use: %s' % cppvar) - - # Check for "//" comments on endif line. - ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, - error) - match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) - if match: - if match.group(1) == '_': - # Issue low severity warning for deprecated double trailing underscore - error(filename, endif_linenum, 'build/header_guard', 0, - '#endif line should be "#endif // %s"' % cppvar) - return - - # Didn't find the corresponding "//" comment. If this file does not - # contain any "//" comments at all, it could be that the compiler - # only wants "/**/" comments, look for those instead. 
- no_single_line_comments = True - for i in xrange(1, len(raw_lines) - 1): - line = raw_lines[i] - if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): - no_single_line_comments = False - break - - if no_single_line_comments: - match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) - if match: - if match.group(1) == '_': - # Low severity warning for double trailing underscore - error(filename, endif_linenum, 'build/header_guard', 0, - '#endif line should be "#endif /* %s */"' % cppvar) - return - - # Didn't find anything - error(filename, endif_linenum, 'build/header_guard', 5, - '#endif line should be "#endif // %s"' % cppvar) - - -def CheckHeaderFileIncluded(filename, include_state, error): - """Logs an error if a .cc file does not include its header.""" - - # Do not check test files - if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'): - return - - fileinfo = FileInfo(filename) - headerfile = filename[0:len(filename) - 2] + 'h' - if not os.path.exists(headerfile): - return - headername = FileInfo(headerfile).RepositoryName() - first_include = 0 - for section_list in include_state.include_list: - for f in section_list: - if headername in f[0] or f[0] in headername: - return - if not first_include: - first_include = f[1] - - error(filename, first_include, 'build/include', 5, - '%s should include its header file %s' % (fileinfo.RepositoryName(), - headername)) - - -def CheckForBadCharacters(filename, lines, error): - """Logs an error for each line containing bad characters. - - Two kinds of bad characters: - - 1. Unicode replacement characters: These indicate that either the file - contained invalid UTF-8 (likely) or Unicode replacement characters (which - it shouldn't). Note that it's possible for this to throw off line - numbering if the invalid UTF-8 occurred adjacent to a newline. - - 2. NUL bytes. These are problematic for some tools. - - Args: - filename: The name of the current file. - lines: An array of strings, each representing a line of the file. - error: The function to call with any errors found. - """ - for linenum, line in enumerate(lines): - if u'\ufffd' in line: - error(filename, linenum, 'readability/utf8', 5, - 'Line contains invalid UTF-8 (or Unicode replacement character).') - if '\0' in line: - error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') - - -def CheckForNewlineAtEOF(filename, lines, error): - """Logs an error if there is no newline char at the end of the file. - - Args: - filename: The name of the current file. - lines: An array of strings, each representing a line of the file. - error: The function to call with any errors found. - """ - - # The array lines() was created by adding two newlines to the - # original file (go figure), then splitting on \n. - # To verify that the file ends in \n, we just have to make sure the - # last-but-two element of lines() exists and is empty. - if len(lines) < 3 or lines[-2]: - error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, - 'Could not find a newline character at the end of the file.') - - -def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): - """Logs an error if we see /* ... */ or "..." that extend past one line. - - /* ... */ comments are legit inside macros, for one line. - Otherwise, we prefer // comments, so it's ok to warn about the - other. Likewise, it's ok for strings to extend across multiple - lines, as long as a line continuation character (backslash) - terminates each line. 
Although not currently prohibited by the C++ - style guide, it's ugly and unnecessary. We don't do well with either - in this lint program, so we warn about both. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Remove all \\ (escaped backslashes) from the line. They are OK, and the - # second (escaped) slash may trigger later \" detection erroneously. - line = line.replace('\\\\', '') - - if line.count('/*') > line.count('*/'): - error(filename, linenum, 'readability/multiline_comment', 5, - 'Complex multi-line /*...*/-style comment found. ' - 'Lint may give bogus warnings. ' - 'Consider replacing these with //-style comments, ' - 'with #if 0...#endif, ' - 'or with more clearly structured multi-line comments.') - - if (line.count('"') - line.count('\\"')) % 2: - error(filename, linenum, 'readability/multiline_string', 5, - 'Multi-line string ("...") found. This lint script doesn\'t ' - 'do well with such strings, and may give bogus warnings. ' - 'Use C++11 raw strings or concatenation instead.') - - -# (non-threadsafe name, thread-safe alternative, validation pattern) -# -# The validation pattern is used to eliminate false positives such as: -# _rand(); // false positive due to substring match. -# ->rand(); // some member function rand(). -# ACMRandom rand(seed); // some variable named rand. -# ISAACRandom rand(); // another variable named rand. -# -# Basically we require the return value of these functions to be used -# in some expression context on the same line by matching on some -# operator before the function name. This eliminates constructors and -# member function calls. -_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' -_THREADING_LIST = ( - ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), - ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), - ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), - ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), - ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), - ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), - ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), - ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), - ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), - ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), - ('strtok(', 'strtok_r(', - _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), - ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), - ) - - -def CheckPosixThreading(filename, clean_lines, linenum, error): - """Checks for calls to thread-unsafe functions. - - Much code has been originally written without consideration of - multi-threading. Also, engineers are relying on their old experience; - they have learned posix before threading extensions were added. These - tests guide the engineers to use thread-safe functions (when using - posix directly). - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. 
- """ - line = clean_lines.elided[linenum] - for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: - # Additional pattern matching check to confirm that this is the - # function we are looking for - if Search(pattern, line): - error(filename, linenum, 'runtime/threadsafe_fn', 2, - 'Consider using ' + multithread_safe_func + - '...) instead of ' + single_thread_func + - '...) for improved thread safety.') - - -def CheckVlogArguments(filename, clean_lines, linenum, error): - """Checks that VLOG() is only used for defining a logging level. - - For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and - VLOG(FATAL) are not. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): - error(filename, linenum, 'runtime/vlog', 5, - 'VLOG() should be used with numeric verbosity level. ' - 'Use LOG() if you want symbolic severity levels.') - -# Matches invalid increment: *count++, which moves pointer instead of -# incrementing a value. -_RE_PATTERN_INVALID_INCREMENT = re.compile( - r'^\s*\*\w+(\+\+|--);') - - -def CheckInvalidIncrement(filename, clean_lines, linenum, error): - """Checks for invalid increment *count++. - - For example following function: - void increment_counter(int* count) { - *count++; - } - is invalid, because it effectively does count++, moving pointer, and should - be replaced with ++*count, (*count)++ or *count += 1. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - if _RE_PATTERN_INVALID_INCREMENT.match(line): - error(filename, linenum, 'runtime/invalid_increment', 5, - 'Changing pointer instead of value (or unused value of operator*).') - - -def IsMacroDefinition(clean_lines, linenum): - if Search(r'^#define', clean_lines[linenum]): - return True - - if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): - return True - - return False - - -def IsForwardClassDeclaration(clean_lines, linenum): - return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) - - -class _BlockInfo(object): - """Stores information about a generic block of code.""" - - def __init__(self, seen_open_brace): - self.seen_open_brace = seen_open_brace - self.open_parentheses = 0 - self.inline_asm = _NO_ASM - self.check_namespace_indentation = False - - def CheckBegin(self, filename, clean_lines, linenum, error): - """Run checks that applies to text up to the opening brace. - - This is mostly for checking the text after the class identifier - and the "{", usually where the base class is specified. For other - blocks, there isn't much to check, so we always pass. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Run checks that applies to text after the closing brace. - - This is mostly used for checking end of namespace comments. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. 
- linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass - - def IsBlockInfo(self): - """Returns true if this block is a _BlockInfo. - - This is convenient for verifying that an object is an instance of - a _BlockInfo, but not an instance of any of the derived classes. - - Returns: - True for this class, False for derived classes. - """ - return self.__class__ == _BlockInfo - - -class _ExternCInfo(_BlockInfo): - """Stores information about an 'extern "C"' block.""" - - def __init__(self): - _BlockInfo.__init__(self, True) - - -class _ClassInfo(_BlockInfo): - """Stores information about a class.""" - - def __init__(self, name, class_or_struct, clean_lines, linenum): - _BlockInfo.__init__(self, False) - self.name = name - self.starting_linenum = linenum - self.is_derived = False - self.check_namespace_indentation = True - if class_or_struct == 'struct': - self.access = 'public' - self.is_struct = True - else: - self.access = 'private' - self.is_struct = False - - # Remember initial indentation level for this class. Using raw_lines here - # instead of elided to account for leading comments. - self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) - - # Try to find the end of the class. This will be confused by things like: - # class A { - # } *x = { ... - # - # But it's still good enough for CheckSectionSpacing. - self.last_line = 0 - depth = 0 - for i in range(linenum, clean_lines.NumLines()): - line = clean_lines.elided[i] - depth += line.count('{') - line.count('}') - if not depth: - self.last_line = i - break - - def CheckBegin(self, filename, clean_lines, linenum, error): - # Look for a bare ':' - if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): - self.is_derived = True - - def CheckEnd(self, filename, clean_lines, linenum, error): - # If there is a DISALLOW macro, it should appear near the end of - # the class. - seen_last_thing_in_class = False - for i in xrange(linenum - 1, self.starting_linenum, -1): - match = Search( - r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + - self.name + r'\)', - clean_lines.elided[i]) - if match: - if seen_last_thing_in_class: - error(filename, i, 'readability/constructors', 3, - match.group(1) + ' should be the last thing in the class') - break - - if not Match(r'^\s*$', clean_lines.elided[i]): - seen_last_thing_in_class = True - - # Check that closing brace is aligned with beginning of the class. - # Only do this if the closing brace is indented by only whitespaces. - # This means we will not check single-line class definitions. - indent = Match(r'^( *)\}', clean_lines.elided[linenum]) - if indent and len(indent.group(1)) != self.class_indent: - if self.is_struct: - parent = 'struct ' + self.name - else: - parent = 'class ' + self.name - error(filename, linenum, 'whitespace/indent', 3, - 'Closing brace should be aligned with beginning of %s' % parent) - - -class _NamespaceInfo(_BlockInfo): - """Stores information about a namespace.""" - - def __init__(self, name, linenum): - _BlockInfo.__init__(self, False) - self.name = name or '' - self.starting_linenum = linenum - self.check_namespace_indentation = True - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Check end of namespace comments.""" - line = clean_lines.raw_lines[linenum] - - # Check how many lines is enclosed in this namespace. Don't issue - # warning for missing namespace comments if there aren't enough - # lines. 
However, do apply checks if there is already an end of - # namespace comment and it's incorrect. - # - # TODO(unknown): We always want to check end of namespace comments - # if a namespace is large, but sometimes we also want to apply the - # check if a short namespace contained nontrivial things (something - # other than forward declarations). There is currently no logic on - # deciding what these nontrivial things are, so this check is - # triggered by namespace size only, which works most of the time. - if (linenum - self.starting_linenum < 10 - and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): - return - - # Look for matching comment at end of namespace. - # - # Note that we accept C style "/* */" comments for terminating - # namespaces, so that code that terminate namespaces inside - # preprocessor macros can be cpplint clean. - # - # We also accept stuff like "// end of namespace <name>." with the - # period at the end. - # - # Besides these, we don't accept anything else, otherwise we might - # get false negatives when existing comment is a substring of the - # expected namespace. - if self.name: - # Named namespace - if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + - r'[\*/\.\\\s]*$'), - line): - error(filename, linenum, 'readability/namespace', 5, - 'Namespace should be terminated with "// namespace %s"' % - self.name) - else: - # Anonymous namespace - if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): - # If "// namespace anonymous" or "// anonymous namespace (more text)", - # mention "// anonymous namespace" as an acceptable form - if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): - error(filename, linenum, 'readability/namespace', 5, - 'Anonymous namespace should be terminated with "// namespace"' - ' or "// anonymous namespace"') - else: - error(filename, linenum, 'readability/namespace', 5, - 'Anonymous namespace should be terminated with "// namespace"') - - -class _PreprocessorInfo(object): - """Stores checkpoints of nesting stacks when #if/#else is seen.""" - - def __init__(self, stack_before_if): - # The entire nesting stack before #if - self.stack_before_if = stack_before_if - - # The entire nesting stack up to #else - self.stack_before_else = [] - - # Whether we have already seen #else or #elif - self.seen_else = False - - -class NestingState(object): - """Holds states related to parsing braces.""" - - def __init__(self): - # Stack for tracking all braces. An object is pushed whenever we - # see a "{", and popped when we see a "}". Only 3 types of - # objects are possible: - # - _ClassInfo: a class or struct. - # - _NamespaceInfo: a namespace. - # - _BlockInfo: some other type of block. - self.stack = [] - - # Top of the previous stack before each Update(). - # - # Because the nesting_stack is updated at the end of each line, we - # had to do some convoluted checks to find out what is the current - # scope at the beginning of the line. This check is simplified by - # saving the previous top of nesting stack. - # - # We could save the full stack, but we only need the top. Copying - # the full nesting stack would slow down cpplint by ~10%. - self.previous_stack_top = [] - - # Stack of _PreprocessorInfo objects. - self.pp_stack = [] - - def SeenOpenBrace(self): - """Check if we have seen the opening brace for the innermost block. - - Returns: - True if we have seen the opening brace, False if the innermost - block is still expecting an opening brace.
- """ - return (not self.stack) or self.stack[-1].seen_open_brace - - def InNamespaceBody(self): - """Check if we are currently one level inside a namespace body. - - Returns: - True if top of the stack is a namespace block, False otherwise. - """ - return self.stack and isinstance(self.stack[-1], _NamespaceInfo) - - def InExternC(self): - """Check if we are currently one level inside an 'extern "C"' block. - - Returns: - True if top of the stack is an extern block, False otherwise. - """ - return self.stack and isinstance(self.stack[-1], _ExternCInfo) - - def InClassDeclaration(self): - """Check if we are currently one level inside a class or struct declaration. - - Returns: - True if top of the stack is a class/struct, False otherwise. - """ - return self.stack and isinstance(self.stack[-1], _ClassInfo) - - def InAsmBlock(self): - """Check if we are currently one level inside an inline ASM block. - - Returns: - True if the top of the stack is a block containing inline ASM. - """ - return self.stack and self.stack[-1].inline_asm != _NO_ASM - - def InTemplateArgumentList(self, clean_lines, linenum, pos): - """Check if current position is inside template argument list. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: position just after the suspected template argument. - Returns: - True if (linenum, pos) is inside template arguments. - """ - while linenum < clean_lines.NumLines(): - # Find the earliest character that might indicate a template argument - line = clean_lines.elided[linenum] - match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) - if not match: - linenum += 1 - pos = 0 - continue - token = match.group(1) - pos += len(match.group(0)) - - # These things do not look like template argument list: - # class Suspect { - # class Suspect x; } - if token in ('{', '}', ';'): return False - - # These things look like template argument list: - # template - # template - # template - # template - if token in ('>', '=', '[', ']', '.'): return True - - # Check if token is an unmatched '<'. - # If not, move on to the next character. - if token != '<': - pos += 1 - if pos >= len(line): - linenum += 1 - pos = 0 - continue - - # We can't be sure if we just find a single '<', and need to - # find the matching '>'. - (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) - if end_pos < 0: - # Not sure if template argument list or syntax error in file - return False - linenum = end_line - pos = end_pos - return False - - def UpdatePreprocessor(self, line): - """Update preprocessor stack. - - We need to handle preprocessors due to classes like this: - #ifdef SWIG - struct ResultDetailsPageElementExtensionPoint { - #else - struct ResultDetailsPageElementExtensionPoint : public Extension { - #endif - - We make the following assumptions (good enough for most files): - - Preprocessor condition evaluates to true from #if up to first - #else/#elif/#endif. - - - Preprocessor condition evaluates to false from #else/#elif up - to #endif. We still perform lint checks on these lines, but - these do not affect nesting stack. - - Args: - line: current line to check. - """ - if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): - # Beginning of #if block, save the nesting stack here. The saved - # stack will allow us to restore the parsing state in the #else case. 
- self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) - elif Match(r'^\s*#\s*(else|elif)\b', line): - # Beginning of #else block - if self.pp_stack: - if not self.pp_stack[-1].seen_else: - # This is the first #else or #elif block. Remember the - # whole nesting stack up to this point. This is what we - # keep after the #endif. - self.pp_stack[-1].seen_else = True - self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) - - # Restore the stack to how it was before the #if - self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) - else: - # TODO(unknown): unexpected #else, issue warning? - pass - elif Match(r'^\s*#\s*endif\b', line): - # End of #if or #else blocks. - if self.pp_stack: - # If we saw an #else, we will need to restore the nesting - # stack to its former state before the #else, otherwise we - # will just continue from where we left off. - if self.pp_stack[-1].seen_else: - # Here we can just use a shallow copy since we are the last - # reference to it. - self.stack = self.pp_stack[-1].stack_before_else - # Drop the corresponding #if - self.pp_stack.pop() - else: - # TODO(unknown): unexpected #endif, issue warning? - pass - - # TODO(unknown): Update() is too long, but we will refactor later. - def Update(self, filename, clean_lines, linenum, error): - """Update nesting state with current line. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Remember top of the previous nesting stack. - # - # The stack is always pushed/popped and not modified in place, so - # we can just do a shallow copy instead of copy.deepcopy. Using - # deepcopy would slow down cpplint by ~28%. - if self.stack: - self.previous_stack_top = self.stack[-1] - else: - self.previous_stack_top = None - - # Update pp_stack - self.UpdatePreprocessor(line) - - # Count parentheses. This is to avoid adding struct arguments to - # the nesting stack. - if self.stack: - inner_block = self.stack[-1] - depth_change = line.count('(') - line.count(')') - inner_block.open_parentheses += depth_change - - # Also check if we are starting or ending an inline assembly block. - if inner_block.inline_asm in (_NO_ASM, _END_ASM): - if (depth_change != 0 and - inner_block.open_parentheses == 1 and - _MATCH_ASM.match(line)): - # Enter assembly block - inner_block.inline_asm = _INSIDE_ASM - else: - # Not entering assembly block. If previous line was _END_ASM, - # we will now shift to _NO_ASM state. - inner_block.inline_asm = _NO_ASM - elif (inner_block.inline_asm == _INSIDE_ASM and - inner_block.open_parentheses == 0): - # Exit assembly block - inner_block.inline_asm = _END_ASM - - # Consume namespace declaration at the beginning of the line. Do - # this in a loop so that we catch same line declarations like this: - # namespace proto2 { namespace bridge { class MessageSet; } } - while True: - # Match start of namespace. The "\b\s*" below catches namespace - # declarations even if it weren't followed by a whitespace, this - # is so that we don't confuse our namespace checker. The - # missing spaces will be flagged by CheckSpacing. 
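The loop that follows peels namespace declarations off the line one at a time, which is what lets single-line nests like "namespace proto2 { namespace bridge { ... } }" work. The same pattern, run standalone:

```python
import re

line = 'namespace proto2 { namespace bridge { class MessageSet; } }'
names = []
while True:
    m = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
    if not m:
        break
    names.append(m.group(1))   # None for an anonymous namespace
    line = m.group(2)
    brace = line.find('{')
    if brace != -1:            # consume the opening brace, keep the rest
        line = line[brace + 1:]
print(names)  # ['proto2', 'bridge']
```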
- namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) - if not namespace_decl_match: - break - - new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) - self.stack.append(new_namespace) - - line = namespace_decl_match.group(2) - if line.find('{') != -1: - new_namespace.seen_open_brace = True - line = line[line.find('{') + 1:] - - # Look for a class declaration in whatever is left of the line - # after parsing namespaces. The regexp accounts for decorated classes - # such as in: - # class LOCKABLE API Object { - # }; - class_decl_match = Match( - r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' - r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' - r'(.*)$', line) - if (class_decl_match and - (not self.stack or self.stack[-1].open_parentheses == 0)): - # We do not want to accept classes that are actually template arguments: - # template <class Ignore1, - # class Ignore2 = default_value, - # template <typename> class Ignore3> - # void Function() {}; - # - # To avoid template argument cases, we scan forward and look for - # an unmatched '>'. If we see one, assume we are inside a - # template argument list. - end_declaration = len(class_decl_match.group(1)) - if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): - self.stack.append(_ClassInfo( - class_decl_match.group(3), class_decl_match.group(2), - clean_lines, linenum)) - line = class_decl_match.group(4) - - # If we have not yet seen the opening brace for the innermost block, - # run checks here. - if not self.SeenOpenBrace(): - self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) - - # Update access control if we are inside a class/struct - if self.stack and isinstance(self.stack[-1], _ClassInfo): - classinfo = self.stack[-1] - access_match = Match( - r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' - r':(?:[^:]|$)', - line) - if access_match: - classinfo.access = access_match.group(2) - - # Check that access keywords are indented +1 space. Skip this - # check if the keywords are not preceded by whitespaces. - indent = access_match.group(1) - if (len(indent) != classinfo.class_indent + 1 and - Match(r'^\s*$', indent)): - if classinfo.is_struct: - parent = 'struct ' + classinfo.name - else: - parent = 'class ' + classinfo.name - slots = '' - if access_match.group(3): - slots = access_match.group(3) - error(filename, linenum, 'whitespace/indent', 3, - '%s%s: should be indented +1 space inside %s' % ( - access_match.group(2), slots, parent)) - - # Consume braces or semicolons from what's left of the line - while True: - # Match first brace, semicolon, or closed parenthesis. - matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) - if not matched: - break - - token = matched.group(1) - if token == '{': - # If namespace or class hasn't seen an opening brace yet, mark - # namespace/class head as complete. Push a new block onto the - # stack otherwise. - if not self.SeenOpenBrace(): - self.stack[-1].seen_open_brace = True - elif Match(r'^extern\s*"[^"]*"\s*\{', line): - self.stack.append(_ExternCInfo()) - else: - self.stack.append(_BlockInfo(True)) - if _MATCH_ASM.match(line): - self.stack[-1].inline_asm = _BLOCK_ASM - - elif token == ';' or token == ')': - # If we haven't seen an opening brace yet, but we already saw - # a semicolon, this is probably a forward declaration. Pop - # the stack for these. - # - # Similarly, if we haven't seen an opening brace yet, but we - # already saw a closing parenthesis, then these are probably - # function arguments with extra "class" or "struct" keywords. - # Also pop the stack for these.
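For a quick feel of the class-declaration regexp above, here it is applied to the decorated declaration from its own comment (groups 2 and 3 carry the keyword and the class name); a standalone demonstration, not cpplint code:

```python
import re

_CLASS_DECL = re.compile(
    r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
    r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
    r'(.*)$')

# "(?:[A-Z_]+\s+)*" tolerates decorations such as LOCKABLE / API macros
# between the keyword and the name.
m = _CLASS_DECL.match('class LOCKABLE API Object {')
print(m.group(2), m.group(3))  # class Object
```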
- if not self.SeenOpenBrace(): - self.stack.pop() - else: # token == '}' - # Perform end of block checks and pop the stack. - if self.stack: - self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) - self.stack.pop() - line = matched.group(2) - - def InnermostClass(self): - """Get class info on the top of the stack. - - Returns: - A _ClassInfo object if we are inside a class, or None otherwise. - """ - for i in range(len(self.stack), 0, -1): - classinfo = self.stack[i - 1] - if isinstance(classinfo, _ClassInfo): - return classinfo - return None - - def CheckCompletedBlocks(self, filename, error): - """Checks that all classes and namespaces have been completely parsed. - - Call this when all lines in a file have been processed. - Args: - filename: The name of the current file. - error: The function to call with any errors found. - """ - # Note: This test can result in false positives if #ifdef constructs - # get in the way of brace matching. See the testBuildClass test in - # cpplint_unittest.py for an example of this. - for obj in self.stack: - if isinstance(obj, _ClassInfo): - error(filename, obj.starting_linenum, 'build/class', 5, - 'Failed to find complete declaration of class %s' % - obj.name) - elif isinstance(obj, _NamespaceInfo): - error(filename, obj.starting_linenum, 'build/namespaces', 5, - 'Failed to find complete declaration of namespace %s' % - obj.name) - - -def CheckForNonStandardConstructs(filename, clean_lines, linenum, - nesting_state, error): - r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. - - Complain about several constructs which gcc-2 accepts, but which are - not standard C++. Warning about these in lint is one way to ease the - transition to new compilers. - - put storage class first (e.g. "static const" instead of "const static"). - - "%lld" instead of "%qd" in printf-type functions. - - "%1$d" is non-standard in printf-type functions. - - "\%" is an undefined character escape sequence. - - text after #endif is not allowed. - - invalid inner-style forward declaration. - - >? and <? operators, and their >?= and <?= cousins. - - Additionally, check for constructor/destructor style violations and reference - members, as it is very convenient to do so while checking for - gcc-2 compliance. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - """ - - # Remove comments from the line, but leave in strings for now. - line = clean_lines.lines[linenum] - - if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): - error(filename, linenum, 'runtime/printf_format', 3, - '%q in format strings is deprecated. Use %ll instead.') - - if Search(r'printf\s*\(.*".*%\d+\$', line): - error(filename, linenum, 'runtime/printf_format', 2, - '%N$ formats are unconventional. Try rewriting to avoid them.') - - # Remove escaped backslashes before looking for undefined escape sequences. - line = line.replace('\\\\', '') - - if Search(r'("|\').*\\(%|\[|\(|{)', line): - error(filename, linenum, 'build/printf_format', 3, - '%, [, (, and { are undefined character escapes. Unescape them.') - - # For the rest, work with both comments and strings removed. - line = clean_lines.elided[linenum] - - if Search(r'\b(const|volatile|void|char|short|int|long' - r'|float|double|signed|unsigned' - r'|schar|u?int8|u?int16|u?int32|u?int64)' - r'\s+(register|static|extern|typedef)\b', - line): - error(filename, linenum, 'build/storage_class', 5, - 'Storage class (static, extern, typedef, etc) should be first.') - - if Match(r'\s*#\s*endif\s*[^/\s]+', line): - error(filename, linenum, 'build/endif_comment', 5, - 'Uncommented text after #endif is non-standard. Use a comment.') - - if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): - error(filename, linenum, 'build/forward_decl', 5, - 'Inner-style forward declarations are invalid. Remove this line.') - - if Search(r'(\w+|[+-]?\d+)(\.\d*)?\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', - line): - error(filename, linenum, 'build/deprecated', 3, - '>? and <? (max and min) operators are non-standard and deprecated.') - - if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): - # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' - error(filename, linenum, 'runtime/member_string_references', 2, - 'const string& members are dangerous. It is much better to use ' - 'alternatives, such as pointers or simple constants.') - - # Everything else in this function operates on class declarations. - # Return early if the top of the nesting stack is not a class, or if - # the class head is not completed yet. - classinfo = nesting_state.InnermostClass() - if not classinfo or not classinfo.seen_open_brace: - return - - # The class may have been declared with namespace or classname qualifiers. - # The constructor and destructor will not have those qualifiers. - base_classname = classinfo.name.split('::')[-1] - - # Look for single-argument constructors that aren't marked explicit. - # Technically a valid construct, but against style. Also look for - # non-single-argument constructors which are also technically valid, but - # strongly suggest something is wrong.
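The build/deprecated check above targets the old GNU min/max operators: "a <? b" and "a >? b" (and their assigning >?= / <?= cousins) should be std::min / std::max instead. A quick demonstration of the pattern, outside of cpplint:

```python
import re

_DEPRECATED_MINMAX = re.compile(
    r'(\w+|[+-]?\d+)(\.\d*)?\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?')

print(bool(_DEPRECATED_MINMAX.search('m = a <? b;')))          # True
print(bool(_DEPRECATED_MINMAX.search('m = std::min(a, b);')))  # False
```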
- explicit_constructor_match = Match( - r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' - r'\(((?:[^()]|\([^()]*\))*)\)' - % re.escape(base_classname), - line) - - if explicit_constructor_match: - is_marked_explicit = explicit_constructor_match.group(1) - - if not explicit_constructor_match.group(2): - constructor_args = [] - else: - constructor_args = explicit_constructor_match.group(2).split(',') - - # collapse arguments so that commas in template parameter lists and function - # argument parameter lists don't split arguments in two - i = 0 - while i < len(constructor_args): - constructor_arg = constructor_args[i] - while (constructor_arg.count('<') > constructor_arg.count('>') or - constructor_arg.count('(') > constructor_arg.count(')')): - constructor_arg += ',' + constructor_args[i + 1] - del constructor_args[i + 1] - constructor_args[i] = constructor_arg - i += 1 - - defaulted_args = [arg for arg in constructor_args if '=' in arg] - noarg_constructor = (not constructor_args or # empty arg list - # 'void' arg specifier - (len(constructor_args) == 1 and - constructor_args[0].strip() == 'void')) - onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg - not noarg_constructor) or - # all but at most one arg defaulted - (len(constructor_args) >= 1 and - not noarg_constructor and - len(defaulted_args) >= len(constructor_args) - 1)) - initializer_list_constructor = bool( - onearg_constructor and - Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) - copy_constructor = bool( - onearg_constructor and - Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' - % re.escape(base_classname), constructor_args[0].strip())) - - if (not is_marked_explicit and - onearg_constructor and - not initializer_list_constructor and - not copy_constructor): - if defaulted_args: - error(filename, linenum, 'runtime/explicit', 5, - 'Constructors callable with one argument ' - 'should be marked explicit.') - else: - error(filename, linenum, 'runtime/explicit', 5, - 'Single-parameter constructors should be marked explicit.') - elif is_marked_explicit and not onearg_constructor: - if noarg_constructor: - error(filename, linenum, 'runtime/explicit', 5, - 'Zero-parameter constructors should not be marked explicit.') - else: - error(filename, linenum, 'runtime/explicit', 0, - 'Constructors that require multiple arguments ' - 'should not be marked explicit.') - - -def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): - """Checks for the correctness of various spacing around function calls. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Since function calls often occur inside if/for/while/switch - # expressions - which have their own, more liberal conventions - we - # first see if we should be looking inside such an expression for a - # function call, to which we can apply more strict standards. - fncall = line # if there's no control flow construct, look at whole line - for pattern in (r'\bif\s*\((.*)\)\s*{', - r'\bfor\s*\((.*)\)\s*{', - r'\bwhile\s*\((.*)\)\s*[{;]', - r'\bswitch\s*\((.*)\)\s*{'): - match = Search(pattern, line) - if match: - fncall = match.group(1) # look inside the parens for function calls - break - - # Except in if/for/while/switch, there should never be space - # immediately inside parens (eg "f( 3, 4 )"). 
-
-
-def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
-  """Checks for the correctness of various spacing around function calls.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-  line = clean_lines.elided[linenum]
-
-  # Since function calls often occur inside if/for/while/switch
-  # expressions - which have their own, more liberal conventions - we
-  # first see if we should be looking inside such an expression for a
-  # function call, to which we can apply more strict standards.
-  fncall = line    # if there's no control flow construct, look at whole line
-  for pattern in (r'\bif\s*\((.*)\)\s*{',
-                  r'\bfor\s*\((.*)\)\s*{',
-                  r'\bwhile\s*\((.*)\)\s*[{;]',
-                  r'\bswitch\s*\((.*)\)\s*{'):
-    match = Search(pattern, line)
-    if match:
-      fncall = match.group(1)    # look inside the parens for function calls
-      break
-
-  # Except in if/for/while/switch, there should never be space
-  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
-  # for nested parens ( (a+b) + c ).  Likewise, there should never be
-  # a space before a ( when it's a function argument.  I assume it's a
-  # function argument when the char before the whitespace is legal in
-  # a function name (alnum + _) and we're not starting a macro. Also ignore
-  # pointers and references to arrays and functions coz they're too tricky:
-  # we use a very simple way to recognize these:
-  #   " (something)(maybe-something)" or
-  #   " (something)(maybe-something," or
-  #   " (something)[something]"
-  # Note that we assume the contents of [] to be short enough that
-  # they'll never need to wrap.
-  if (  # Ignore control structures.
-      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
-                 fncall) and
-      # Ignore pointers/references to functions.
-      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
-      # Ignore pointers/references to arrays.
-      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
-    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
-      error(filename, linenum, 'whitespace/parens', 4,
-            'Extra space after ( in function call')
-    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
-      error(filename, linenum, 'whitespace/parens', 2,
-            'Extra space after (')
-    if (Search(r'\w\s+\(', fncall) and
-        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
-        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
-        not Search(r'\bcase\s+\(', fncall)):
-      # TODO(unknown): Space after an operator function seem to be a common
-      # error, silence those for now by restricting them to highest verbosity.
-      if Search(r'\boperator_*\b', line):
-        error(filename, linenum, 'whitespace/parens', 0,
-              'Extra space before ( in function call')
-      else:
-        error(filename, linenum, 'whitespace/parens', 4,
-              'Extra space before ( in function call')
-  # If the ) is followed only by a newline or a { + newline, assume it's
-  # part of a control statement (if/while/etc), and don't complain
-  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
-    # If the closing parenthesis is preceded by only whitespaces,
-    # try to give a more descriptive error message.
-    if Search(r'^\s+\)', fncall):
-      error(filename, linenum, 'whitespace/parens', 2,
-            'Closing ) should be moved to the previous line')
-    else:
-      error(filename, linenum, 'whitespace/parens', 2,
-            'Extra space before )')
-
-
-def IsBlankLine(line):
-  """Returns true if the given line is blank.
-
-  We consider a line to be blank if the line is empty or consists of
-  only white spaces.
-
-  Args:
-    line: A line of a string.
-
-  Returns:
-    True, if the given line is blank.
-  """
-  return not line or line.isspace()
-
-
-def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
-                                 error):
-  is_namespace_indent_item = (
-      len(nesting_state.stack) > 1 and
-      nesting_state.stack[-1].check_namespace_indentation and
-      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
-      nesting_state.previous_stack_top == nesting_state.stack[-2])
-
-  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
-                                     clean_lines.elided, line):
-    CheckItemIndentationInNamespace(filename, clean_lines.elided,
-                                    line, error)
-
-
-def CheckForFunctionLengths(filename, clean_lines, linenum,
-                            function_state, error):
-  """Reports for long function bodies.
-
-  For an overview why this is done, see:
-  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
-
-  Uses a simplistic algorithm assuming other style guidelines
-  (especially spacing) are followed.
-  Only checks unindented functions, so class members are unchecked.
-  Trivial bodies are unchecked, so constructors with huge initializer lists
-  may be missed.
-  Blank/comment lines are not counted so as to avoid encouraging the removal
-  of vertical space and comments just to get through a lint check.
-  NOLINT *on the last line of a function* disables this check.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    function_state: Current function name and lines in body so far.
-    error: The function to call with any errors found.
-  """
-  lines = clean_lines.lines
-  line = lines[linenum]
-  joined_line = ''
-
-  starting_func = False
-  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
-  match_result = Match(regexp, line)
-  if match_result:
-    # If the name is all caps and underscores, figure it's a macro and
-    # ignore it, unless it's TEST or TEST_F.
-    function_name = match_result.group(1).split()[-1]
-    if function_name == 'TEST' or function_name == 'TEST_F' or (
-        not Match(r'[A-Z_]+$', function_name)):
-      starting_func = True
-
-  if starting_func:
-    body_found = False
-    for start_linenum in xrange(linenum, clean_lines.NumLines()):
-      start_line = lines[start_linenum]
-      joined_line += ' ' + start_line.lstrip()
-      if Search(r'(;|})', start_line):  # Declarations and trivial functions
-        body_found = True
-        break                           # ... ignore
-      elif Search(r'{', start_line):
-        body_found = True
-        function = Search(r'((\w|:)*)\(', line).group(1)
-        if Match(r'TEST', function):    # Handle TEST... macros
-          parameter_regexp = Search(r'(\(.*\))', joined_line)
-          if parameter_regexp:          # Ignore bad syntax
-            function += parameter_regexp.group(1)
-        else:
-          function += '()'
-        function_state.Begin(function)
-        break
-    if not body_found:
-      # No body for the function (or evidence of a non-function) was found.
-      error(filename, linenum, 'readability/fn_size', 5,
-            'Lint failed to find start of function body.')
-  elif Match(r'^\}\s*$', line):  # function end
-    function_state.Check(error, filename, linenum)
-    function_state.End()
-  elif not Match(r'^\s*$', line):
-    function_state.Count()  # Count non-blank/non-comment lines.
-
-
-_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
-
-
-def CheckComment(line, filename, linenum, next_line_start, error):
-  """Checks for common mistakes in comments.
-
-  Args:
-    line: The line in question.
-    filename: The name of the current file.
-    linenum: The number of the line to check.
-    next_line_start: The first non-whitespace column of the next line.
-    error: The function to call with any errors found.
-  """
-  commentpos = line.find('//')
-  if commentpos != -1:
-    # Check if the // may be in quotes.  If so, ignore it
-    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
-    if (line.count('"', 0, commentpos) -
-        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
-      # Allow one space for new scopes, two spaces otherwise:
-      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
-          ((commentpos >= 1 and
-            line[commentpos-1] not in string.whitespace) or
-           (commentpos >= 2 and
-            line[commentpos-2] not in string.whitespace))):
-        error(filename, linenum, 'whitespace/comments', 2,
-              'At least two spaces is best between code and comments')
-
-      # Checks for common mistakes in TODO comments.
-      comment = line[commentpos:]
-      match = _RE_PATTERN_TODO.match(comment)
-      if match:
-        # One whitespace is correct; zero whitespace is handled elsewhere.
-        leading_whitespace = match.group(1)
-        if len(leading_whitespace) > 1:
-          error(filename, linenum, 'whitespace/todo', 2,
-                'Too many spaces before TODO')
-
-        username = match.group(2)
-        if not username:
-          error(filename, linenum, 'readability/todo', 2,
-                'Missing username in TODO; it should look like '
-                '"// TODO(my_username): Stuff."')
-
-        middle_whitespace = match.group(3)
-        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
-        if middle_whitespace != ' ' and middle_whitespace != '':
-          error(filename, linenum, 'whitespace/todo', 2,
-                'TODO(my_username) should be followed by a space')
-
-      # If the comment contains an alphanumeric character, there
-      # should be a space somewhere between it and the // unless
-      # it's a /// or //! Doxygen comment.
-      if (Match(r'//[^ ]*\w', comment) and
-          not Match(r'(///|//\!)(\s+|$)', comment)):
-        error(filename, linenum, 'whitespace/comments', 4,
-              'Should have a space between // and comment')
-
-
-def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
-  """Checks for improper use of DISALLOW* macros.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    nesting_state: A NestingState instance which maintains information about
-      the current stack of nested blocks being parsed.
-    error: The function to call with any errors found.
-  """
-  line = clean_lines.elided[linenum]  # get rid of comments and strings
-
-  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
-                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
-  if not matched:
-    return
-  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
-    if nesting_state.stack[-1].access != 'private':
-      error(filename, linenum, 'readability/constructors', 3,
-            '%s must be in the private: section' % matched.group(1))
-
-  else:
-    # Found DISALLOW* macro outside a class declaration, or perhaps it
-    # was used inside a function when it should have been part of the
-    # class declaration.  We could issue a warning here, but it
-    # probably resulted in a compiler error already.
-    pass
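The `_RE_PATTERN_TODO` regex above drives all three TODO checks through its capture groups. A small standalone exercise of the same pattern (toy inputs, not part of the patch):

```python
import re

# group(1): whitespace before TODO; group(2): the "(username)" part, if any;
# group(3): the whitespace (or end of line) after the optional colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')

for comment in ('// TODO(alice): fix', '//TODO: fix', '//  TODO fix'):
    m = _RE_PATTERN_TODO.match(comment)
    print(repr(comment), '->', m.groups() if m else None)
# '// TODO(alice): fix' -> (' ', '(alice)', ' ')
# '//TODO: fix'         -> ('', None, ' ')
# '//  TODO fix'        -> ('  ', None, ' ')
```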
-
-
-def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
-  """Checks for the correctness of various spacing issues in the code.
-
-  Things we check for: spaces around operators, spaces after
-  if/for/while/switch, no spaces around parens in function calls, two
-  spaces between code and comment, don't start a block with a blank
-  line, don't end a function with a blank line, don't add a blank line
-  after public/protected/private, don't have too many blank lines in a row.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    nesting_state: A NestingState instance which maintains information about
-      the current stack of nested blocks being parsed.
-    error: The function to call with any errors found.
-  """
-
-  # Don't use "elided" lines here, otherwise we can't check commented lines.
-  # Don't want to use "raw" either, because we don't want to check inside C++11
-  # raw strings,
-  raw = clean_lines.lines_without_raw_strings
-  line = raw[linenum]
-
-  # Before nixing comments, check if the line is blank for no good
-  # reason.  This includes the first line after a block is opened, and
-  # blank lines at the end of a function (ie, right before a line like '}'
-  #
-  # Skip all the blank line checks if we are immediately inside a
-  # namespace body.  In other words, don't issue blank line warnings
-  # for this block:
-  #   namespace {
-  #
-  #   }
-  #
-  # A warning about missing end of namespace comments will be issued instead.
-  #
-  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
-  # like namespaces.
-  if (IsBlankLine(line) and
-      not nesting_state.InNamespaceBody() and
-      not nesting_state.InExternC()):
-    elided = clean_lines.elided
-    prev_line = elided[linenum - 1]
-    prevbrace = prev_line.rfind('{')
-    # TODO(unknown): Don't complain if line before blank line, and line after,
-    #                both start with alnums and are indented the same amount.
-    #                This ignores whitespace at the start of a namespace block
-    #                because those are not usually indented.
-    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
-      # OK, we have a blank line at the start of a code block.  Before we
-      # complain, we check if it is an exception to the rule: The previous
-      # non-empty line has the parameters of a function header that are indented
-      # 4 spaces (because they did not fit in a 80 column line when placed on
-      # the same line as the function name).  We also check for the case where
-      # the previous line is indented 6 spaces, which may happen when the
-      # initializers of a constructor do not fit into a 80 column line.
-      exception = False
-      if Match(r' {6}\w', prev_line):  # Initializer list?
-        # We are looking for the opening column of initializer list, which
-        # should be indented 4 spaces to cause 6 space indentation afterwards.
-        search_position = linenum-2
-        while (search_position >= 0
-               and Match(r' {6}\w', elided[search_position])):
-          search_position -= 1
-        exception = (search_position >= 0
-                     and elided[search_position][:5] == '    :')
-      else:
-        # Search for the function arguments or an initializer list.  We use a
-        # simple heuristic here: If the line is indented 4 spaces; and we have a
-        # closing paren, without the opening paren, followed by an opening brace
-        # or colon (for initializer lists) we assume that it is the last line of
-        # a function header.  If we have a colon indented 4 spaces, it is an
-        # initializer list.
-        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
-                           prev_line)
-                     or Match(r' {4}:', prev_line))
-
-      if not exception:
-        error(filename, linenum, 'whitespace/blank_line', 2,
-              'Redundant blank line at the start of a code block '
-              'should be deleted.')
-    # Ignore blank lines at the end of a block in a long if-else
-    # chain, like this:
-    #   if (condition1) {
-    #     // Something followed by a blank line
-    #
-    #   } else if (condition2) {
-    #     // Something else
-    #   }
-    if linenum + 1 < clean_lines.NumLines():
-      next_line = raw[linenum + 1]
-      if (next_line
-          and Match(r'\s*}', next_line)
-          and next_line.find('} else ') == -1):
-        error(filename, linenum, 'whitespace/blank_line', 3,
-              'Redundant blank line at the end of a code block '
-              'should be deleted.')
-
-    matched = Match(r'\s*(public|protected|private):', prev_line)
-    if matched:
-      error(filename, linenum, 'whitespace/blank_line', 3,
-            'Do not leave a blank line after "%s:"' % matched.group(1))
-
-  # Next, check comments
-  next_line_start = 0
-  if linenum + 1 < clean_lines.NumLines():
-    next_line = raw[linenum + 1]
-    next_line_start = len(next_line) - len(next_line.lstrip())
-  CheckComment(line, filename, linenum, next_line_start, error)
-
-  # get rid of comments and strings
-  line = clean_lines.elided[linenum]
-
-  # You shouldn't have spaces before your brackets, except maybe after
-  # 'delete []' or 'return []() {};'
-  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
-    error(filename, linenum, 'whitespace/braces', 5,
-          'Extra space before [')
-
-  # In range-based for, we wanted spaces before and after the colon, but
-  # not around "::" tokens that might appear.
-  if (Search(r'for *\(.*[^:]:[^: ]', line) or
-      Search(r'for *\(.*[^: ]:[^:]', line)):
-    error(filename, linenum, 'whitespace/forcolon', 2,
-          'Missing space around colon in range-based for loop')
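The "redundant blank line at the start of a code block" rule above reduces, at its core, to: a blank line right after an unmatched `{` is suspect. A deliberately simplified sketch of just that core (the real check also special-cases namespaces, wrapped function headers and initializer lists; `blank_after_open_brace` is a made-up name):

```python
# Flag blank lines that immediately follow a line whose last '{' is
# never closed on that same line.
def blank_after_open_brace(lines):
    hits = []
    for num in range(1, len(lines)):
        prev = lines[num - 1]
        brace = prev.rfind('{')
        if (not lines[num].strip() and brace != -1
                and '}' not in prev[brace:]):
            hits.append(num)
    return hits

code = ['void f() {', '', '  int x = 0;', '}']
print(blank_after_open_brace(code))  # [1]
```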
-
-
-def CheckOperatorSpacing(filename, clean_lines, linenum, error):
-  """Checks for horizontal spacing around operators.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-  line = clean_lines.elided[linenum]
-
-  # Don't try to do spacing checks for operator methods.  Do this by
-  # replacing the troublesome characters with something else,
-  # preserving column position for all other characters.
-  #
-  # The replacement is done repeatedly to avoid false positives from
-  # operators that call operators.
-  while True:
-    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
-    if match:
-      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
-    else:
-      break
-
-  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
-  # Otherwise not.  Note we only check for non-spaces on *both* sides;
-  # sometimes people put non-spaces on one side when aligning ='s among
-  # many lines (not that this is behavior that I approve of...)
-  if ((Search(r'[\w.]=', line) or
-       Search(r'=[\w.]', line))
-      and not Search(r'\b(if|while|for) ', line)
-      # Operators taken from [lex.operators] in C++11 standard.
-      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
-      and not Search(r'operator=', line)):
-    error(filename, linenum, 'whitespace/operators', 4,
-          'Missing spaces around =')
-
-  # It's ok not to have spaces around binary operators like + - * /, but if
-  # there's too little whitespace, we get concerned.  It's hard to tell,
-  # though, so we punt on this one for now.  TODO.
-
-  # You should always have whitespace around binary operators.
-  #
-  # Check <= and >= first to avoid false positives with < and >, then
-  # check non-include lines for spacing around < and >.
-  #
-  # If the operator is followed by a comma, assume it's being used in a
-  # macro context and don't do any checks.  This avoids false
-  # positives.
-  #
-  # Note that && is not included here.  Those are checked separately
-  # in CheckRValueReference
-  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
-  if match:
-    error(filename, linenum, 'whitespace/operators', 3,
-          'Missing spaces around %s' % match.group(1))
-  elif not Match(r'#.*include', line):
-    # Look for < that is not surrounded by spaces.  This is only
-    # triggered if both sides are missing spaces, even though
-    # technically we should flag if at least one side is missing a
-    # space.  This is done to avoid some false positives with shifts.
-    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
-    if match:
-      (_, _, end_pos) = CloseExpression(
-          clean_lines, linenum, len(match.group(1)))
-      if end_pos <= -1:
-        error(filename, linenum, 'whitespace/operators', 3,
-              'Missing spaces around <')
-
-    # Look for > that is not surrounded by spaces.  Similar to the
-    # above, we only trigger if both sides are missing spaces to avoid
-    # false positives with shifts.
-    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
-    if match:
-      (_, _, start_pos) = ReverseCloseExpression(
-          clean_lines, linenum, len(match.group(1)))
-      if start_pos <= -1:
-        error(filename, linenum, 'whitespace/operators', 3,
-              'Missing spaces around >')
-
-  # We allow no-spaces around << when used like this: 10<<20, but
-  # not otherwise (particularly, not when used as streams)
-  #
-  # We also allow operators following an opening parenthesis, since
-  # those tend to be macros that deal with operators.
-  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
-  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
-      not (match.group(1) == 'operator' and match.group(2) == ';')):
-    error(filename, linenum, 'whitespace/operators', 3,
-          'Missing spaces around <<')
-
-  # We allow no-spaces around >> for almost anything.  This is because
-  # C++11 allows ">>" to close nested templates, which accounts for
-  # most cases when ">>" is not followed by a space.
-  #
-  # We still warn on ">>" followed by alpha character, because that is
-  # likely due to ">>" being used for right shifts, e.g.:
-  #   value >> alpha
-  #
-  # When ">>" is used to close templates, the alphanumeric letter that
-  # follows would be part of an identifier, and there should still be
-  # a space separating the template type and the identifier.
-  #   type<type<type>> alpha
-  match = Search(r'>>[a-zA-Z_]', line)
-  if match:
-    error(filename, linenum, 'whitespace/operators', 3,
-          'Missing spaces around >>')
-
-  # There shouldn't be space around unary operators
-  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
-  if match:
-    error(filename, linenum, 'whitespace/operators', 4,
-          'Extra space for operator %s' % match.group(1))
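The operator-masking loop at the top of CheckOperatorSpacing is worth seeing in isolation: it overwrites the symbol after `operator` with underscores, preserving column positions, and terminates because `operator` followed by `_` no longer has a word boundary. A self-contained sketch (`mask_operators` is an illustrative name):

```python
import re

# Replace the token after "operator" with underscores so later spacing
# checks don't fire on "operator<<" and friends.  Column positions of all
# other characters are preserved.
def mask_operators(line):
    while True:
        match = re.match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
        if not match:
            return line
        line = match.group(1) + '_' * len(match.group(2)) + match.group(3)

print(mask_operators('ostream& operator<<(ostream& os, const T& t);'))
# ostream& operator__(ostream& os, const T& t);
```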
- """ - line = clean_lines.elided[linenum] - - # No spaces after an if, while, switch, or for - match = Search(r' (if\(|for\(|while\(|switch\()', line) - if match: - error(filename, linenum, 'whitespace/parens', 5, - 'Missing space before ( in %s' % match.group(1)) - - # For if/for/while/switch, the left and right parens should be - # consistent about how many spaces are inside the parens, and - # there should either be zero or one spaces inside the parens. - # We don't want: "if ( foo)" or "if ( foo )". - # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. - match = Search(r'\b(if|for|while|switch)\s*' - r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', - line) - if match: - if len(match.group(2)) != len(match.group(4)): - if not (match.group(3) == ';' and - len(match.group(2)) == 1 + len(match.group(4)) or - not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): - error(filename, linenum, 'whitespace/parens', 5, - 'Mismatching spaces inside () in %s' % match.group(1)) - if len(match.group(2)) not in [0, 1]: - error(filename, linenum, 'whitespace/parens', 5, - 'Should have zero or one spaces inside ( and ) in %s' % - match.group(1)) - - -def CheckCommaSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing near commas and semicolons. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - raw = clean_lines.lines_without_raw_strings - line = clean_lines.elided[linenum] - - # You should always have a space after a comma (either as fn arg or operator) - # - # This does not apply when the non-space character following the - # comma is another comma, since the only time when that happens is - # for empty macro arguments. - # - # We run this check in two passes: first pass on elided lines to - # verify that lines contain missing whitespaces, second pass on raw - # lines to confirm that those missing whitespaces are not due to - # elided comments. - if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and - Search(r',[^,\s]', raw[linenum])): - error(filename, linenum, 'whitespace/comma', 3, - 'Missing space after ,') - - # You should always have a space after a semicolon - # except for few corner cases - # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more - # space after ; - if Search(r';[^\s};\\)/]', line): - error(filename, linenum, 'whitespace/semicolon', 3, - 'Missing space after ;') - - -def CheckBracesSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing near commas. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Except after an opening paren, or after another opening brace (in case of - # an initializer list, for instance), you should have spaces before your - # braces. And since you should never have braces at the beginning of a line, - # this is an easy test. - match = Match(r'^(.*[^ ({>]){', line) - if match: - # Try a bit harder to check for brace initialization. This - # happens in one of the following forms: - # Constructor() : initializer_list_{} { ... 
-    #   Constructor{}.MemberFunction()
-    #   Type variable{};
-    #   FunctionCall(type{}, ...);
-    #   LastArgument(..., type{});
-    #   LOG(INFO) << type{} << " ...";
-    #   map_of_type[{...}] = ...;
-    #   ternary = expr ? new type{} : nullptr;
-    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
-    #
-    # We check for the character following the closing brace, and
-    # silence the warning if it's one of those listed above, i.e.
-    # "{.;,)<>]:".
-    #
-    # To account for nested initializer list, we allow any number of
-    # closing braces up to "{;,)<".  We can't simply silence the
-    # warning on first sight of closing brace, because that would
-    # cause false negatives for things that are not initializer lists.
-    #   Silence this:         But not this:
-    #     Outer{                if (...) {
-    #       Inner{...}            if (...){  // Missing space before {
-    #     };                    }
-    #
-    # There is a false negative with this approach if people inserted
-    # spurious semicolons, e.g. "if (cond){};", but we will catch the
-    # spurious semicolon with a separate check.
-    (endline, endlinenum, endpos) = CloseExpression(
-        clean_lines, linenum, len(match.group(1)))
-    trailing_text = ''
-    if endpos > -1:
-      trailing_text = endline[endpos:]
-    for offset in xrange(endlinenum + 1,
-                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
-      trailing_text += clean_lines.elided[offset]
-    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
-      error(filename, linenum, 'whitespace/braces', 5,
-            'Missing space before {')
-
-  # Make sure '} else {' has spaces.
-  if Search(r'}else', line):
-    error(filename, linenum, 'whitespace/braces', 5,
-          'Missing space before else')
-
-  # You shouldn't have a space before a semicolon at the end of the line.
-  # There's a special case for "for" since the style guide allows space before
-  # the semicolon there.
-  if Search(r':\s*;\s*$', line):
-    error(filename, linenum, 'whitespace/semicolon', 5,
-          'Semicolon defining empty statement. Use {} instead.')
-  elif Search(r'^\s*;\s*$', line):
-    error(filename, linenum, 'whitespace/semicolon', 5,
-          'Line contains only semicolon. If this should be an empty statement, '
-          'use {} instead.')
-  elif (Search(r'\s+;\s*$', line) and
-        not Search(r'\bfor\b', line)):
-    error(filename, linenum, 'whitespace/semicolon', 5,
-          'Extra space before last semicolon. If this should be an empty '
-          'statement, use {} instead.')
-
-
-def IsDecltype(clean_lines, linenum, column):
-  """Check if the token ending on (linenum, column) is decltype().
-
-  Args:
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: the number of the line to check.
-    column: end column of the token to check.
-  Returns:
-    True if this token is decltype() expression, False otherwise.
-  """
-  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
-  if start_col < 0:
-    return False
-  if Search(r'\bdecltype\s*$', text[0:start_col]):
-    return True
-  return False
-
-
-def IsTemplateParameterList(clean_lines, linenum, column):
-  """Check if the token ending on (linenum, column) is the end of template<>.
-
-  Args:
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: the number of the line to check.
-    column: end column of the token to check.
-  Returns:
-    True if this token is end of a template parameter list, False otherwise.
- """ - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, column) - if (startpos > -1 and - Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): - return True - return False - - -def IsRValueType(typenames, clean_lines, nesting_state, linenum, column): - """Check if the token ending on (linenum, column) is a type. - - Assumes that text to the right of the column is "&&" or a function - name. - - Args: - typenames: set of type names from template-argument-list. - clean_lines: A CleansedLines instance containing the file. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is a type, False if we are not sure. - """ - prefix = clean_lines.elided[linenum][0:column] - - # Get one word to the left. If we failed to do so, this is most - # likely not a type, since it's unlikely that the type name and "&&" - # would be split across multiple lines. - match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix) - if not match: - return False - - # Check text following the token. If it's "&&>" or "&&," or "&&...", it's - # most likely a rvalue reference used inside a template. - suffix = clean_lines.elided[linenum][column:] - if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix): - return True - - # Check for known types and end of templates: - # int&& variable - # vector&& variable - # - # Because this function is called recursively, we also need to - # recognize pointer and reference types: - # int* Function() - # int& Function() - if (match.group(2) in typenames or - match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool', - 'short', 'int', 'long', 'signed', 'unsigned', - 'float', 'double', 'void', 'auto', '>', '*', '&']): - return True - - # If we see a close parenthesis, look for decltype on the other side. - # decltype would unambiguously identify a type, anything else is - # probably a parenthesized expression and not a type. - if match.group(2) == ')': - return IsDecltype( - clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1) - - # Check for casts and cv-qualifiers. - # match.group(1) remainder - # -------------- --------- - # const_cast< type&& - # const type&& - # type const&& - if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|' - r'reinterpret_cast\s*<|\w+\s)\s*$', - match.group(1)): - return True - - # Look for a preceding symbol that might help differentiate the context. - # These are the cases that would be ambiguous: - # match.group(1) remainder - # -------------- --------- - # Call ( expression && - # Declaration ( type&& - # sizeof ( type&& - # if ( expression && - # while ( expression && - # for ( type&& - # for( ; expression && - # statement ; type&& - # block { type&& - # constructor { expression && - start = linenum - line = match.group(1) - match_symbol = None - while start >= 0: - # We want to skip over identifiers and commas to get to a symbol. - # Commas are skipped so that we can find the opening parenthesis - # for function parameter lists. 
-    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
-    if match_symbol:
-      break
-    start -= 1
-    line = clean_lines.elided[start]
-
-  if not match_symbol:
-    # Probably the first statement in the file is an rvalue reference
-    return True
-
-  if match_symbol.group(2) == '}':
-    # Found closing brace, probably an indication of this:
-    #   block{} type&&
-    return True
-
-  if match_symbol.group(2) == ';':
-    # Found semicolon, probably one of these:
-    #   for(; expression &&
-    #   statement; type&&
-
-    # Look for the previous 'for(' in the previous lines.
-    before_text = match_symbol.group(1)
-    for i in xrange(start - 1, max(start - 6, 0), -1):
-      before_text = clean_lines.elided[i] + before_text
-    if Search(r'for\s*\([^{};]*$', before_text):
-      # This is the condition inside a for-loop
-      return False
-
-    # Did not find a for-init-statement before this semicolon, so this
-    # is probably a new statement and not a condition.
-    return True
-
-  if match_symbol.group(2) == '{':
-    # Found opening brace, probably one of these:
-    #   block{ type&& = ... ; }
-    #   constructor{ expression && expression }
-
-    # Look for a closing brace or a semicolon.  If we see a semicolon
-    # first, this is probably a rvalue reference.
-    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
-    end = start
-    depth = 1
-    while True:
-      for ch in line:
-        if ch == ';':
-          return True
-        elif ch == '{':
-          depth += 1
-        elif ch == '}':
-          depth -= 1
-          if depth == 0:
-            return False
-      end += 1
-      if end >= clean_lines.NumLines():
-        break
-      line = clean_lines.elided[end]
-    # Incomplete program?
-    return False
-
-  if match_symbol.group(2) == '(':
-    # Opening parenthesis.  Need to check what's to the left of the
-    # parenthesis.  Look back one extra line for additional context.
-    before_text = match_symbol.group(1)
-    if linenum > 1:
-      before_text = clean_lines.elided[linenum - 1] + before_text
-
-    # Patterns that are likely to be types:
-    #   [](type&&
-    #   for (type&&
-    #   sizeof(type&&
-    #   operator=(type&&
-    #
-    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
-      return True
-
-    # Patterns that are likely to be expressions:
-    #   if (expression &&
-    #   while (expression &&
-    #   : initializer(expression &&
-    #   , initializer(expression &&
-    #   ( FunctionCall(expression &&
-    #   + FunctionCall(expression &&
-    #   + (expression &&
-    #
-    # The last '+' represents operators such as '+' and '-'.
-    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
-      return False
-
-    # Something else.  Check that tokens to the left look like
-    #   return_type function_name
-    match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
-                       match_symbol.group(1))
-    if match_func:
-      # Check for constructors, which don't have return types.
-      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
-        return True
-      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
-      if (implicit_constructor and
-          implicit_constructor.group(1) == implicit_constructor.group(2)):
-        return True
-      return IsRValueType(typenames, clean_lines, nesting_state, linenum,
-                          len(match_func.group(1)))
-
-    # Nothing before the function name.  If this is inside a block scope,
-    # this is probably a function call.
-    return not (nesting_state.previous_stack_top and
-                nesting_state.previous_stack_top.IsBlockInfo())
-
-  if match_symbol.group(2) == '>':
-    # Possibly a closing bracket, check that what's on the other side
-    # looks like the start of a template.
-    return IsTemplateParameterList(
-        clean_lines, start, len(match_symbol.group(1)))
-
-  # Some other symbol, usually something like "a=b&&c".  This is most
-  # likely not a type.
-  return False
-
-
-def IsDeletedOrDefault(clean_lines, linenum):
-  """Check if current constructor or operator is deleted or default.
-
-  Args:
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-  Returns:
-    True if this is a deleted or default constructor.
-  """
-  open_paren = clean_lines.elided[linenum].find('(')
-  if open_paren < 0:
-    return False
-  (close_line, _, close_paren) = CloseExpression(
-      clean_lines, linenum, open_paren)
-  if close_paren < 0:
-    return False
-  return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
-
-
-def IsRValueAllowed(clean_lines, linenum, typenames):
-  """Check if RValue reference is allowed on a particular line.
-
-  Args:
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    typenames: set of type names from template-argument-list.
-  Returns:
-    True if line is within the region where RValue references are allowed.
-  """
-  # Allow region marked by PUSH/POP macros
-  for i in xrange(linenum, 0, -1):
-    line = clean_lines.elided[i]
-    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
-      if not line.endswith('PUSH'):
-        return False
-      for j in xrange(linenum, clean_lines.NumLines(), 1):
-        line = clean_lines.elided[j]
-        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
-          return line.endswith('POP')
-
-  # Allow operator=
-  line = clean_lines.elided[linenum]
-  if Search(r'\boperator\s*=\s*\(', line):
-    return IsDeletedOrDefault(clean_lines, linenum)
-
-  # Allow constructors
-  match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
-  if match and match.group(1) == match.group(2):
-    return IsDeletedOrDefault(clean_lines, linenum)
-  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
-    return IsDeletedOrDefault(clean_lines, linenum)
-
-  if Match(r'\s*[\w<>]+\s*\(', line):
-    previous_line = 'ReturnType'
-    if linenum > 0:
-      previous_line = clean_lines.elided[linenum - 1]
-    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
-      return IsDeletedOrDefault(clean_lines, linenum)
-
-  # Reject types not mentioned in template-argument-list
-  while line:
-    match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
-    if not match:
-      break
-    if match.group(1) not in typenames:
-      return False
-    line = match.group(2)
-
-  # All RValue types that were in template-argument-list should have
-  # been removed by now.  Those were allowed, assuming that they will
-  # be forwarded.
-  #
-  # If there are no remaining RValue types left (i.e. types that were
-  # not found in template-argument-list), flag those as not allowed.
-  return line.find('&&') < 0
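The PUSH/POP region logic above scans backwards for the nearest marker and then forwards for the matching one. A deliberately reduced toy of just the backward half (the real code also confirms a closing POP exists; `in_push_pop_region` is a made-up name):

```python
# A line is treated as inside an allowed region iff the nearest
# GOOGLE_ALLOW_RVALUE_REFERENCES_* marker above it is the PUSH variant.
def in_push_pop_region(lines, linenum):
    for i in range(linenum, -1, -1):
        if 'GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH' in lines[i]:
            return True
        if 'GOOGLE_ALLOW_RVALUE_REFERENCES_POP' in lines[i]:
            return False
    return False

src = ['GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH',
       'void f(Foo&& foo);',
       'GOOGLE_ALLOW_RVALUE_REFERENCES_POP',
       'void g(Foo&& foo);']
print(in_push_pop_region(src, 1), in_push_pop_region(src, 3))  # True False
```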
- """ - # Find start of function - func_line = linenum - while func_line > 0: - line = clean_lines.elided[func_line] - if Match(r'^\s*$', line): - return set() - if line.find('(') >= 0: - break - func_line -= 1 - if func_line == 0: - return set() - - # Collapse template-argument-list into a single string - argument_list = '' - match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) - if match: - # template-argument-list on the same line as function name - start_col = len(match.group(1)) - _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) - if end_col > -1 and end_line == func_line: - start_col += 1 # Skip the opening bracket - argument_list = clean_lines.elided[func_line][start_col:end_col] - - elif func_line > 1: - # template-argument-list one line before function name - match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) - if match: - end_col = len(match.group(1)) - _, start_line, start_col = ReverseCloseExpression( - clean_lines, func_line - 1, end_col) - if start_col > -1: - start_col += 1 # Skip the opening bracket - while start_line < func_line - 1: - argument_list += clean_lines.elided[start_line][start_col:] - start_col = 0 - start_line += 1 - argument_list += clean_lines.elided[func_line - 1][start_col:end_col] - - if not argument_list: - return set() - - # Extract type names - typenames = set() - while True: - match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', - argument_list) - if not match: - break - typenames.add(match.group(1)) - argument_list = match.group(2) - return typenames - - -def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): - """Check for rvalue references. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # Find lines missing spaces around &&. - # TODO(unknown): currently we don't check for rvalue references - # with spaces surrounding the && to avoid false positives with - # boolean expressions. - line = clean_lines.elided[linenum] - match = Match(r'^(.*\S)&&', line) - if not match: - match = Match(r'(.*)&&\S', line) - if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): - return - - # Either poorly formed && or an rvalue reference, check the context - # to get a more accurate error message. Mostly we want to determine - # if what's to the left of "&&" is a type or not. - typenames = GetTemplateArgs(clean_lines, linenum) - and_pos = len(match.group(1)) - if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): - if not IsRValueAllowed(clean_lines, linenum, typenames): - error(filename, linenum, 'build/c++11', 3, - 'RValue references are an unapproved C++ feature.') - else: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around &&') - - -def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): - """Checks for additional blank line issues related to sections. - - Currently the only thing checked here is blank line before protected/private. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - class_info: A _ClassInfo objects. - linenum: The number of the line to check. - error: The function to call with any errors found. 
- """ - # Skip checks if the class is small, where small means 25 lines or less. - # 25 lines seems like a good cutoff since that's the usual height of - # terminals, and any class that can't fit in one screen can't really - # be considered "small". - # - # Also skip checks if we are on the first line. This accounts for - # classes that look like - # class Foo { public: ... }; - # - # If we didn't find the end of the class, last_line would be zero, - # and the check will be skipped by the first condition. - if (class_info.last_line - class_info.starting_linenum <= 24 or - linenum <= class_info.starting_linenum): - return - - matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) - if matched: - # Issue warning if the line before public/protected/private was - # not a blank line, but don't do this if the previous line contains - # "class" or "struct". This can happen two ways: - # - We are at the beginning of the class. - # - We are forward-declaring an inner class that is semantically - # private, but needed to be public for implementation reasons. - # Also ignores cases where the previous line ends with a backslash as can be - # common when defining classes in C macros. - prev_line = clean_lines.lines[linenum - 1] - if (not IsBlankLine(prev_line) and - not Search(r'\b(class|struct)\b', prev_line) and - not Search(r'\\$', prev_line)): - # Try a bit harder to find the beginning of the class. This is to - # account for multi-line base-specifier lists, e.g.: - # class Derived - # : public Base { - end_class_head = class_info.starting_linenum - for i in range(class_info.starting_linenum, linenum): - if Search(r'\{\s*$', clean_lines.lines[i]): - end_class_head = i - break - if end_class_head < linenum - 1: - error(filename, linenum, 'whitespace/blank_line', 3, - '"%s:" should be preceded by a blank line' % matched.group(1)) - - -def GetPreviousNonBlankLine(clean_lines, linenum): - """Return the most recent non-blank line and its line number. - - Args: - clean_lines: A CleansedLines instance containing the file contents. - linenum: The number of the line to check. - - Returns: - A tuple with two elements. The first element is the contents of the last - non-blank line before the current line, or the empty string if this is the - first non-blank line. The second is the line number of that line, or -1 - if this is the first non-blank line. - """ - - prevlinenum = linenum - 1 - while prevlinenum >= 0: - prevline = clean_lines.elided[prevlinenum] - if not IsBlankLine(prevline): # if not a blank line... - return (prevline, prevlinenum) - prevlinenum -= 1 - return ('', -1) - - -def CheckBraces(filename, clean_lines, linenum, error): - """Looks for misplaced braces (e.g. at the end of line). - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - line = clean_lines.elided[linenum] # get rid of comments and strings - - if Match(r'\s*{\s*$', line): - # We allow an open brace to start a line in the case where someone is using - # braces in a block to explicitly create a new scope, which is commonly used - # to control the lifetime of stack-allocated variables. Braces are also - # used for brace initializers inside function calls. 
-    # perfectly: we just don't complain if the last non-whitespace character on
-    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
-    # previous line starts a preprocessor block.
-    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
-    if (not Search(r'[,;:}{(]\s*$', prevline) and
-        not Match(r'\s*#', prevline)):
-      error(filename, linenum, 'whitespace/braces', 4,
-            '{ should almost always be at the end of the previous line')
-
-  # An else clause should be on the same line as the preceding closing brace.
-  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
-    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
-    if Match(r'\s*}\s*$', prevline):
-      error(filename, linenum, 'whitespace/newline', 4,
-            'An else should appear on the same line as the preceding }')
-
-  # If braces come on one side of an else, they should be on both.
-  # However, we have to worry about "else if" that spans multiple lines!
-  if Search(r'else if\s*\(', line):       # could be multi-line if
-    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
-    # find the ( after the if
-    pos = line.find('else if')
-    pos = line.find('(', pos)
-    if pos > 0:
-      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
-      brace_on_right = endline[endpos:].find('{') != -1
-      if brace_on_left != brace_on_right:    # must be brace after if
-        error(filename, linenum, 'readability/braces', 5,
-              'If an else has a brace on one side, it should have it on both')
-  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
-    error(filename, linenum, 'readability/braces', 5,
-          'If an else has a brace on one side, it should have it on both')
-
-  # Likewise, an else should never have the else clause on the same line
-  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
-    error(filename, linenum, 'whitespace/newline', 4,
-          'Else clause should never be on same line as else (use 2 lines)')
-
-  # In the same way, a do/while should never be on one line
-  if Match(r'\s*do [^\s{]', line):
-    error(filename, linenum, 'whitespace/newline', 4,
-          'do/while clauses should not be on a single line')
-
-  # Check single-line if/else bodies. The style guide says 'curly braces are not
-  # required for single-line statements'. We additionally allow multi-line,
-  # single statements, but we reject anything with more than one semicolon in
-  # it. This means that the first semicolon after the if should be at the end of
-  # its line, and the line after that should have an indent level equal to or
-  # lower than the if. We also check for ambiguous if/else nesting without
-  # braces.
-  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
-  if if_else_match and not Match(r'\s*#', line):
-    if_indent = GetIndentLevel(line)
-    endline, endlinenum, endpos = line, linenum, if_else_match.end()
-    if_match = Search(r'\bif\s*\(', line)
-    if if_match:
-      # This could be a multiline if condition, so find the end first.
-      pos = if_match.end() - 1
-      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
-    # Check for an opening brace, either directly after the if or on the next
-    # line. If found, this isn't a single-statement conditional.
-    if (not Match(r'\s*{', endline[endpos:])
-        and not (Match(r'\s*$', endline[endpos:])
-                 and endlinenum < (len(clean_lines.elided) - 1)
-                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
-      while (endlinenum < len(clean_lines.elided)
-             and ';' not in clean_lines.elided[endlinenum][endpos:]):
-        endlinenum += 1
-        endpos = 0
-      if endlinenum < len(clean_lines.elided):
-        endline = clean_lines.elided[endlinenum]
-        # We allow a mix of whitespace and closing braces (e.g. for one-liner
-        # methods) and a single \ after the semicolon (for macros)
-        endpos = endline.find(';')
-        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
-          # Semicolon isn't the last character, there's something trailing.
-          # Output a warning if the semicolon is not contained inside
-          # a lambda expression.
-          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
-                       endline):
-            error(filename, linenum, 'readability/braces', 4,
-                  'If/else bodies with multiple statements require braces')
-        elif endlinenum < len(clean_lines.elided) - 1:
-          # Make sure the next line is dedented
-          next_line = clean_lines.elided[endlinenum + 1]
-          next_indent = GetIndentLevel(next_line)
-          # With ambiguous nested if statements, this will error out on the
-          # if that *doesn't* match the else, regardless of whether it's the
-          # inner one or outer one.
-          if (if_match and Match(r'\s*else\b', next_line)
-              and next_indent != if_indent):
-            error(filename, linenum, 'readability/braces', 4,
-                  'Else clause should be indented at the same level as if. '
-                  'Ambiguous nested if/else chains require braces.')
-          elif next_indent > if_indent:
-            error(filename, linenum, 'readability/braces', 4,
-                  'If/else bodies with multiple statements require braces')
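CheckBraces' "else on the same line as the preceding }" rule is easy to demonstrate in isolation. A toy version under the assumption that lines are already comment-stripped (`else_after_lone_brace` is a made-up name):

```python
import re

# Flag an else that starts a line when the previous non-blank line is
# nothing but a closing brace.
def else_after_lone_brace(lines):
    hits = []
    for num, line in enumerate(lines):
        if re.match(r'\s*else\b\s*(?:if\b|{|$)', line):
            prev = next((l for l in reversed(lines[:num]) if l.strip()), '')
            if re.match(r'\s*}\s*$', prev):
                hits.append(num)
    return hits

code = ['if (a) {', '  b();', '}', 'else {', '  c();', '}']
print(else_after_lone_brace(code))  # [3]
```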
-
-
-def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
-  """Looks for redundant trailing semicolon.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-
-  line = clean_lines.elided[linenum]
-
-  # Block bodies should not be followed by a semicolon.  Due to C++11
-  # brace initialization, there are more places where semicolons are
-  # required than not, so we use a whitelist approach to check these
-  # rather than a blacklist.  These are the places where "};" should
-  # be replaced by just "}":
-  # 1. Some flavor of block following closing parenthesis:
-  #    for (;;) {};
-  #    while (...) {};
-  #    switch (...) {};
-  #    Function(...) {};
-  #    if (...) {};
-  #    if (...) else if (...) {};
-  #
-  # 2. else block:
-  #    if (...) else {};
-  #
-  # 3. const member function:
-  #    Function(...) const {};
-  #
-  # 4. Block following some statement:
-  #    x = 42;
-  #    {};
-  #
-  # 5. Block at the beginning of a function:
-  #    Function(...) {
-  #      {};
-  #    }
-  #
-  #    Note that naively checking for the preceding "{" will also match
-  #    braces inside multi-dimensional arrays, but this is fine since
-  #    that expression will not contain semicolons.
-  #
-  # 6. Block following another block:
-  #    while (true) {}
-  #    {};
-  #
-  # 7. End of namespaces:
-  #    namespace {};
-  #
-  #    These semicolons seem far more common than other kinds of
-  #    redundant semicolons, possibly due to people converting classes
-  #    to namespaces.  For now we do not warn for this case.
-  #
-  # Try matching case 1 first.
-  match = Match(r'^(.*\)\s*)\{', line)
-  if match:
-    # Matched closing parenthesis (case 1).  Check the token before the
-    # matching opening parenthesis, and don't warn if it looks like a
-    # macro.  This avoids these false positives:
-    #  - macro that defines a base class
-    #  - multi-line macro that defines a base class
-    #  - macro that defines the whole class-head
-    #
-    # But we still issue warnings for macros that we know are safe to
-    # warn, specifically:
-    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
-    #  - TYPED_TEST
-    #  - INTERFACE_DEF
-    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
-    #
-    # We implement a whitelist of safe macros instead of a blacklist of
-    # unsafe macros, even though the latter appears less frequently in
-    # google code and would have been easier to implement.  This is because
-    # the downside for getting the whitelist wrong means some extra
-    # semicolons, while the downside for getting the blacklist wrong
-    # would result in compile errors.
-    #
-    # In addition to macros, we also don't want to warn on
-    #  - Compound literals
-    #  - Lambdas
-    #  - alignas specifier with anonymous structs:
-    closing_brace_pos = match.group(1).rfind(')')
-    opening_parenthesis = ReverseCloseExpression(
-        clean_lines, linenum, closing_brace_pos)
-    if opening_parenthesis[2] > -1:
-      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
-      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
-      func = Match(r'^(.*\])\s*$', line_prefix)
-      if ((macro and
-           macro.group(1) not in (
-               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
-               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
-               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
-          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
-          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
-          Search(r'\s+=\s*$', line_prefix)):
-        match = None
-    if (match and
-        opening_parenthesis[1] > 1 and
-        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
-      # Multi-line lambda-expression
-      match = None
-
-  else:
-    # Try matching cases 2-3.
-    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
-    if not match:
-      # Try matching cases 4-6.  These are always matched on separate lines.
-      #
-      # Note that we can't simply concatenate the previous line to the
-      # current line and do a single match, otherwise we may output
-      # duplicate warnings for the blank line case:
-      #   if (cond) {
-      #     // blank line
-      #   }
-      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
-      if prevline and Search(r'[;{}]\s*$', prevline):
-        match = Match(r'^(\s*)\{', line)
-
-  # Check matching closing brace
-  if match:
-    (endline, endlinenum, endpos) = CloseExpression(
-        clean_lines, linenum, len(match.group(1)))
-    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
-      # Current {} pair is eligible for semicolon check, and we have found
-      # the redundant semicolon, output warning here.
-      #
-      # Note: because we are scanning forward for opening braces, and
-      # outputting warnings for the matching closing brace, if there are
-      # nested blocks with trailing semicolons, we will get the error
-      # messages in reversed order.
-      error(filename, endlinenum, 'readability/braces', 4,
-            "You don't need a ; after a }")
-
-
-def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
-  """Look for empty loop/conditional body with only a single semicolon.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-
-  # Search for loop keywords at the beginning of the line.  Because only
-  # whitespaces are allowed before the keywords, this will also ignore most
-  # do-while-loops, since those lines should start with closing brace.
-  #
-  # We also check "if" blocks here, since an empty conditional block
-  # is likely an error.
-  line = clean_lines.elided[linenum]
-  matched = Match(r'\s*(for|while|if)\s*\(', line)
-  if matched:
-    # Find the end of the conditional expression
-    (end_line, end_linenum, end_pos) = CloseExpression(
-        clean_lines, linenum, line.find('('))
-
-    # Output warning if what follows the condition expression is a semicolon.
-    # No warning for all other cases, including whitespace or newline, since we
-    # have a separate check for semicolons preceded by whitespace.
-    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
-      if matched.group(1) == 'if':
-        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
-              'Empty conditional bodies should use {}')
-      else:
-        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
-              'Empty loop bodies should use {} or continue')
-
-
-def FindCheckMacro(line):
-  """Find a replaceable CHECK-like macro.
-
-  Args:
-    line: line to search on.
-  Returns:
-    (macro name, start position), or (None, -1) if no replaceable
-    macro is found.
-  """
-  for macro in _CHECK_MACROS:
-    i = line.find(macro)
-    if i >= 0:
-      # Find opening parenthesis.  Do a regular expression match here
-      # to make sure that we are matching the expected CHECK macro, as
-      # opposed to some other macro that happens to contain the CHECK
-      # substring.
-      matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
-      if not matched:
-        continue
-      return (macro, len(matched.group(1)))
-  return (None, -1)
-
-
-def CheckCheck(filename, clean_lines, linenum, error):
-  """Checks the use of CHECK and EXPECT macros.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-
-  # Decide the set of replacement macros that should be suggested
-  lines = clean_lines.elided
-  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
-  if not check_macro:
-    return
-
-  # Find end of the boolean expression by matching parentheses
-  (last_line, end_line, end_pos) = CloseExpression(
-      clean_lines, linenum, start_pos)
-  if end_pos < 0:
-    return
-
-  # If the check macro is followed by something other than a
-  # semicolon, assume users will log their own custom error messages
-  # and don't suggest any replacements.
-  if not Match(r'\s*;', last_line[end_pos:]):
-    return
-
-  if linenum == end_line:
-    expression = lines[linenum][start_pos + 1:end_pos - 1]
-  else:
-    expression = lines[linenum][start_pos + 1:]
-    for i in xrange(linenum + 1, end_line):
-      expression += lines[i]
-    expression += last_line[0:end_pos - 1]
-
-  # Parse expression so that we can take parentheses into account.
-  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
-  # which is not replaceable by CHECK_LE.
-  lhs = ''
-  rhs = ''
-  operator = None
-  while expression:
-    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
-                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
-    if matched:
-      token = matched.group(1)
-      if token == '(':
-        # Parenthesized operand
-        expression = matched.group(2)
-        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
-        if end < 0:
-          return  # Unmatched parenthesis
-        lhs += '(' + expression[0:end]
-        expression = expression[end:]
-      elif token in ('&&', '||'):
-        # Logical and/or operators.  This means the expression
-        # contains more than one term, for example:
-        #   CHECK(42 < a && a < b);
-        #
-        # These are not replaceable with CHECK_LE, so bail out early.
-        return
-      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
-        # Non-relational operator
-        lhs += token
-        expression = matched.group(2)
-      else:
-        # Relational operator
-        operator = token
-        rhs = matched.group(2)
-        break
-    else:
-      # Unparenthesized operand.  Instead of appending to lhs one character
-      # at a time, we do another regular expression match to consume several
-      # characters at once if possible.  Trivial benchmark shows that this
-      # is more efficient when the operands are longer than a single
-      # character, which is generally the case.
-      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
-      if not matched:
-        matched = Match(r'^(\s*\S)(.*)$', expression)
-        if not matched:
-          break
-      lhs += matched.group(1)
-      expression = matched.group(2)
-
-  # Only apply checks if we got all parts of the boolean expression
-  if not (lhs and operator and rhs):
-    return
-
-  # Check that rhs do not contain logical operators.  We already know
-  # that lhs is fine since the loop above parses out && and ||.
-  if rhs.find('&&') > -1 or rhs.find('||') > -1:
-    return
-
-  # At least one of the operands must be a constant literal.  This is
-  # to avoid suggesting replacements for unprintable things like
-  # CHECK(variable != iterator)
-  #
-  # The following pattern matches decimal, hex integers, strings, and
-  # characters (in that order).
-  lhs = lhs.strip()
-  rhs = rhs.strip()
-  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
-  if Match(match_constant, lhs) or Match(match_constant, rhs):
-    # Note: since we know both lhs and rhs, we can provide a more
-    # descriptive error message like:
-    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
-    # Instead of:
-    #   Consider using CHECK_EQ instead of CHECK(a == b)
-    #
-    # We are still keeping the less descriptive message because if lhs
-    # or rhs gets long, the error message might become unreadable.
-    error(filename, linenum, 'readability/check', 2,
-          'Consider using %s instead of %s(a %s b)' % (
-              _CHECK_REPLACEMENT[check_macro][operator],
-              check_macro, operator))
-
-
-def CheckAltTokens(filename, clean_lines, linenum, error):
-  """Check alternative keywords being used in boolean expressions.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
-  """
-  line = clean_lines.elided[linenum]
-
-  # Avoid preprocessor lines
-  if Match(r'^\s*#', line):
-    return
-
-  # Last ditch effort to avoid multi-line comments.  This will not help
-  # if the comment started before the current line or ended after the
-  # current line, but it catches most of the false positives.  At least,
-  # it provides a way to workaround this warning for people who use
-  # multi-line comments in preprocessor macros.
-  #
-  # TODO(unknown): remove this once cpplint has better support for
-  # multi-line comments.
-  if line.find('/*') >= 0 or line.find('*/') >= 0:
-    return
-
-  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
-    error(filename, linenum, 'readability/alt_tokens', 2,
-          'Use operator %s instead of %s' % (
-              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
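CheckAltTokens above depends on `_ALT_TOKEN_REPLACEMENT` and `_ALT_TOKEN_REPLACEMENT_PATTERN`, which are defined elsewhere in cpplint. A self-contained sketch of the same rule with a small stand-in table (the real pattern is more careful about context):

```python
import re

# Stand-in for cpplint's alternative-token table; illustrative only.
ALT = {'and': '&&', 'or': '||', 'not': '!'}
PATTERN = re.compile(r'\b(' + '|'.join(ALT) + r')\b')

line = 'if (not done and (x or y)) {'
for match in PATTERN.finditer(line):
    print('Use operator %s instead of %s' %
          (ALT[match.group(1)], match.group(1)))
# Use operator ! instead of not
# Use operator && instead of and
# Use operator || instead of or
```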
- - Returns: - The width of the line in column positions, accounting for Unicode - combining characters and wide characters. - """ - if isinstance(line, unicode): - width = 0 - for uc in unicodedata.normalize('NFC', line): - if unicodedata.east_asian_width(uc) in ('W', 'F'): - width += 2 - elif not unicodedata.combining(uc): - width += 1 - return width - else: - return len(line) - - -def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, - error): - """Checks rules from the 'C++ style rules' section of cppguide.html. - - Most of these rules are hard to test (naming, comment style), but we - do what we can. In particular we check for 2-space indents, line lengths, - tab usage, spaces inside code, etc. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - file_extension: The extension (without the dot) of the filename. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - - # Don't use "elided" lines here, otherwise we can't check commented lines. - # Don't want to use "raw" either, because we don't want to check inside C++11 - # raw strings, - raw_lines = clean_lines.lines_without_raw_strings - line = raw_lines[linenum] - - if line.find('\t') != -1: - error(filename, linenum, 'whitespace/tab', 1, - 'Tab found; better to use spaces') - - # One or three blank spaces at the beginning of the line is weird; it's - # hard to reconcile that with 2-space indents. - # NOTE: here are the conditions rob pike used for his tests. Mine aren't - # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces - # if(RLENGTH > 20) complain = 0; - # if(match($0, " +(error|private|public|protected):")) complain = 0; - # if(match(prev, "&& *$")) complain = 0; - # if(match(prev, "\\|\\| *$")) complain = 0; - # if(match(prev, "[\",=><] *$")) complain = 0; - # if(match($0, " <<")) complain = 0; - # if(match(prev, " +for \\(")) complain = 0; - # if(prevodd && match(prevprev, " +for \\(")) complain = 0; - scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' - classinfo = nesting_state.InnermostClass() - initial_spaces = 0 - cleansed_line = clean_lines.elided[linenum] - while initial_spaces < len(line) and line[initial_spaces] == ' ': - initial_spaces += 1 - if line and line[-1].isspace(): - error(filename, linenum, 'whitespace/end_of_line', 4, - 'Line ends in whitespace. Consider deleting these extra spaces.') - # There are certain situations we allow one space, notably for - # section labels, and also lines containing multi-line raw strings. - elif ((initial_spaces == 1 or initial_spaces == 3) and - not Match(scope_or_label_pattern, cleansed_line) and - not (clean_lines.raw_lines[linenum] != line and - Match(r'^\s*""', line))): - error(filename, linenum, 'whitespace/indent', 3, - 'Weird number of spaces at line-start. ' - 'Are you using a 2-space indent?') - - # Check if the line is a header guard. - is_header_guard = False - if file_extension == 'h': - cppvar = GetHeaderGuardCPPVariable(filename) - if (line.startswith('#ifndef %s' % cppvar) or - line.startswith('#define %s' % cppvar) or - line.startswith('#endif // %s' % cppvar)): - is_header_guard = True - # #include lines and header guards can be long, since there's no clean way to - # split them. - # - # URLs can be long too. It's possible to split these, but it makes them - # harder to cut&paste. 
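The length check just below calls GetLineWidth (defined above) rather than len(), so fullwidth CJK text is budgeted at two columns per character. A minimal standalone Python 3 sketch of that width computation; the name line_width is ours, not cpplint's:

```python
import unicodedata

def line_width(line):
    # Wide/fullwidth East Asian characters take two columns, combining
    # marks take none, everything else takes one -- the same accounting
    # as cpplint's GetLineWidth.
    width = 0
    for uc in unicodedata.normalize('NFC', line):
        if unicodedata.east_asian_width(uc) in ('W', 'F'):
            width += 2
        elif not unicodedata.combining(uc):
            width += 1
    return width

print(line_width('int x = 0;'))    # 10
print(line_width('\u4f60\u597d'))  # 4: two fullwidth CJK characters
```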
- # - # The "$Id:...$" comment may also get very long without it being the - # developers fault. - if (not line.startswith('#include') and not is_header_guard and - not Match(r'^\s*//.*http(s?)://\S*$', line) and - not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): - line_width = GetLineWidth(line) - extended_length = int((_line_length * 1.25)) - if line_width > extended_length: - error(filename, linenum, 'whitespace/line_length', 4, - 'Lines should very rarely be longer than %i characters' % - extended_length) - elif line_width > _line_length: - error(filename, linenum, 'whitespace/line_length', 2, - 'Lines should be <= %i characters long' % _line_length) - - if (cleansed_line.count(';') > 1 and - # for loops are allowed two ;'s (and may run over two lines). - cleansed_line.find('for') == -1 and - (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or - GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and - # It's ok to have many commands in a switch case that fits in 1 line - not ((cleansed_line.find('case ') != -1 or - cleansed_line.find('default:') != -1) and - cleansed_line.find('break;') != -1)): - error(filename, linenum, 'whitespace/newline', 0, - 'More than one command on the same line') - - # Some more style checks - CheckBraces(filename, clean_lines, linenum, error) - CheckTrailingSemicolon(filename, clean_lines, linenum, error) - CheckEmptyBlockBody(filename, clean_lines, linenum, error) - CheckAccess(filename, clean_lines, linenum, nesting_state, error) - CheckSpacing(filename, clean_lines, linenum, nesting_state, error) - CheckOperatorSpacing(filename, clean_lines, linenum, error) - CheckParenthesisSpacing(filename, clean_lines, linenum, error) - CheckCommaSpacing(filename, clean_lines, linenum, error) - CheckBracesSpacing(filename, clean_lines, linenum, error) - CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) - CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) - CheckCheck(filename, clean_lines, linenum, error) - CheckAltTokens(filename, clean_lines, linenum, error) - classinfo = nesting_state.InnermostClass() - if classinfo: - CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) - - -_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') -# Matches the first component of a filename delimited by -s and _s. That is: -# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' -_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') - - -def _DropCommonSuffixes(filename): - """Drops common suffixes like _test.cc or -inl.h from filename. - - For example: - >>> _DropCommonSuffixes('foo/foo-inl.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/bar/foo.cc') - 'foo/bar/foo' - >>> _DropCommonSuffixes('foo/foo_internal.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') - 'foo/foo_unusualinternal' - - Args: - filename: The input filename. - - Returns: - The filename with the common suffix removed. 
- """ - for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', - 'inl.h', 'impl.h', 'internal.h'): - if (filename.endswith(suffix) and len(filename) > len(suffix) and - filename[-len(suffix) - 1] in ('-', '_')): - return filename[:-len(suffix) - 1] - return os.path.splitext(filename)[0] - - -def _IsTestFilename(filename): - """Determines if the given filename has a suffix that identifies it as a test. - - Args: - filename: The input filename. - - Returns: - True if 'filename' looks like a test, False otherwise. - """ - if (filename.endswith('_test.cc') or - filename.endswith('_unittest.cc') or - filename.endswith('_regtest.cc')): - return True - else: - return False - - -def _ClassifyInclude(fileinfo, include, is_system): - """Figures out what kind of header 'include' is. - - Args: - fileinfo: The current file cpplint is running over. A FileInfo instance. - include: The path to a #included file. - is_system: True if the #include used <> rather than "". - - Returns: - One of the _XXX_HEADER constants. - - For example: - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) - _C_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) - _CPP_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) - _LIKELY_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), - ... 'bar/foo_other_ext.h', False) - _POSSIBLE_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) - _OTHER_HEADER - """ - # This is a list of all standard c++ header files, except - # those already checked for above. - is_cpp_h = include in _CPP_HEADERS - - if is_system: - if is_cpp_h: - return _CPP_SYS_HEADER - else: - return _C_SYS_HEADER - - # If the target file and the include we're checking share a - # basename when we drop common extensions, and the include - # lives in . , then it's likely to be owned by the target file. - target_dir, target_base = ( - os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) - include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) - if target_base == include_base and ( - include_dir == target_dir or - include_dir == os.path.normpath(target_dir + '/../public')): - return _LIKELY_MY_HEADER - - # If the target and include share some initial basename - # component, it's possible the target is implementing the - # include, so it's allowed to be first, but we'll never - # complain if it's not there. - target_first_component = _RE_FIRST_COMPONENT.match(target_base) - include_first_component = _RE_FIRST_COMPONENT.match(include_base) - if (target_first_component and include_first_component and - target_first_component.group(0) == - include_first_component.group(0)): - return _POSSIBLE_MY_HEADER - - return _OTHER_HEADER - - - -def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): - """Check rules that are applicable to #include lines. - - Strings on #include lines are NOT removed from elided line, to make - certain tasks easier. However, to prevent false positives, checks - applicable to #include lines in CheckLanguage must be put here. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - include_state: An _IncludeState instance in which the headers are inserted. - error: The function to call with any errors found. 
- """ - fileinfo = FileInfo(filename) - line = clean_lines.lines[linenum] - - # "include" should use the new style "foo/bar.h" instead of just "bar.h" - # Only do this check if the included header follows google naming - # conventions. If not, assume that it's a 3rd party API that - # requires special include conventions. - # - # We also make an exception for Lua headers, which follow google - # naming convention but not the include convention. - match = Match(r'#include\s*"([^/]+\.h)"', line) - if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): - error(filename, linenum, 'build/include', 4, - 'Include the directory when naming .h files') - - # we shouldn't include a file more than once. actually, there are a - # handful of instances where doing so is okay, but in general it's - # not. - match = _RE_PATTERN_INCLUDE.search(line) - if match: - include = match.group(2) - is_system = (match.group(1) == '<') - duplicate_line = include_state.FindHeader(include) - if duplicate_line >= 0: - error(filename, linenum, 'build/include', 4, - '"%s" already included at %s:%s' % - (include, filename, duplicate_line)) - elif (include.endswith('.cc') and - os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): - error(filename, linenum, 'build/include', 4, - 'Do not include .cc files from other packages') - elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): - include_state.include_list[-1].append((include, linenum)) - - # We want to ensure that headers appear in the right order: - # 1) for foo.cc, foo.h (preferred location) - # 2) c system files - # 3) cpp system files - # 4) for foo.cc, foo.h (deprecated location) - # 5) other google headers - # - # We classify each include statement as one of those 5 types - # using a number of techniques. The include_state object keeps - # track of the highest type seen, and complains if we see a - # lower type after that. - error_message = include_state.CheckNextIncludeOrder( - _ClassifyInclude(fileinfo, include, is_system)) - if error_message: - error(filename, linenum, 'build/include_order', 4, - '%s. Should be: %s.h, c system, c++ system, other.' % - (error_message, fileinfo.BaseName())) - canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) - if not include_state.IsInAlphabeticalOrder( - clean_lines, linenum, canonical_include): - error(filename, linenum, 'build/include_alpha', 4, - 'Include "%s" not in alphabetical order' % include) - include_state.SetLastHeader(canonical_include) - - - -def _GetTextInside(text, start_pattern): - r"""Retrieves all the text between matching open and close parentheses. - - Given a string of lines and a regular expression string, retrieve all the text - following the expression and between opening punctuation symbols like - (, [, or {, and the matching close-punctuation symbol. This properly nested - occurrences of the punctuations, so for the text like - printf(a(), b(c())); - a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. - start_pattern must match string having an open punctuation symbol at the end. - - Args: - text: The lines to extract text. Its comments and strings must be elided. - It can be single line and can span multiple lines. - start_pattern: The regexp string indicating where to start extracting - the text. - Returns: - The extracted text. - None if either the opening string or ending punctuation could not be found. 
- """ - # TODO(unknown): Audit cpplint.py to see what places could be profitably - # rewritten to use _GetTextInside (and use inferior regexp matching today). - - # Give opening punctuations to get the matching close-punctuations. - matching_punctuation = {'(': ')', '{': '}', '[': ']'} - closing_punctuation = set(matching_punctuation.itervalues()) - - # Find the position to start extracting text. - match = re.search(start_pattern, text, re.M) - if not match: # start_pattern not found in text. - return None - start_position = match.end(0) - - assert start_position > 0, ( - 'start_pattern must ends with an opening punctuation.') - assert text[start_position - 1] in matching_punctuation, ( - 'start_pattern must ends with an opening punctuation.') - # Stack of closing punctuations we expect to have in text after position. - punctuation_stack = [matching_punctuation[text[start_position - 1]]] - position = start_position - while punctuation_stack and position < len(text): - if text[position] == punctuation_stack[-1]: - punctuation_stack.pop() - elif text[position] in closing_punctuation: - # A closing punctuation without matching opening punctuations. - return None - elif text[position] in matching_punctuation: - punctuation_stack.append(matching_punctuation[text[position]]) - position += 1 - if punctuation_stack: - # Opening punctuations left without matching close-punctuations. - return None - # punctuations match. - return text[start_position:position - 1] - - -# Patterns for matching call-by-reference parameters. -# -# Supports nested templates up to 2 levels deep using this messy pattern: -# < (?: < (?: < [^<>]* -# > -# | [^<>] )* -# > -# | [^<>] )* -# > -_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* -_RE_PATTERN_TYPE = ( - r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' - r'(?:\w|' - r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' - r'::)+') -# A call-by-reference parameter ends with '& identifier'. -_RE_PATTERN_REF_PARAM = re.compile( - r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' - r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') -# A call-by-const-reference parameter either ends with 'const& identifier' -# or looks like 'const type& identifier' when 'type' is atomic. -_RE_PATTERN_CONST_REF_PARAM = ( - r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + - r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') - - -def CheckLanguage(filename, clean_lines, linenum, file_extension, - include_state, nesting_state, error): - """Checks rules from the 'C++ language rules' section of cppguide.html. - - Some of these rules are hard to test (function overloading, using - uint32 inappropriately), but we do the best we can. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - file_extension: The extension (without the dot) of the filename. - include_state: An _IncludeState instance in which the headers are inserted. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # If the line is empty or consists of entirely a comment, no need to - # check it. 
- line = clean_lines.elided[linenum] - if not line: - return - - match = _RE_PATTERN_INCLUDE.search(line) - if match: - CheckIncludeLine(filename, clean_lines, linenum, include_state, error) - return - - # Reset include state across preprocessor directives. This is meant - # to silence warnings for conditional includes. - match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) - if match: - include_state.ResetSection(match.group(1)) - - # Make Windows paths like Unix. - fullname = os.path.abspath(filename).replace('\\', '/') - - # Perform other checks now that we are sure that this is not an include line - CheckCasts(filename, clean_lines, linenum, error) - CheckGlobalStatic(filename, clean_lines, linenum, error) - CheckPrintf(filename, clean_lines, linenum, error) - - if file_extension == 'h': - # TODO(unknown): check that 1-arg constructors are explicit. - # How to tell it's a constructor? - # (handled in CheckForNonStandardConstructs for now) - # TODO(unknown): check that classes declare or disable copy/assign - # (level 1 error) - pass - - # Check if people are using the verboten C basic types. The only exception - # we regularly allow is "unsigned short port" for port. - if Search(r'\bshort port\b', line): - if not Search(r'\bunsigned short port\b', line): - error(filename, linenum, 'runtime/int', 4, - 'Use "unsigned short" for ports, not "short"') - else: - match = Search(r'\b(short|long(?! +double)|long long)\b', line) - if match: - error(filename, linenum, 'runtime/int', 4, - 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) - - # Check if some verboten operator overloading is going on - # TODO(unknown): catch out-of-line unary operator&: - # class X {}; - # int operator&(const X& x) { return 42; } // unary operator& - # The trick is it's hard to tell apart from binary operator&: - # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& - if Search(r'\boperator\s*&\s*\(\s*\)', line): - error(filename, linenum, 'runtime/operator', 4, - 'Unary operator& is dangerous. Do not use it.') - - # Check for suspicious usage of "if" like - # } if (a == b) { - if Search(r'\}\s*if\s*\(', line): - error(filename, linenum, 'readability/braces', 4, - 'Did you mean "else if"? If not, start a new line for "if".') - - # Check for potential format string bugs like printf(foo). - # We constrain the pattern not to pick things like DocidForPrintf(foo). - # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) - # TODO(unknown): Catch the following case. Need to change the calling - # convention of the whole function to process multiple line to handle it. - # printf( - # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); - printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') - if printf_args: - match = Match(r'([\w.\->()]+)$', printf_args) - if match and match.group(1) != '__VA_ARGS__': - function_name = re.search(r'\b((?:string)?printf)\s*\(', - line, re.I).group(1) - error(filename, linenum, 'runtime/printf', 4, - 'Potential format string bug. Do %s("%%s", %s) instead.' - % (function_name, match.group(1))) - - # Check for potential memset bugs like memset(buf, sizeof(buf), 0). - match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) - if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): - error(filename, linenum, 'runtime/memset', 4, - 'Did you mean "memset(%s, 0, %s)"?' 
- % (match.group(1), match.group(2))) - - if Search(r'\busing namespace\b', line): - error(filename, linenum, 'build/namespaces', 5, - 'Do not use namespace using-directives. ' - 'Use using-declarations instead.') - - # Detect variable-length arrays. - match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) - if (match and match.group(2) != 'return' and match.group(2) != 'delete' and - match.group(3).find(']') == -1): - # Split the size using space and arithmetic operators as delimiters. - # If any of the resulting tokens are not compile time constants then - # report the error. - tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) - is_const = True - skip_next = False - for tok in tokens: - if skip_next: - skip_next = False - continue - - if Search(r'sizeof\(.+\)', tok): continue - if Search(r'arraysize\(\w+\)', tok): continue - - tok = tok.lstrip('(') - tok = tok.rstrip(')') - if not tok: continue - if Match(r'\d+', tok): continue - if Match(r'0[xX][0-9a-fA-F]+', tok): continue - if Match(r'k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue - # A catch all for tricky sizeof cases, including 'sizeof expression', - # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' - # requires skipping the next token because we split on ' ' and '*'. - if tok.startswith('sizeof'): - skip_next = True - continue - is_const = False - break - if not is_const: - error(filename, linenum, 'runtime/arrays', 1, - 'Do not use variable-length arrays. Use an appropriately named ' - "('k' followed by CamelCase) compile-time constant for the size.") - - # Check for use of unnamed namespaces in header files. Registration - # macros are typically OK, so we allow use of "namespace {" on lines - # that end with backslashes. - if (file_extension == 'h' - and Search(r'\bnamespace\s*{', line) - and line[-1] != '\\'): - error(filename, linenum, 'build/namespaces', 4, - 'Do not use unnamed namespaces in header files. See ' - 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' - ' for more information.') - - -def CheckGlobalStatic(filename, clean_lines, linenum, error): - """Check for unsafe global or static objects. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Match two lines at a time to support multiline declarations - if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): - line += clean_lines.elided[linenum + 1].strip() - - # Check for people declaring static/global STL strings at the top level. - # This is dangerous because the C++ language does not guarantee that - # globals with constructors are initialized before the first access. - match = Match( - r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', - line) - - # Remove false positives: - # - String pointers (as opposed to values). - # string *pointer - # const string *pointer - # string const *pointer - # string *const pointer - # - # - Functions and template specializations. - # string Function(... - # string Class::Method(... - # - # - Operators. These are matched separately because operator names - # cross non-word boundaries, and trying to match both operators - # and functions at the same time would decrease accuracy of - # matching identifiers. 
- # string Class::operator*() - if (match and - not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and - not Search(r'\boperator\W', line) and - not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): - error(filename, linenum, 'runtime/string', 4, - 'For a static/global string constant, use a C style string instead: ' - '"%schar %s[]".' % - (match.group(1), match.group(2))) - - if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): - error(filename, linenum, 'runtime/init', 4, - 'You seem to be initializing a member variable with itself.') - - -def CheckPrintf(filename, clean_lines, linenum, error): - """Check for printf related issues. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # When snprintf is used, the second argument shouldn't be a literal. - match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) - if match and match.group(2) != '0': - # If 2nd arg is zero, snprintf is used to calculate size. - error(filename, linenum, 'runtime/printf', 3, - 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' - 'to snprintf.' % (match.group(1), match.group(2))) - - # Check if some verboten C functions are being used. - if Search(r'\bsprintf\s*\(', line): - error(filename, linenum, 'runtime/printf', 5, - 'Never use sprintf. Use snprintf instead.') - match = Search(r'\b(strcpy|strcat)\s*\(', line) - if match: - error(filename, linenum, 'runtime/printf', 4, - 'Almost always, snprintf is better than %s' % match.group(1)) - - -def IsDerivedFunction(clean_lines, linenum): - """Check if current line contains an inherited function. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if current line contains a function with "override" - virt-specifier. - """ - # Scan back a few lines for start of current function - for i in xrange(linenum, max(-1, linenum - 10), -1): - match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) - if match: - # Look for "override" after the matching closing parenthesis - line, _, closing_paren = CloseExpression( - clean_lines, i, len(match.group(1))) - return (closing_paren >= 0 and - Search(r'\boverride\b', line[closing_paren:])) - return False - - -def IsOutOfLineMethodDefinition(clean_lines, linenum): - """Check if current line contains an out-of-line method definition. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if current line contains an out-of-line method definition. - """ - # Scan back a few lines for start of current function - for i in xrange(linenum, max(-1, linenum - 10), -1): - if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): - return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None - return False - - -def IsInitializerList(clean_lines, linenum): - """Check if current line is inside constructor initializer list. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if current line appears to be inside constructor initializer - list, False otherwise. 
- """ - for i in xrange(linenum, 1, -1): - line = clean_lines.elided[i] - if i == linenum: - remove_function_body = Match(r'^(.*)\{\s*$', line) - if remove_function_body: - line = remove_function_body.group(1) - - if Search(r'\s:\s*\w+[({]', line): - # A lone colon tend to indicate the start of a constructor - # initializer list. It could also be a ternary operator, which - # also tend to appear in constructor initializer lists as - # opposed to parameter lists. - return True - if Search(r'\}\s*,\s*$', line): - # A closing brace followed by a comma is probably the end of a - # brace-initialized member in constructor initializer list. - return True - if Search(r'[{};]\s*$', line): - # Found one of the following: - # - A closing brace or semicolon, probably the end of the previous - # function. - # - An opening brace, probably the start of current class or namespace. - # - # Current line is probably not inside an initializer list since - # we saw one of those things without seeing the starting colon. - return False - - # Got to the beginning of the file without seeing the start of - # constructor initializer list. - return False - - -def CheckForNonConstReference(filename, clean_lines, linenum, - nesting_state, error): - """Check for non-const references. - - Separate from CheckLanguage since it scans backwards from current - line, instead of scanning forward. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # Do nothing if there is no '&' on current line. - line = clean_lines.elided[linenum] - if '&' not in line: - return - - # If a function is inherited, current function doesn't have much of - # a choice, so any non-const references should not be blamed on - # derived function. - if IsDerivedFunction(clean_lines, linenum): - return - - # Don't warn on out-of-line method definitions, as we would warn on the - # in-line declaration, if it isn't marked with 'override'. - if IsOutOfLineMethodDefinition(clean_lines, linenum): - return - - # Long type names may be broken across multiple lines, usually in one - # of these forms: - # LongType - # ::LongTypeContinued &identifier - # LongType:: - # LongTypeContinued &identifier - # LongType< - # ...>::LongTypeContinued &identifier - # - # If we detected a type split across two lines, join the previous - # line to current line so that we can match const references - # accordingly. - # - # Note that this only scans back one line, since scanning back - # arbitrary number of lines would be expensive. If you have a type - # that spans more than 2 lines, please use a typedef. 
- if linenum > 1: - previous = None - if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): - # previous_line\n + ::current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', - clean_lines.elided[linenum - 1]) - elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): - # previous_line::\n + current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', - clean_lines.elided[linenum - 1]) - if previous: - line = previous.group(1) + line.lstrip() - else: - # Check for templated parameter that is split across multiple lines - endpos = line.rfind('>') - if endpos > -1: - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, endpos) - if startpos > -1 and startline < linenum: - # Found the matching < on an earlier line, collect all - # pieces up to current line. - line = '' - for i in xrange(startline, linenum + 1): - line += clean_lines.elided[i].strip() - - # Check for non-const references in function parameters. A single '&' may - # found in the following places: - # inside expression: binary & for bitwise AND - # inside expression: unary & for taking the address of something - # inside declarators: reference parameter - # We will exclude the first two cases by checking that we are not inside a - # function body, including one that was just introduced by a trailing '{'. - # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. - if (nesting_state.previous_stack_top and - not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or - isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): - # Not at toplevel, not within a class, and not within a namespace - return - - # Avoid initializer lists. We only need to scan back from the - # current line for something that starts with ':'. - # - # We don't need to check the current line, since the '&' would - # appear inside the second set of parentheses on the current line as - # opposed to the first set. - if linenum > 0: - for i in xrange(linenum - 1, max(0, linenum - 10), -1): - previous_line = clean_lines.elided[i] - if not Search(r'[),]\s*$', previous_line): - break - if Match(r'^\s*:\s+\S', previous_line): - return - - # Avoid preprocessors - if Search(r'\\\s*$', line): - return - - # Avoid constructor initializer lists - if IsInitializerList(clean_lines, linenum): - return - - # We allow non-const references in a few standard places, like functions - # called "swap()" or iostream operators like "<<" or ">>". Do not check - # those function parameters. - # - # We also accept & in static_assert, which looks like a function but - # it's actually a declaration expression. - whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' - r'operator\s*[<>][<>]|' - r'static_assert|COMPILE_ASSERT' - r')\s*\(') - if Search(whitelisted_functions, line): - return - elif not Search(r'\S+\([^)]*$', line): - # Don't see a whitelisted function on this line. Actually we - # didn't see any function name on this line, so this is likely a - # multi-line parameter list. Try a bit harder to catch this case. - for i in xrange(2): - if (linenum > i and - Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): - return - - decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body - for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): - if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): - error(filename, linenum, 'runtime/references', 2, - 'Is this a non-const reference? 
' - 'If so, make const or use a pointer: ' + - ReplaceAll(' *<', '<', parameter)) - - -def CheckCasts(filename, clean_lines, linenum, error): - """Various cast related checks. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Check to see if they're using an conversion function cast. - # I just try to capture the most common basic types, though there are more. - # Parameterless conversion functions, such as bool(), are allowed as they are - # probably a member operator declaration or default constructor. - match = Search( - r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b' - r'(int|float|double|bool|char|int32|uint32|int64|uint64)' - r'(\([^)].*)', line) - expecting_function = ExpectingFunctionArgs(clean_lines, linenum) - if match and not expecting_function: - matched_type = match.group(2) - - # matched_new_or_template is used to silence two false positives: - # - New operators - # - Template arguments with function types - # - # For template arguments, we match on types immediately following - # an opening bracket without any spaces. This is a fast way to - # silence the common case where the function type is the first - # template argument. False negative with less-than comparison is - # avoided because those operators are usually followed by a space. - # - # function // bracket + no space = false positive - # value < double(42) // bracket + space = true positive - matched_new_or_template = match.group(1) - - # Avoid arrays by looking for brackets that come after the closing - # parenthesis. - if Match(r'\([^()]+\)\s*\[', match.group(3)): - return - - # Other things to ignore: - # - Function pointers - # - Casts to pointer types - # - Placement new - # - Alias declarations - matched_funcptr = match.group(3) - if (matched_new_or_template is None and - not (matched_funcptr and - (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', - matched_funcptr) or - matched_funcptr.startswith('(*)'))) and - not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and - not Search(r'new\(\S+\)\s*' + matched_type, line)): - error(filename, linenum, 'readability/casting', 4, - 'Using deprecated casting style. ' - 'Use static_cast<%s>(...) instead' % - matched_type) - - if not expecting_function: - CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', - r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) - - # This doesn't catch all cases. Consider (const char * const)"hello". - # - # (char *) "foo" should always be a const_cast (reinterpret_cast won't - # compile). - if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', - r'\((char\s?\*+\s?)\)\s*"', error): - pass - else: - # Check pointer casts for other than string constants - CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', - r'\((\w+\s?\*+\s?)\)', error) - - # In addition, we look for people taking the address of a cast. This - # is dangerous -- casts can assign to temporaries, so the pointer doesn't - # point where you think. - # - # Some non-identifier character is required before the '&' for the - # expression to be recognized as a cast. 
These are casts: - # expression = &static_cast(temporary()); - # function(&(int*)(temporary())); - # - # This is not a cast: - # reference_type&(int* function_param); - match = Search( - r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' - r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) - if match: - # Try a better error message when the & is bound to something - # dereferenced by the casted pointer, as opposed to the casted - # pointer itself. - parenthesis_error = False - match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) - if match: - _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) - if x1 >= 0 and clean_lines.elided[y1][x1] == '(': - _, y2, x2 = CloseExpression(clean_lines, y1, x1) - if x2 >= 0: - extended_line = clean_lines.elided[y2][x2:] - if y2 < clean_lines.NumLines() - 1: - extended_line += clean_lines.elided[y2 + 1] - if Match(r'\s*(?:->|\[)', extended_line): - parenthesis_error = True - - if parenthesis_error: - error(filename, linenum, 'readability/casting', 4, - ('Are you taking an address of something dereferenced ' - 'from a cast? Wrapping the dereferenced expression in ' - 'parentheses will make the binding more obvious')) - else: - error(filename, linenum, 'runtime/casting', 4, - ('Are you taking an address of a cast? ' - 'This is dangerous: could be a temp var. ' - 'Take the address before doing the cast, rather than after')) - - -def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): - """Checks for a C-style cast by looking for the pattern. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - cast_type: The string for the C++ cast to recommend. This is either - reinterpret_cast, static_cast, or const_cast, depending. - pattern: The regular expression used to find C-style casts. - error: The function to call with any errors found. - - Returns: - True if an error was emitted. - False otherwise. - """ - line = clean_lines.elided[linenum] - match = Search(pattern, line) - if not match: - return False - - # Exclude lines with keywords that tend to look like casts - context = line[0:match.start(1) - 1] - if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): - return False - - # Try expanding current context to see if we one level of - # parentheses inside a macro. - if linenum > 0: - for i in xrange(linenum - 1, max(0, linenum - 5), -1): - context = clean_lines.elided[i] + context - if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): - return False - - # operator++(int) and operator--(int) - if context.endswith(' operator++') or context.endswith(' operator--'): - return False - - # A single unnamed argument for a function tends to look like old - # style cast. If we see those, don't issue warnings for deprecated - # casts, instead issue warnings for unnamed arguments where - # appropriate. - # - # These are things that we want warnings for, since the style guide - # explicitly require all parameters to be named: - # Function(int); - # Function(int) { - # ConstMember(int) const; - # ConstMember(int) const { - # ExceptionMember(int) throw (...); - # ExceptionMember(int) throw (...) 
{
-  #   PureVirtual(int) = 0;
-  #   [](int) -> bool {
-  #
-  # These are functions of some sort, where the compiler would be fine
-  # if they had named parameters, but people often omit those
-  # identifiers to reduce clutter:
-  #   (FunctionPointer)(int);
-  #   (FunctionPointer)(int) = value;
-  #   Function((function_pointer_arg)(int))
-  #   Function((function_pointer_arg)(int), int param)
-  #   <TemplateArgument(int)>;
-  #   <(FunctionPointerTemplateArgument)(int)>;
-  remainder = line[match.end(0):]
-  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
-           remainder):
-    # Looks like an unnamed parameter.
-
-    # Don't warn on any kind of template arguments.
-    if Match(r'^\s*>', remainder):
-      return False
-
-    # Don't warn on assignments to function pointers, but keep warnings for
-    # unnamed parameters to pure virtual functions.  Note that this pattern
-    # will also pass on assignments of "0" to function pointers, but the
-    # preferred values for those would be "nullptr" or "NULL".
-    matched_zero = Match(r'^\s*=\s*(\S+)\s*;', remainder)
-    if matched_zero and matched_zero.group(1) != '0':
-      return False
-
-    # Don't warn on function pointer declarations.  For this we need
-    # to check what came before the "(type)" string.
-    if Match(r'.*\)\s*$', line[0:match.start(0)]):
-      return False
-
-    # Don't warn if the parameter is named with block comments, e.g.:
-    #  Function(int /*unused_param*/);
-    raw_line = clean_lines.raw_lines[linenum]
-    if '/*' in raw_line:
-      return False
-
-    # Passed all filters, issue warning here.
-    error(filename, linenum, 'readability/function', 3,
-          'All parameters should be named in a function')
-    return True
-
-  # At this point, all that should be left is actual casts.
-  error(filename, linenum, 'readability/casting', 4,
-        'Using C-style cast.  Use %s<%s>(...) instead' %
-        (cast_type, match.group(1)))
-
-  return True
-
-
-def ExpectingFunctionArgs(clean_lines, linenum):
-  """Checks whether where function type arguments are expected.
-
-  Args:
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-
-  Returns:
-    True if the line at 'linenum' is inside something that expects arguments
-    of function types.
-  """
-  line = clean_lines.elided[linenum]
-  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
-          (linenum >= 2 and
-           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
-                  clean_lines.elided[linenum - 1]) or
-            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
-                  clean_lines.elided[linenum - 2]) or
-            Search(r'\bstd::m?function\s*\<\s*$',
-                   clean_lines.elided[linenum - 1]))))
-
-
-_HEADERS_CONTAINING_TEMPLATES = (
-    ('<deque>', ('deque',)),
-    ('<functional>', ('unary_function', 'binary_function',
-                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
-                      'negate',
-                      'equal_to', 'not_equal_to', 'greater', 'less',
-                      'greater_equal', 'less_equal',
-                      'logical_and', 'logical_or', 'logical_not',
-                      'unary_negate', 'not1', 'binary_negate', 'not2',
-                      'bind1st', 'bind2nd',
-                      'pointer_to_unary_function',
-                      'pointer_to_binary_function',
-                      'ptr_fun',
-                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
-                      'mem_fun_ref_t',
-                      'const_mem_fun_t', 'const_mem_fun1_t',
-                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
-                      'mem_fun_ref',
-                     )),
-    ('<limits>', ('numeric_limits',)),
-    ('<list>', ('list',)),
-    ('<map>', ('map', 'multimap',)),
-    ('<memory>', ('allocator',)),
-    ('<queue>', ('queue', 'priority_queue',)),
-    ('<set>', ('set', 'multiset',)),
-    ('<stack>', ('stack',)),
-    ('<string>', ('char_traits', 'basic_string',)),
-    ('<tuple>', ('tuple',)),
-    ('<utility>', ('pair',)),
-    ('<vector>', ('vector',)),
-
-    # gcc extensions.
-    # Note: std::hash is their hash, ::hash is our hash
-    ('<hash_map>', ('hash_map', 'hash_multimap',)),
-    ('<hash_set>', ('hash_set', 'hash_multiset',)),
-    ('<slist>', ('slist',)),
-    )
-
-_RE_PATTERN_STRING = re.compile(r'\bstring\b')
-
-_re_pattern_algorithm_header = []
-for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
-                  'transform'):
-  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
-  # type::max().
-  _re_pattern_algorithm_header.append(
-      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
-       _template,
-       '<algorithm>'))
-
-_re_pattern_templates = []
-for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
-  for _template in _templates:
-    _re_pattern_templates.append(
-        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
-         _template + '<>',
-         _header))
-
-
-def FilesBelongToSameModule(filename_cc, filename_h):
-  """Check if these two filenames belong to the same module.
-
-  The concept of a 'module' here is as follows:
-  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
-  same 'module' if they are in the same directory.
-  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
-  to belong to the same module here.
-
-  If the filename_cc contains a longer path than the filename_h, for example,
-  '/absolute/path/to/base/sysinfo.cc', and this file would include
-  'base/sysinfo.h', this function also produces the prefix needed to open the
-  header. This is used by the caller of this function to more robustly open the
-  header file. We don't have access to the real include paths in this context,
-  so we need this guesswork here.
-
-  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
-  according to this implementation. Because of this, this function gives
-  some false positives. This should be sufficiently rare in practice.
-
-  Args:
-    filename_cc: is the path for the .cc file
-    filename_h: is the path for the header path
-
-  Returns:
-    Tuple with a bool and a string:
-    bool: True if filename_cc and filename_h belong to the same module.
-    string: the additional prefix needed to open the header file.
-  """
-
-  if not filename_cc.endswith('.cc'):
-    return (False, '')
-  filename_cc = filename_cc[:-len('.cc')]
-  if filename_cc.endswith('_unittest'):
-    filename_cc = filename_cc[:-len('_unittest')]
-  elif filename_cc.endswith('_test'):
-    filename_cc = filename_cc[:-len('_test')]
-  filename_cc = filename_cc.replace('/public/', '/')
-  filename_cc = filename_cc.replace('/internal/', '/')
-
-  if not filename_h.endswith('.h'):
-    return (False, '')
-  filename_h = filename_h[:-len('.h')]
-  if filename_h.endswith('-inl'):
-    filename_h = filename_h[:-len('-inl')]
-  filename_h = filename_h.replace('/public/', '/')
-  filename_h = filename_h.replace('/internal/', '/')
-
-  files_belong_to_same_module = filename_cc.endswith(filename_h)
-  common_path = ''
-  if files_belong_to_same_module:
-    common_path = filename_cc[:-len(filename_h)]
-  return files_belong_to_same_module, common_path
-
-
-def UpdateIncludeState(filename, include_dict, io=codecs):
-  """Fill up the include_dict with new includes found from the file.
-
-  Args:
-    filename: the name of the header to read.
-    include_dict: a dictionary in which the headers are inserted.
-    io: The io factory to use to read the file. Provided for testability.
-
-  Returns:
-    True if a header was successfully added. False otherwise.
- """ - headerfile = None - try: - headerfile = io.open(filename, 'r', 'utf8', 'replace') - except IOError: - return False - linenum = 0 - for line in headerfile: - linenum += 1 - clean_line = CleanseComments(line) - match = _RE_PATTERN_INCLUDE.search(clean_line) - if match: - include = match.group(2) - include_dict.setdefault(include, linenum) - return True - - -def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, - io=codecs): - """Reports for missing stl includes. - - This function will output warnings to make sure you are including the headers - necessary for the stl containers and functions that you use. We only give one - reason to include a header. For example, if you use both equal_to<> and - less<> in a .h file, only one (the latter in the file) of these will be - reported as a reason to include the . - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - include_state: An _IncludeState instance. - error: The function to call with any errors found. - io: The IO factory to use to read the header file. Provided for unittest - injection. - """ - required = {} # A map of header name to linenumber and the template entity. - # Example of required: { '': (1219, 'less<>') } - - for linenum in xrange(clean_lines.NumLines()): - line = clean_lines.elided[linenum] - if not line or line[0] == '#': - continue - - # String is special -- it is a non-templatized type in STL. - matched = _RE_PATTERN_STRING.search(line) - if matched: - # Don't warn about strings in non-STL namespaces: - # (We check only the first match per line; good enough.) - prefix = line[:matched.start()] - if prefix.endswith('std::') or not prefix.endswith('::'): - required[''] = (linenum, 'string') - - for pattern, template, header in _re_pattern_algorithm_header: - if pattern.search(line): - required[header] = (linenum, template) - - # The following function is just a speed up, no semantics are changed. - if not '<' in line: # Reduces the cpu time usage by skipping lines. - continue - - for pattern, template, header in _re_pattern_templates: - if pattern.search(line): - required[header] = (linenum, template) - - # The policy is that if you #include something in foo.h you don't need to - # include it again in foo.cc. Here, we will look at possible includes. - # Let's flatten the include_state include_list and copy it into a dictionary. - include_dict = dict([item for sublist in include_state.include_list - for item in sublist]) - - # Did we find the header for this file (if any) and successfully load it? - header_found = False - - # Use the absolute path so that matching works properly. - abs_filename = FileInfo(filename).FullName() - - # For Emacs's flymake. - # If cpplint is invoked from Emacs's flymake, a temporary file is generated - # by flymake and that file name might end with '_flymake.cc'. In that case, - # restore original file name here so that the corresponding header file can be - # found. - # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' - # instead of 'foo_flymake.h' - abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) - - # include_dict is modified during iteration, so we iterate over a copy of - # the keys. 
- header_keys = include_dict.keys() - for header in header_keys: - (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) - fullpath = common_path + header - if same_module and UpdateIncludeState(fullpath, include_dict, io): - header_found = True - - # If we can't find the header file for a .cc, assume it's because we don't - # know where to look. In that case we'll give up as we're not sure they - # didn't include it in the .h file. - # TODO(unknown): Do a better job of finding .h files so we are confident that - # not having the .h file means there isn't one. - if filename.endswith('.cc') and not header_found: - return - - # All the lines have been processed, report the errors found. - for required_header_unstripped in required: - template = required[required_header_unstripped][1] - if required_header_unstripped.strip('<>"') not in include_dict: - error(filename, required[required_header_unstripped][0], - 'build/include_what_you_use', 4, - 'Add #include ' + required_header_unstripped + ' for ' + template) - - -_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') - - -def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): - """Check that make_pair's template arguments are deduced. - - G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are - specified explicitly, and such use isn't intended in any case. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) - if match: - error(filename, linenum, 'build/explicit_make_pair', - 4, # 4 = high confidence - 'For C++11-compatibility, omit template arguments from make_pair' - ' OR use pair directly OR if appropriate, construct a pair directly') - - -def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): - """Check that default lambda captures are not used. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # A lambda introducer specifies a default capture if it starts with "[=" - # or if it starts with "[&" _not_ followed by an identifier. - match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) - if match: - # Found a potential error, check what comes after the lambda-introducer. - # If it's not open parenthesis (for lambda-declarator) or open brace - # (for compound-statement), it's not a lambda. - line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) - if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): - error(filename, linenum, 'build/c++11', - 4, # 4 = high confidence - 'Default lambda captures are an unapproved C++ feature.') - - -def CheckRedundantVirtual(filename, clean_lines, linenum, error): - """Check if line contains a redundant "virtual" function-specifier. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Look for "virtual" on current line. - line = clean_lines.elided[linenum] - virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) - if not virtual: return - - # Ignore "virtual" keywords that are near access-specifiers. 
These - # are only used in class base-specifier and do not apply to member - # functions. - if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or - Match(r'^\s+(public|protected|private)\b', virtual.group(3))): - return - - # Ignore the "virtual" keyword from virtual base classes. Usually - # there is a column on the same line in these cases (virtual base - # classes are rare in google3 because multiple inheritance is rare). - if Match(r'^.*[^:]:[^:].*$', line): return - - # Look for the next opening parenthesis. This is the start of the - # parameter list (possibly on the next line shortly after virtual). - # TODO(unknown): doesn't work if there are virtual functions with - # decltype() or other things that use parentheses, but csearch suggests - # that this is rare. - end_col = -1 - end_line = -1 - start_col = len(virtual.group(2)) - for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): - line = clean_lines.elided[start_line][start_col:] - parameter_list = Match(r'^([^(]*)\(', line) - if parameter_list: - # Match parentheses to find the end of the parameter list - (_, end_line, end_col) = CloseExpression( - clean_lines, start_line, start_col + len(parameter_list.group(1))) - break - start_col = 0 - - if end_col < 0: - return # Couldn't find end of parameter list, give up - - # Look for "override" or "final" after the parameter list - # (possibly on the next few lines). - for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): - line = clean_lines.elided[i][end_col:] - match = Search(r'\b(override|final)\b', line) - if match: - error(filename, linenum, 'readability/inheritance', 4, - ('"virtual" is redundant since function is ' - 'already declared as "%s"' % match.group(1))) - - # Set end_col to check whole lines after we are done with the - # first line. - end_col = 0 - if Search(r'[^\w]\s*$', line): - break - - -def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): - """Check if line contains a redundant "override" or "final" virt-specifier. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Look for closing parenthesis nearby. We need one to confirm where - # the declarator ends and where the virt-specifier starts to avoid - # false positives. - line = clean_lines.elided[linenum] - declarator_end = line.rfind(')') - if declarator_end >= 0: - fragment = line[declarator_end:] - else: - if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: - fragment = line - else: - return - - # Check that at most one of "override" or "final" is present, not both - if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): - error(filename, linenum, 'readability/inheritance', 4, - ('"override" is redundant since function is ' - 'already declared as "final"')) - - - - -# Returns true if we are at a new block, and it is directly -# inside of a namespace. -def IsBlockInNameSpace(nesting_state, is_forward_declaration): - """Checks that the new block is directly in a namespace. - - Args: - nesting_state: The _NestingState object that contains info about our state. - is_forward_declaration: If the class is a forward declared class. - Returns: - Whether or not the new block is directly in a namespace. 
- """ - if is_forward_declaration: - if len(nesting_state.stack) >= 1 and ( - isinstance(nesting_state.stack[-1], _NamespaceInfo)): - return True - else: - return False - - return (len(nesting_state.stack) > 1 and - nesting_state.stack[-1].check_namespace_indentation and - isinstance(nesting_state.stack[-2], _NamespaceInfo)) - - -def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, - raw_lines_no_comments, linenum): - """This method determines if we should apply our namespace indentation check. - - Args: - nesting_state: The current nesting state. - is_namespace_indent_item: If we just put a new class on the stack, True. - If the top of the stack is not a class, or we did not recently - add the class, False. - raw_lines_no_comments: The lines without the comments. - linenum: The current line number we are processing. - - Returns: - True if we should apply our namespace indentation check. Currently, it - only works for classes and namespaces inside of a namespace. - """ - - is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, - linenum) - - if not (is_namespace_indent_item or is_forward_declaration): - return False - - # If we are in a macro, we do not want to check the namespace indentation. - if IsMacroDefinition(raw_lines_no_comments, linenum): - return False - - return IsBlockInNameSpace(nesting_state, is_forward_declaration) - - -# Call this method if the line is directly inside of a namespace. -# If the line above is blank (excluding comments) or the start of -# an inner namespace, it cannot be indented. -def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, - error): - line = raw_lines_no_comments[linenum] - if Match(r'^\s+', line): - error(filename, linenum, 'runtime/indentation_namespace', 4, - 'Do not indent within a namespace') - - -def ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions=[]): - """Processes a single line in the file. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - clean_lines: An array of strings, each representing a line of the file, - with comments stripped. - line: Number of line being processed. - include_state: An _IncludeState instance in which the headers are inserted. - function_state: A _FunctionState instance which counts function lines, etc. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. 
Each function takes 4 - arguments: filename, clean_lines, line, error - """ - raw_lines = clean_lines.raw_lines - ParseNolintSuppressions(filename, raw_lines[line], line, error) - nesting_state.Update(filename, clean_lines, line, error) - CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, - error) - if nesting_state.InAsmBlock(): return - CheckForFunctionLengths(filename, clean_lines, line, function_state, error) - CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) - CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) - CheckLanguage(filename, clean_lines, line, file_extension, include_state, - nesting_state, error) - CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) - CheckForNonStandardConstructs(filename, clean_lines, line, - nesting_state, error) - CheckVlogArguments(filename, clean_lines, line, error) - CheckPosixThreading(filename, clean_lines, line, error) - CheckInvalidIncrement(filename, clean_lines, line, error) - CheckMakePairUsesDeduction(filename, clean_lines, line, error) - CheckDefaultLambdaCaptures(filename, clean_lines, line, error) - CheckRedundantVirtual(filename, clean_lines, line, error) - CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) - for check_fn in extra_check_functions: - check_fn(filename, clean_lines, line, error) - -def FlagCxx11Features(filename, clean_lines, linenum, error): - """Flag those c++11 features that we only allow in certain places. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Flag unapproved C++11 headers. - include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) - if include and include.group(1) in ('cfenv', - 'condition_variable', - 'fenv.h', - 'future', - 'mutex', - 'thread', - 'chrono', - 'ratio', - 'regex', - 'system_error', - ): - error(filename, linenum, 'build/c++11', 5, - ('<%s> is an unapproved C++11 header.') % include.group(1)) - - # The only place where we need to worry about C++11 keywords and library - # features in preprocessor directives is in macro definitions. - if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return - - # These are classes and free functions. The classes are always - # mentioned as std::*, but we only catch the free functions if - # they're not found by ADL. They're alphabetical by header. - for top_name in ( - # type_traits - 'alignment_of', - 'aligned_union', - ): - if Search(r'\bstd::%s\b' % top_name, line): - error(filename, linenum, 'build/c++11', 5, - ('std::%s is an unapproved C++11 class or function. Send c-style ' - 'an example of where it would make your code more readable, and ' - 'they may let you use it.') % top_name) - - -def ProcessFileData(filename, file_extension, lines, error, - extra_check_functions=[]): - """Performs lint checks and reports any errors to the given error function. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - lines: An array of strings, each representing a line of the file, with the - last element being empty if the file is terminated with a newline. 
- error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ - lines = (['// marker so line numbers and indices both start at 1'] + lines + - ['// marker so line numbers end in a known way']) - - include_state = _IncludeState() - function_state = _FunctionState() - nesting_state = NestingState() - - ResetNolintSuppressions() - - CheckForCopyright(filename, lines, error) - - RemoveMultiLineComments(filename, lines, error) - clean_lines = CleansedLines(lines) - - if file_extension == 'h': - CheckForHeaderGuard(filename, clean_lines, error) - - for line in xrange(clean_lines.NumLines()): - ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions) - FlagCxx11Features(filename, clean_lines, line, error) - nesting_state.CheckCompletedBlocks(filename, error) - - CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) - - # Check that the .cc file has included its header if it exists. - if file_extension == 'cc': - CheckHeaderFileIncluded(filename, include_state, error) - - # We check here rather than inside ProcessLine so that we see raw - # lines rather than "cleaned" lines. - CheckForBadCharacters(filename, lines, error) - - CheckForNewlineAtEOF(filename, lines, error) - -def ProcessConfigOverrides(filename): - """ Loads the configuration files and processes the config overrides. - - Args: - filename: The name of the file being processed by the linter. - - Returns: - False if the current |filename| should not be processed further. - """ - - abs_filename = os.path.abspath(filename) - cfg_filters = [] - keep_looking = True - while keep_looking: - abs_path, base_name = os.path.split(abs_filename) - if not base_name: - break # Reached the root directory. - - cfg_file = os.path.join(abs_path, "CPPLINT.cfg") - abs_filename = abs_path - if not os.path.isfile(cfg_file): - continue - - try: - with open(cfg_file) as file_handle: - for line in file_handle: - line, _, _ = line.partition('#') # Remove comments. - if not line.strip(): - continue - - name, _, val = line.partition('=') - name = name.strip() - val = val.strip() - if name == 'set noparent': - keep_looking = False - elif name == 'filter': - cfg_filters.append(val) - elif name == 'exclude_files': - # When matching exclude_files pattern, use the base_name of - # the current file name or the directory name we are processing. - # For example, if we are checking for lint errors in /foo/bar/baz.cc - # and we found the .cfg file at /foo/CPPLINT.cfg, then the config - # file's "exclude_files" filter is meant to be checked against "bar" - # and not "baz" nor "bar/baz.cc". - if base_name: - pattern = re.compile(val) - if pattern.match(base_name): - sys.stderr.write('Ignoring "%s": file excluded by "%s". 
' - 'File path component "%s" matches ' - 'pattern "%s"\n' % - (filename, cfg_file, base_name, val)) - return False - elif name == 'linelength': - global _line_length - try: - _line_length = int(val) - except ValueError: - sys.stderr.write('Line length must be numeric.') - else: - sys.stderr.write( - 'Invalid configuration option (%s) in file %s\n' % - (name, cfg_file)) - - except IOError: - sys.stderr.write( - "Skipping config file '%s': Can't open for reading\n" % cfg_file) - keep_looking = False - - # Apply all the accumulated filters in reverse order (top-level directory - # config options having the least priority). - for filter in reversed(cfg_filters): - _AddFilters(filter) - - return True - - -def ProcessFile(filename, vlevel, extra_check_functions=[]): - """Does google-lint on a single file. - - Args: - filename: The name of the file to parse. - - vlevel: The level of errors to report. Every error of confidence - >= verbose_level will be reported. 0 is a good default. - - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ - - _SetVerboseLevel(vlevel) - _BackupFilters() - - if not ProcessConfigOverrides(filename): - _RestoreFilters() - return - - lf_lines = [] - crlf_lines = [] - try: - # Support the UNIX convention of using "-" for stdin. Note that - # we are not opening the file with universal newline support - # (which codecs doesn't support anyway), so the resulting lines do - # contain trailing '\r' characters if we are reading a file that - # has CRLF endings. - # If after the split a trailing '\r' is present, it is removed - # below. - if filename == '-': - lines = codecs.StreamReaderWriter(sys.stdin, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace').read().split('\n') - else: - lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') - - # Remove trailing '\r'. - # The -1 accounts for the extra trailing blank line we get from split() - for linenum in range(len(lines) - 1): - if lines[linenum].endswith('\r'): - lines[linenum] = lines[linenum].rstrip('\r') - crlf_lines.append(linenum + 1) - else: - lf_lines.append(linenum + 1) - - except IOError: - sys.stderr.write( - "Skipping input '%s': Can't open for reading\n" % filename) - _RestoreFilters() - return - - # Note, if no dot is found, this will give the entire filename as the ext. - file_extension = filename[filename.rfind('.') + 1:] - - # When reading from stdin, the extension is unknown, so no cpplint tests - # should rely on the extension. - if filename != '-' and file_extension not in _valid_extensions: - sys.stderr.write('Ignoring %s; not a valid file name ' - '(%s)\n' % (filename, ', '.join(_valid_extensions))) - else: - ProcessFileData(filename, file_extension, lines, Error, - extra_check_functions) - - # If end-of-line sequences are a mix of LF and CR-LF, issue - # warnings on the lines with CR. - # - # Don't issue any warnings if all lines are uniformly LF or CR-LF, - # since critique can handle these just fine, and the style guide - # doesn't dictate a particular end of line sequence. - # - # We can't depend on os.linesep to determine what the desired - # end-of-line sequence should be, since that will return the - # server-side end-of-line sequence. - if lf_lines and crlf_lines: - # Warn on every line with CR. 
An alternative approach might be to
-    # check whether the file is mostly CRLF or just LF, and warn on the
-    # minority; we bias toward LF here since most tools prefer LF.
-    for linenum in crlf_lines:
-      Error(filename, linenum, 'whitespace/newline', 1,
-            'Unexpected \\r (^M) found; better to use only \\n')
-
-  sys.stderr.write('Done processing %s\n' % filename)
-  _RestoreFilters()
-
-
-def PrintUsage(message):
-  """Prints a brief usage string and exits, optionally with an error message.
-
-  Args:
-    message: The optional error message.
-  """
-  sys.stderr.write(_USAGE)
-  if message:
-    sys.exit('\nFATAL ERROR: ' + message)
-  else:
-    sys.exit(1)
-
-
-def PrintCategories():
-  """Prints a list of all the error-categories used by error messages.
-
-  These are the categories used to filter messages via --filter.
-  """
-  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
-  sys.exit(0)
-
-
-def ParseArguments(args):
-  """Parses the command line arguments.
-
-  This may set the output format and verbosity level as side-effects.
-
-  Args:
-    args: The command line arguments.
-
-  Returns:
-    The list of filenames to lint.
-  """
-  try:
-    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
-                                                 'counting=',
-                                                 'filter=',
-                                                 'root=',
-                                                 'linelength=',
-                                                 'extensions='])
-  except getopt.GetoptError:
-    PrintUsage('Invalid arguments.')
-
-  verbosity = _VerboseLevel()
-  output_format = _OutputFormat()
-  filters = ''
-  counting_style = ''
-
-  for (opt, val) in opts:
-    if opt == '--help':
-      PrintUsage(None)
-    elif opt == '--output':
-      if val not in ('emacs', 'vs7', 'eclipse'):
-        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
-      output_format = val
-    elif opt == '--verbose':
-      verbosity = int(val)
-    elif opt == '--filter':
-      filters = val
-      if not filters:
-        PrintCategories()
-    elif opt == '--counting':
-      if val not in ('total', 'toplevel', 'detailed'):
-        PrintUsage('Valid counting options are total, toplevel, and detailed')
-      counting_style = val
-    elif opt == '--root':
-      global _root
-      _root = val
-    elif opt == '--linelength':
-      global _line_length
-      try:
-        _line_length = int(val)
-      except ValueError:
-        PrintUsage('Line length must be digits.')
-    elif opt == '--extensions':
-      global _valid_extensions
-      try:
-        _valid_extensions = set(val.split(','))
-      except ValueError:
-        PrintUsage('Extensions must be a comma-separated list.')
-
-  if not filenames:
-    PrintUsage('No files were specified.')
-
-  _SetOutputFormat(output_format)
-  _SetVerboseLevel(verbosity)
-  _SetFilters(filters)
-  _SetCountingStyle(counting_style)
-
-  return filenames
-
-
-def main():
-  filenames = ParseArguments(sys.argv[1:])
-
-  # Change stderr to write with replacement characters so we don't die
-  # if we try to print something containing non-ASCII characters.
- sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') - - _cpplint_state.ResetErrorCounts() - for filename in filenames: - ProcessFile(filename, _cpplint_state.verbose_level) - _cpplint_state.PrintErrorCounts() - - sys.exit(_cpplint_state.error_count > 0) - - -if __name__ == '__main__': - main() diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deploy.enc b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deploy.enc deleted file mode 100644 index d419f9e1..00000000 Binary files a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deploy.enc and /dev/null differ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka.gyp b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka.gyp deleted file mode 100644 index 6093d7b6..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka.gyp +++ /dev/null @@ -1,62 +0,0 @@ -{ - 'targets': [ - { - "target_name": "librdkafka", - "type": "none", - "conditions": [ - [ - 'OS=="win"', - { - }, - { - "actions": [ - { - "action_name": "configure", - "inputs": [], - "outputs": [ - "librdkafka/config.h", - ], - "action": [ - "node", "../util/configure" - ] - }, - { - "action_name": "build_dependencies", - "inputs": [ - "librdkafka/config.h", - ], - "action": [ - "make", "-C", "librdkafka", "libs", "install" - ], - "conditions": [ - [ - 'OS=="mac"', - { - 'outputs': [ - 'deps/librdkafka/src-cpp/librdkafka++.dylib', - 'deps/librdkafka/src-cpp/librdkafka++.1.dylib', - 'deps/librdkafka/src/librdkafka.dylib', - 'deps/librdkafka/src/librdkafka.1.dylib' - ], - }, - { - 'outputs': [ - 'deps/librdkafka/src-cpp/librdkafka++.so', - 'deps/librdkafka/src-cpp/librdkafka++.so.1', - 'deps/librdkafka/src/librdkafka.so', - 'deps/librdkafka/src/librdkafka.so.1', - 'deps/librdkafka/src-cpp/librdkafka++.a', - 'deps/librdkafka/src/librdkafka.a', - ], - } - ] - ], - } - ] - } - - ] - ] - } - ] -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.appveyor.yml b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.appveyor.yml deleted file mode 100644 index b2151341..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.appveyor.yml +++ /dev/null @@ -1,105 +0,0 @@ -version: 1.8.0-R-post{build} -pull_requests: - do_not_increment_build_number: true -image: Visual Studio 2015 -configuration: Release -environment: - runtime: v140 - matrix: - - platform: x64 - arch: x64 - - platform: win32 - arch: x86 -install: -- ps: "& .\\win32\\install-coapp.ps1" - # Update vcpkg (is outdated on the VS 2015 image) -- cmd: | - cd "C:\Tools\vcpkg" - git pull -q - .\bootstrap-vcpkg.bat - cd %appveyor_build_folder% -cache: - - c:\tools\vcpkg\installed - - C:\Users\appveyor\AppData\Local\vcpkg\archives - - C:\Users\appveyor\AppData\Local\vcpkg\installed -nuget: - account_feed: true - project_feed: true - disable_publish_on_pr: true 
-before_build: - - cmd: vcpkg --feature-flags=versions install --triplet %arch%-windows -build: - project: win32/librdkafka.sln - publish_nuget: true - publish_nuget_symbols: true - include_nuget_references: true - parallel: true - verbosity: normal -test_script: -- cmd: cd tests && ..\win32\outdir\%runtime%\%PLATFORM%\%CONFIGURATION%\tests.exe -l -Q -p1 && cd .. -artifacts: -- path: test_report*.json - name: Test report -- path: '*.nupkg' - name: Packages -- path: '**\*.dll' - name: Libraries -- path: '**\*.lib' - name: Libraries -- path: '**\*.pdb' - name: Libraries -- path: '**\*.exe' - name: Executables -#before_deploy: -after_test: -- ps: >- - # FIXME: Add to Deployment condition above: - - # APPVEYOR_REPO_TAG = true - - - - # This is the CoApp .autopkg file to create. - - $autopkgFile = "win32/librdkafka.autopkg" - - pwd - - - ls $autopkgFile - - - - # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file. - - cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile - - - # Use the CoApp tools to create NuGet native packages from the .autopkg. - - Write-NuGetPackage $autopkgFile - - - # Push all newly created .nupkg files as Appveyor artifacts for later deployment. - - Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name } -deploy: -- provider: S3 - access_key_id: - secure: 3SmFFB3J1WWjLqxouvH8zLdcmrFNVHHbkROb+2BBVJE= - secret_access_key: - secure: VT0D5uzlaJI6gfZbemKCnf0MMh6qnlcmioVADK0oCkW6syz+n17VzWScRjvAifPm - region: us-west-1 - bucket: librdkafka-ci-packages - folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID) - artifact: /.*\.(nupkg)/ - max_error_retry: 3 - on: - APPVEYOR_REPO_TAG: true -notifications: -- provider: Email - to: - - magnus@edenhill.se - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.dir-locals.el b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.dir-locals.el deleted file mode 100644 index b8c8f1e7..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.dir-locals.el +++ /dev/null @@ -1,10 +0,0 @@ -((nil - (compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -kw -j")) - (c-mode - (c-file-style . "linux") - (tab-width . 8) - (indent-tabs-mode . 
nil)) -) - -(if (file-exists-p (concat (dir-locals-find-file "./") "TAGS")) - (visit-tags-table (concat (dir-locals-find-file "./") "TAGS"))) diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.doozer.json b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.doozer.json deleted file mode 100644 index 47c52ba8..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.doozer.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "targets": { - "xenial-amd64": { - - "buildenv": "xenial-amd64", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -C tests build" - ], - "testcmd": [ - "make -C tests run_local_quick" - ], - }, - - "xenial-i386": { - "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works", - "buildenv": "xenial-i386", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "liblz4-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -C tests build" - ], - "testcmd": [ - "make -C tests run_local_quick" - ], - }, - - "xenial-armhf": { - - "buildenv": "xenial-armhf", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -j ${PARALLEL} -C tests build", - ], - "testcmd": [ - "cd tests", - "./run-test.sh -p1 -l", - "cd .." - ], - }, - - "stretch-mips": { - - "buildenv": "stretch-mips", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -j ${PARALLEL} -C tests build", - ], - "testcmd": [ - "cd tests", - "./run-test.sh -p1 -l", - "cd .." - ], - }, - - "cmake-xenial-amd64": { - - "buildenv": "xenial-amd64", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "cmake" - ], - "buildcmd": [ - "cmake -H. 
-B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug", - "cmake --build _builds", - ], - "testcmd": [ - "cd _builds", - "ctest -VV -R RdKafkaTestBrokerLess" - ], - } - }, - "artifacts": ["config.log", "Makefile.config", "config.h"] -} diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.gdbmacros b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.gdbmacros deleted file mode 100644 index a04366fd..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.gdbmacros +++ /dev/null @@ -1,19 +0,0 @@ - -# Print rd_kafka_msgq_t -define dump_msgq - set $rkmq = $arg0 - set $rkm = $rkmq.rkmq_msgs.tqh_first - set $exp_msgid = 0 - set $cnt = 0 - while $rkm != 0 - set $msgid = $rkm.rkm_u.producer.msgid - printf "#%d ((rd_kafka_msgq_t *)%p) msgid %llu\n", $cnt, $rkm, $msgid - if $exp_msgid != 0 && $exp_msgid != $msgid - printf " ^ WARNING: expected msgid %llu, not %llu\n", $exp_msgid, $msgid - end - set $exp_msgid = $msgid + 1 - set $rkm = $rkm.rkm_link.tqe_next - set $cnt++ - end -end - diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.travis.yml b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.travis.yml deleted file mode 100644 index f5a8d997..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/.travis.yml +++ /dev/null @@ -1,162 +0,0 @@ -language: c -dist: xenial -cache: ccache - -addons: - apt: - packages: - - python3 - - python3-pip - - python3-setuptools - # required by openssl installer - - perl - -env: - global: - - secure: "q7DQ6KCiQyMEpBf8mxPFl6hY9JEoaOUdIaLh1IuYn5TctiNIA+J6O/bL/dyDSy2Yjor61WAiiMOh77eMykm1wPl72kqjR97ui0uCq7BQQn4MWtKrXXi0eWLF3bYt2FbUGJZvrM0xeoWzSYT6np7CKu8ssgL8Fvr4bmf152IpdQ8=" - - secure: "XpFExynXwbSr6vTuGsZVyqF4sti+UmRxX2sztjpTdaIH0yo60d6KYT0SRW7BLdZNA6/XI1l1GPTAwcDwTM1XasnnFrD7i88uZsAneA/xEgZTGXtnVVWPJAcVoX/75Rxeibc8CfSc5MO9QmBMiGGuI3S6HHCj4RzCJacBhOjIhfA=" - -matrix: - include: - - name: "Linux GCC: +Debian packages +BuiltinRegex +Strict" - if: tag IS present - os: linux - compiler: gcc - env: ADDITIONAL_BUILDS="debian" LINKAGE=std - before_script: - - ./configure --install-deps --disable-lz4-ext --disable-regex-ext --prefix="$PWD/dest" --enable-strip - - - name: "RPM packages" - if: tag IS present - os: linux - compiler: gcc - env: ADDITIONAL_BUILDS="centos" SKIP_MAKE=y - - - name: "Linux clang: +alpine +manylinux +werror" - os: linux - compiler: clang - env: ADDITIONAL_BUILDS="alpine manylinux2010_x86_64" LINKAGE=std - before_script: - - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip - - - name: "Linux clang: +static +alpine-static -gssapi" - if: tag IS present - os: linux - compiler: clang - env: ADDITIONAL_BUILDS="alpine-static" LINKAGE=static - before_script: - - ./configure --enable-static --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "OSX GCC" - if: tag IS PRESENT - os: osx - compiler: gcc - env: LINKAGE=std HOMEBREW_NO_AUTO_UPDATE=1 - before_script: - - ./configure --install-deps --source-deps-only 
--disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "OSX clang: +static" - if: tag IS PRESENT - os: osx - compiler: clang - env: LINKAGE=static HOMEBREW_NO_AUTO_UPDATE=1 - before_script: - - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-static --enable-strip - - - name: "Windows MinGW-w64 Dynamic" - if: tag IS PRESENT - os: windows - env: - - LINKAGE=std - - SKIP_MAKE=y - before_install: - - source ./packaging/mingw-w64/travis-before-install.sh - before_script: - - ./packaging/mingw-w64/configure-build-msys2-mingw.sh - - - name: "Windows MinGW-w64 Static" - os: windows - env: - - LINKAGE=static - - SKIP_MAKE=y - before_install: - - source ./packaging/mingw-w64/travis-before-install.sh - before_script: - - ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh - - - name: "Linux GCC: +integration-tests +copyright-check +doc-check +devel +code-cov +c99 +c++98" - os: linux - dist: xenial - compiler: gcc - env: NO_ARTIFACTS=y RUN_INTEGRATION_TESTS=y COPYRIGHT_CHECK=y DOC_CHECK=y - before_script: - - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb - - sudo dpkg -i rapidjson-dev.deb - - sudo pip3 install -r tests/requirements.txt - - sudo apt update - - sudo apt install -y doxygen graphviz gdb - - ./configure --CFLAGS="-std=c99" --CXXFLAGS="-std=c++98" --install-deps --enable-devel --disable-lz4-ext --prefix="$PWD/dest" - - ./packaging/tools/rdutcoverage.sh - - - name: "Linux GCC arm64: +static -gssapi" - os: linux - arch: arm64 - dist: bionic - compiler: gcc - env: LINKAGE=std - before_script: - - ./configure --disable-gssapi --install-deps --source-deps-only --enable-static --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "Linux GCC s390x: +devel" - if: tag IS PRESENT - os: linux - arch: s390x - dist: bionic - compiler: gcc - env: NO_ARTIFACTS=y - before_script: - - sudo apt install -y gdb - - ./configure --enable-devel --disable-lz4-ext --prefix="$PWD/dest" - -install: - - ccache -s || echo "CCache is not available." - - rm -rf artifacts dest - - mkdir dest artifacts - - if [[ $TRAVIS_OS_NAME == "linux" ]]; then sudo apt update || true; fi - - if [[ $TRAVIS_DIST == "trusty" || $TRAVIS_DIST == "xenial" ]]; then sudo apt-get install -y libssl1.0.0 libssl-dev ; fi - - if [[ $TRAVIS_DIST == "bionic" || $TRAVIS_DIST == "focal" ]]; then sudo apt-get install -y libssl1.1 libssl-dev ; fi - - if [[ -n $DOCKER_PASSWORD && $TRAVIS_OS_NAME == "linux" ]]; then echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin ; fi - -before_cache: - - if [[ $TRAVIS_OS_NAME == windows ]]; then $msys2 pacman --sync --clean --noconfirm ; fi - -script: -- if [[ $SKIP_MAKE != y ]]; then (make -j2 all examples check && make -j2 -C tests build) || travis_terminate 1 ; fi -# Tag: Run quick local test suite on -# No tag: Run unit tests. -- if [[ $SKIP_MAKE != y && $RUN_INTEGRATION_TESTS != y ]]; then if [[ -n $TRAVIS_TAG ]]; then make -C tests run_local_quick; else make -C tests unit ; fi ; fi -- if [[ $SKIP_MAKE != y ]]; then make install || travis_terminate 1 ; fi -- if [[ -z $NO_ARTIFACTS ]]; then (cd dest && tar cvzf ../artifacts/librdkafka-${CC}.tar.gz .) 
; fi
-- if [[ -n $TRAVIS_TAG ]]; then for distro in $ADDITIONAL_BUILDS ; do packaging/tools/distro-build.sh $distro --enable-strip || travis_terminate 1 ; done ; fi
-- if [[ $COPYRIGHT_CHECK == y ]]; then make copyright-check || travis_terminate 1; fi
-- if [[ $DOC_CHECK == y ]]; then make docs || travis_terminate 1 ; fi
-- if [[ -z $TRAVIS_TAG && $RUN_INTEGRATION_TESTS == y ]]; then (cd tests && travis_retry ./interactive_broker_version.py -c "make quick" 2.7.0) || travis_terminate 1 ; fi
-- if [[ -f tests/core ]] && (which gdb >/dev/null); then (cd tests && LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner core < backtrace.gdb) ; fi
-- sha256sum artifacts/* || true
-
-deploy:
-  provider: s3
-  access_key_id:
-    secure: "sRsKY1YoPDb3b+9hHnBv4tDSdyB/FraYEKI1/+aKmqWxvOI6xYYFFP0Tvn6f4Rgk0wzYmxO/5V+cR+fmKxVhb1pItFXOdVqML0ilOTP5gtlOPUeHu9fytqw3q7GgMV8JR75g60BNVko9vZegtd2LIq6FWzAIvPSUJOAw7qekjGU="
-  secret_access_key:
-    secure: "ZDjH6Z9CJr2yo7Splm+0xpo30QbO+cpeqxFUn1d9XOyLZQ0dapr6iboxdPlJaCOIhqVUWXS0IJgFwCW+5vWb9Za6tFumP1MtJGiwE6bqr820G8E02umwSvbNijr44h+EyxQcxP71Ljjk22Pfu7SLKWqMJ/iIzcYe6Z6Sz8obSWA="
-  bucket: librdkafka-ci-packages
-  region: us-west-1
-  skip_cleanup: true
-  local-dir: artifacts
-  upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${TRAVIS_CPU_ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}__lnk-${LINKAGE}
-  on:
-    repo: edenhill/librdkafka
-    all_branches: true
-    tags: true
-    condition: $NO_ARTIFACTS != y
diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CHANGELOG.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CHANGELOG.md
deleted file mode 100644
index 0387cd0e..00000000
--- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CHANGELOG.md
+++ /dev/null
@@ -1,776 +0,0 @@
-# librdkafka v1.8.2
-
-librdkafka v1.8.2 is a maintenance release.
-
-## Enhancements
-
- * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380)
- * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l.
-   Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on
-   build type.
-
-## Fixes
-
- * The `librdkafka.redist` 1.8.0 package had two flaws:
-   - the linux-arm64 .so build was a linux-x64 build.
-   - the included Windows MSVC 140 runtimes for x64 were in fact x86.
-   The release script has been updated to verify the architectures of
-   provided artifacts to avoid this happening in the future.
- * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided.
-   This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go).
- * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04;
-   these builds are now performed on Ubuntu 16.04 instead.
-   This may affect users on ancient Linux distributions.
- * It was not possible to configure `ssl.ca.location` on OSX; the property
-   would automatically revert back to `probe` (default value).
-   This regression was introduced in v1.8.0. (#3566)
- * librdkafka's internal timers would not start if the timeout was set to 0,
-   which would result in some timeout operations not being enforced correctly,
-   e.g., the transactional producer API timeouts.
-   These timers are now started with a timeout of 1 microsecond.
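To make the `ssl.ca.pem` and `ssl.ca.location` notes above concrete, here is a minimal, hypothetical sketch of how these librdkafka properties are passed through confluent-kafka-python (which bundles librdkafka); the broker address, topic name, and certificate path are placeholders, not values from this repository:

```python
from confluent_kafka import Producer

conf = {
    'bootstrap.servers': 'broker:9093',   # placeholder broker address
    'security.protocol': 'ssl',
    # v1.8.2+ accepts the CA certificate inline as a PEM string...
    'ssl.ca.pem': open('/path/to/ca.pem').read(),   # placeholder path
    # ...or point ssl.ca.location at a file/directory; the value 'probe'
    # scans known locations (the OSX regression fixed above concerns it).
    # 'ssl.ca.location': 'probe',
}

p = Producer(conf)
p.produce('HelloTopic', value=b'hello')   # placeholder topic
p.flush(10)   # block up to 10s for outstanding delivery reports
```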
-
-### Transactional producer fixes
-
- * Upon quick repeated leader changes the transactional producer could receive
-   an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an
-   Epoch bump on the producer resulting in an InitProducerIdRequest being sent
-   to the transaction coordinator in the middle of a transaction.
-   This request would start a new transaction on the coordinator, but the
-   producer would still think (erroneously) it was in the current transaction.
-   Any messages produced in the current transaction prior to this event would
-   be silently lost when the application committed the transaction, leading
-   to message loss.
-   This has been fixed by setting the Abortable transaction error state
-   in the producer. #3575.
- * The transactional producer could stall during a transaction if the transaction
-   coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()).
-   This stall lasted until the coordinator connection went down, the
-   transaction timed out, the transaction was aborted, or messages were produced
-   to a new partition, whichever came first. #3571.
-
-
-
-*Note: there was no v1.8.1 librdkafka release*
-
-
-# librdkafka v1.8.0
-
-librdkafka v1.8.0 is a security release:
-
- * Upgrade bundled zlib version from 1.2.8 to 1.2.11 in the `librdkafka.redist`
-   NuGet package. The updated zlib version fixes CVEs:
-   CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
-   See https://github.com/edenhill/librdkafka/issues/2934 for more information.
- * librdkafka now uses [vcpkg](https://vcpkg.io/) for up-to-date Windows
-   dependencies in the `librdkafka.redist` NuGet package:
-   OpenSSL 1.1.1l, zlib 1.2.11, zstd 1.5.0.
- * The upstream dependency (OpenSSL, zstd, zlib) source archive checksums are
-   now verified when building with `./configure --install-deps`.
-   These builds are used by the librdkafka builds bundled with
-   confluent-kafka-go, confluent-kafka-python and confluent-kafka-dotnet.
-
-
-## Enhancements
-
- * Producer `flush()` now overrides the `linger.ms` setting for the duration
-   of the `flush()` call, effectively triggering immediate transmission of
-   queued messages. (#3489)
-
-## Fixes
-
-### General fixes
-
- * Correctly detect presence of zlib via compilation check. (Chris Novakovic)
- * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator
-   connection goes down, only when all standard named brokers have been tried.
-   This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on
-   `consumer_close()`. It is also now only emitted if the connection was fully
-   up (past handshake), and not just connected.
- * `rd_kafka_query_watermark_offsets()`, `rd_kafka_offsets_for_times()`,
-   `consumer_lag` metric, and `auto.offset.reset` now honour
-   `isolation.level` and will return the Last Stable Offset (LSO)
-   when `isolation.level` is set to `read_committed` (default), rather than
-   the uncommitted high-watermark when it is set to `read_uncommitted`. (#3423)
- * SASL GSSAPI is now usable when `sasl.kerberos.min.time.before.relogin`
-   is set to 0 - which disables ticket refreshes (by @mpekalski, #3431).
- * Rename internal crc32c() symbol to rd_crc32c() to avoid conflict with
-   other static libraries (#3421).
- * `txidle` and `rxidle` in the statistics object were emitted as 18446744073709551615 when no idle was known. -1 is now emitted instead. (#3519)
-
-
-### Consumer fixes
-
- * Automatically retry offset commits on `ERR_REQUEST_TIMED_OUT`,
-   `ERR_COORDINATOR_NOT_AVAILABLE`, and `ERR_NOT_COORDINATOR` (#3398).
-   Offset commits will be retried twice.
- * Timed auto commits did not work when only using assign() and not subscribe().
-   This regression was introduced in v1.7.0.
- * If the topics matching the current subscription changed (or the application
-   updated the subscription) while there was an outstanding JoinGroup or
-   SyncGroup request, an additional request would sometimes be sent before
-   handling the response of the first. This in turn led to internal state
-   issues that could cause a crash or misbehaviour.
-   The consumer will now wait for any outstanding JoinGroup or SyncGroup
-   responses before re-joining the group.
- * `auto.offset.reset` could previously be triggered by temporary errors,
-   such as disconnects and timeouts (after the two retries are exhausted).
-   This is now fixed so that the auto offset reset policy is only triggered
-   for permanent errors.
- * The error that triggers `auto.offset.reset` is now logged to help the
-   application owner identify the reason for the reset.
- * If a rebalance takes longer than a consumer's `session.timeout.ms`, the
-   consumer will remain in the group as long as it receives heartbeat responses
-   from the broker.
-
-
-### Admin fixes
-
- * `DeleteRecords()` could crash if one of the underlying requests
-   (for a given partition leader) failed at the transport level (e.g., timeout).
-   (#3476).
-
-
-
-# librdkafka v1.7.0
-
-librdkafka v1.7.0 is a feature release:
-
- * [KIP-360](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=89068820) -
-   Improve reliability of transactional producer.
-   Requires Apache Kafka 2.5 or later.
- * OpenSSL Engine support (`ssl.engine.location`) by @adinigam and @ajbarb.
-
-
-## Enhancements
-
- * Added `connections.max.idle.ms` to automatically close idle broker
-   connections.
-   This feature is disabled by default unless `bootstrap.servers` contains
-   the string `azure` in which case the default is set to <4 minutes to improve
-   connection reliability and circumvent limitations with the Azure load
-   balancers (see #3109 for more information).
- * Bumped to OpenSSL 1.1.1k in binary librdkafka artifacts.
- * The binary librdkafka artifacts for Alpine are now using Alpine 3.12 and
-   OpenSSL 1.1.1k.
- * Improved static librdkafka Windows builds using MinGW (@neptoess, #3130).
- * The `librdkafka.redist` NuGet package now has updated zlib, zstd and
-   OpenSSL versions (from vcpkg).
-
-
-## Security considerations
-
- * The zlib version bundled with the `librdkafka.redist` NuGet package has now been upgraded
-   from zlib 1.2.8 to 1.2.11, fixing the following CVEs:
-   * CVE-2016-9840: undefined behaviour (compiler dependent) in inflate (decompression) code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low.
-   * CVE-2016-9841: undefined behaviour (compiler dependent) in inflate code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low.
-   * CVE-2016-9842: undefined behaviour in inflateMark(): this API is not used by librdkafka.
-   * CVE-2016-9843: issue in crc32_big() which is called from crc32_z(): this API is not used by librdkafka.
-
-## Upgrade considerations
-
- * The C++ `oauthbearer_token_refresh_cb()` was missing a `Handle *`
-   argument that has now been added. This is a breaking change but the original
-   function signature is considered a bug.
-   This change only affects C++ OAuth developers.
- * [KIP-735](https://cwiki.apache.org/confluence/display/KAFKA/KIP-735%3A+Increase+default+consumer+session+timeout) The consumer `session.timeout.ms` - default was changed from 10 to 45 seconds to make consumer groups more - robust and less sensitive to temporary network and cluster issues. - * Statistics: `consumer_lag` is now using the `committed_offset`, - while the new `consumer_lag_stored` is using `stored_offset` - (offset to be committed). - This is more correct than the previous `consumer_lag` which was using - either `committed_offset` or `app_offset` (last message passed - to application). - * The `librdkafka.redist` NuGet package is now built with MSVC runtime v140 - (VS 2015). Previous versions were built with MSVC runtime v120 (VS 2013). - - -## Fixes - -### General fixes - - * Fix accesses to freed metadata cache mutexes on client termination (#3279) - * There was a race condition on receiving updated metadata where a broker id - update (such as bootstrap to proper broker transformation) could finish after - the topic metadata cache was updated, leading to existing brokers seemingly - being not available. - One occurrence of this issue was query_watermark_offsets() that could return - `ERR__UNKNOWN_PARTITION` for existing partitions shortly after the - client instance was created. - * The OpenSSL context is now initialized with `TLS_client_method()` - (on OpenSSL >= 1.1.0) instead of the deprecated and outdated - `SSLv23_client_method()`. - * The initial cluster connection on client instance creation could sometimes - be delayed up to 1 second if a `group.id` or `transactional.id` - was configured (#3305). - * Speed up triggering of new broker connections in certain cases by exiting - the broker thread io/op poll loop when a wakeup op is received. - * SASL GSSAPI: The Kerberos kinit refresh command was triggered from - `rd_kafka_new()` which made this call blocking if the refresh command - was taking long. The refresh is now performed by the background rdkafka - main thread. - * Fix busy-loop (100% CPU on the broker threads) during the handshake phase - of an SSL connection. - * Disconnects during SSL handshake are now propagated as transport errors - rather than SSL errors, since these disconnects are at the transport level - (e.g., incorrect listener, flaky load balancer, etc) and not due to SSL - issues. - * Increment metadata fast refresh interval backoff exponentially (@ajbarb, #3237). - * Unthrottled requests are no longer counted in the `brokers[].throttle` - statistics object. - * Log CONFWARN warning when global topic configuration properties - are overwritten by explicitly setting a `default_topic_conf`. - -### Consumer fixes - - * If a rebalance happened during a `consume_batch..()` call the already - accumulated messages for revoked partitions were not purged, which would - pass messages to the application for partitions that were no longer owned - by the consumer. Fixed by @jliunyu. #3340. - * Fix balancing and reassignment issues with the cooperative-sticky assignor. - #3306. - * Fix incorrect detection of first rebalance in sticky assignor (@hallfox). - * Aborted transactions with no messages produced to a partition could - cause further successfully committed messages in the same Fetch response to - be ignored, resulting in consumer-side message loss. - A log message along the lines `Abort txn ctrl msg bad order at offset - 7501: expected before or at 7702: messages in aborted transactions may be delivered to the application` - would be seen. 
-   This is a rare occurrence where a transactional producer would register with
-   the partition but not produce any messages before aborting the transaction.
- * The consumer group deemed cached metadata up to date by checking
-   `topic.metadata.refresh.interval.ms`: if this property was set too low
-   it would cause cached metadata to be unusable and new metadata to be fetched,
-   which could delay the time it took for a rebalance to settle.
-   It now correctly uses `metadata.max.age.ms` instead.
- * The consumer group timed auto commit would attempt commits during rebalances,
-   which could result in "Illegal generation" errors. This is now fixed; the
-   timed auto committer is only employed in the steady state when no rebalances
-   are taking place. Offsets are still auto committed when partitions are
-   revoked.
- * Retriable FindCoordinatorRequest errors are no longer propagated to
-   the application as they are retried automatically.
- * Fix rare crash (assert `rktp_started`) on consumer termination
-   (introduced in v1.6.0).
- * Fix unaligned access and possibly corrupted snappy decompression when
-   building with MSVC (@azat)
- * A consumer configured with the `cooperative-sticky` assignor did
-   not actively Leave the group on unsubscribe(). This delayed the
-   rebalance for the remaining group members by up to `session.timeout.ms`.
- * The current subscription list was sometimes leaked when unsubscribing.
-
-### Producer fixes
-
- * The timeout value of `flush()` was not respected when delivery reports
-   were scheduled as events (such as for confluent-kafka-go) rather than
-   callbacks.
- * There was a race condition in `purge()` which could cause newly
-   created partition objects, or partitions that were changing leaders, to
-   not have their message queues purged. This could cause
-   `abort_transaction()` to time out. This issue is now fixed.
- * In certain high-throughput produce rate patterns producing could stall for
-   1 second, regardless of `linger.ms`, due to rate-limiting of internal
-   queue wakeups. This is now fixed by not rate-limiting queue wakeups but
-   instead limiting them to one wakeup per queue reader poll. #2912.
-
-### Transactional Producer fixes
-
- * KIP-360: Fatal Idempotent producer errors are now recoverable by the
-   transactional producer and will raise a `txn_requires_abort()` error.
- * If the cluster went down between `produce()` and `commit_transaction()`
-   and before any partitions had been registered with the coordinator, the
-   messages would time out but the commit would succeed because nothing
-   had been sent to the coordinator. This is now fixed.
- * If the current transaction failed while `commit_transaction()` was
-   checking the current transaction state, an invalid state transition could
-   occur which in turn would trigger an assertion crash.
-   This issue showed up as "Invalid txn state transition: .." crashes, and is
-   now fixed by properly synchronizing both checking and transition of state.
-
-
-
-# librdkafka v1.6.1
-
-librdkafka v1.6.1 is a maintenance release.
-
-## Upgrade considerations
-
- * Fatal idempotent producer errors are now also fatal to the transactional
-   producer. This is a necessary step to maintain data integrity prior to
-   librdkafka supporting KIP-360. Applications should check any transactional
-   API errors for the is_fatal flag and decommission the transactional producer
-   if the flag is set.
- * The consumer error raised by `auto.offset.reset=error` now has error-code
-   set to `ERR__AUTO_OFFSET_RESET` to allow an application to differentiate
-   between auto offset resets and other consumer errors.
-
-
-## Fixes
-
-### General fixes
-
- * Admin API and transactional `send_offsets_to_transaction()` coordinator
-   requests, such as TxnOffsetCommitRequest, could in rare cases be sent
-   multiple times, which could cause a crash.
- * `ssl.ca.location=probe` is now enabled by default on Mac OSX since the
-   librdkafka-bundled OpenSSL might not have the same default CA search paths
-   as the system or brew installed OpenSSL. Probing scans all known locations.
-
-### Transactional Producer fixes
-
- * Fatal idempotent producer errors are now also fatal to the transactional
-   producer.
- * The transactional producer could crash if the transaction failed while
-   `send_offsets_to_transaction()` was called.
- * Group coordinator requests for transactional
-   `send_offsets_to_transaction()` calls would leak memory if the
-   underlying request was attempted to be sent after the transaction had
-   failed.
- * When gradually producing to multiple partitions (resulting in multiple
-   underlying AddPartitionsToTxnRequests) subsequent partitions could get
-   stuck in pending state under certain conditions. These pending partitions
-   would not send queued messages to the broker and eventually trigger
-   message timeouts, failing the current transaction. This is now fixed.
- * Committing an empty transaction (no messages were produced and no
-   offsets were sent) would previously raise a fatal error due to invalid state
-   on the transaction coordinator. We now allow empty/no-op transactions to
-   be committed.
-
-### Consumer fixes
-
- * The consumer will now retry indefinitely (or until the assignment is changed)
-   to retrieve committed offsets. This fixes the issue where only two retries
-   were attempted when outstanding transactions were blocking OffsetFetch
-   requests with `ERR_UNSTABLE_OFFSET_COMMIT`. #3265
-
-
-
-
-
-# librdkafka v1.6.0
-
-librdkafka v1.6.0 is a feature release:
-
- * [KIP-429 Incremental rebalancing](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol) with sticky
-   consumer group partition assignor (KIP-54) (by @mhowlett).
- * [KIP-480 Sticky producer partitioning](https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner) (`sticky.partitioning.linger.ms`) -
-   achieves higher throughput and lower latency through sticky selection
-   of a random partition (by @abbycriswell).
- * AdminAPI: Add support for `DeleteRecords()`, `DeleteGroups()` and
-   `DeleteConsumerGroupOffsets()` (by @gridaphobe)
- * [KIP-447 Producer scalability for exactly once semantics](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) -
-   allows a single transactional producer to be used for multiple input
-   partitions. Requires Apache Kafka 2.5 or later.
- * Transactional producer fixes and improvements, see **Transactional Producer fixes** below.
- * The [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/)
-   NuGet package now supports Linux ARM64/Aarch64.
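As a rough illustration of the KIP-429 incremental rebalancing listed above, a consumer opts in through `partition.assignment.strategy`. A minimal sketch using confluent-kafka-python follows; the broker, group, and topic names are placeholders:

```python
from confluent_kafka import Consumer

c = Consumer({
    'bootstrap.servers': 'broker:9092',   # placeholder
    'group.id': 'demo-group',             # placeholder
    # Opt in to cooperative (incremental) rebalancing, available from
    # librdkafka v1.6.0 onwards.
    'partition.assignment.strategy': 'cooperative-sticky',
})

def on_assign(consumer, partitions):
    # With the cooperative assignor, only the newly added partitions are
    # expected here, rather than the full assignment being revoked and
    # handed back on every rebalance.
    print('incrementally assigned:', partitions)

def on_revoke(consumer, partitions):
    print('incrementally revoked:', partitions)

c.subscribe(['HelloTopic'], on_assign=on_assign, on_revoke=on_revoke)
while True:
    msg = c.poll(1.0)
    if msg is not None and msg.error() is None:
        print(msg.value())
```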
- - -## Upgrade considerations - - * Sticky producer partitioning (`sticky.partitioning.linger.ms`) is - enabled by default (10 milliseconds) which affects the distribution of - randomly partitioned messages, where previously these messages would be - evenly distributed over the available partitions they are now partitioned - to a single partition for the duration of the sticky time - (10 milliseconds by default) before a new random sticky partition - is selected. - * The new KIP-447 transactional producer scalability guarantees are only - supported on Apache Kafka 2.5 or later, on earlier releases you will - need to use one producer per input partition for EOS. This limitation - is not enforced by the producer or broker. - * Error handling for the transactional producer has been improved, see - the **Transactional Producer fixes** below for more information. - - -## Known issues - - * The Transactional Producer's API timeout handling is inconsistent with the - underlying protocol requests, it is therefore strongly recommended that - applications call `rd_kafka_commit_transaction()` and - `rd_kafka_abort_transaction()` with the `timeout_ms` parameter - set to `-1`, which will use the remaining transaction timeout. - - -## Enhancements - - * KIP-107, KIP-204: AdminAPI: Added `DeleteRecords()` (by @gridaphobe). - * KIP-229: AdminAPI: Added `DeleteGroups()` (by @gridaphobe). - * KIP-496: AdminAPI: Added `DeleteConsumerGroupOffsets()`. - * KIP-464: AdminAPI: Added support for broker-side default partition count - and replication factor for `CreateTopics()`. - * Windows: Added `ssl.ca.certificate.stores` to specify a list of - Windows Certificate Stores to read CA certificates from, e.g., - `CA,Root`. `Root` remains the default store. - * Use reentrant `rand_r()` on supporting platforms which decreases lock - contention (@azat). - * Added `assignor` debug context for troubleshooting consumer partition - assignments. - * Updated to OpenSSL v1.1.1i when building dependencies. - * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3 - which has vast performance improvements. - * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the - default topic configuration object from a global configuration object. - * Added `conf` debugging context to `debug` - shows set configuration - properties on client and topic instantiation. Sensitive properties - are redacted. - * Added `rd_kafka_queue_yield()` to cancel a blocking queue call. - * Will now log a warning when multiple ClusterIds are seen, which is an - indication that the client might be erroneously configured to connect to - multiple clusters which is not supported. - * Added `rd_kafka_seek_partitions()` to seek multiple partitions to - per-partition specific offsets. - - -## Fixes - -### General fixes - - * Fix a use-after-free crash when certain coordinator requests were retried. - * The C++ `oauthbearer_set_token()` function would call `free()` on - a `new`-created pointer, possibly leading to crashes or heap corruption (#3194) - -### Consumer fixes - - * The consumer assignment and consumer group implementations have been - decoupled, simplified and made more strict and robust. This will sort out - a number of edge cases for the consumer where the behaviour was previously - undefined. - * Partition fetch state was not set to STOPPED if OffsetCommit failed. - * The session timeout is now enforced locally also when the coordinator - connection is down, which was not previously the case. 
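Given the known-issues note above about transactional API timeouts, here is a hedged sketch of the recommended commit/abort flow in confluent-kafka-python; the broker, `transactional.id`, and topic are placeholders, and the optional timeout argument is simply omitted here (the note recommends passing `timeout_ms = -1` explicitly when using the C API directly):

```python
from confluent_kafka import Producer, KafkaException

p = Producer({
    'bootstrap.servers': 'broker:9092',   # placeholder
    'transactional.id': 'demo-txn-1',     # placeholder; see the KIP-447 note above
})

p.init_transactions()   # fences older instances with the same transactional.id
try:
    p.begin_transaction()
    p.produce('HelloTopic', value=b'hello')   # placeholder topic
    p.commit_transaction()   # optional timeout argument omitted
except KafkaException:
    p.abort_transaction()
```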
- - -### Transactional Producer fixes - - * Transaction commit or abort failures on the broker, such as when the - producer was fenced by a newer instance, were not propagated to the - application resulting in failed commits seeming successful. - This was a critical race condition for applications that had a delay after - producing messages (or sendings offsets) before committing or - aborting the transaction. This issue has now been fixed and test coverage - improved. - * The transactional producer API would return `RD_KAFKA_RESP_ERR__STATE` - when API calls were attempted after the transaction had failed, we now - try to return the error that caused the transaction to fail in the first - place, such as `RD_KAFKA_RESP_ERR__FENCED` when the producer has - been fenced, or `RD_KAFKA_RESP_ERR__TIMED_OUT` when the transaction - has timed out. - * Transactional producer retry count for transactional control protocol - requests has been increased from 3 to infinite, retriable errors - are now automatically retried by the producer until success or the - transaction timeout is exceeded. This fixes the case where - `rd_kafka_send_offsets_to_transaction()` would fail the current - transaction into an abortable state when `CONCURRENT_TRANSACTIONS` was - returned by the broker (which is a transient error) and the 3 retries - were exhausted. - - -### Producer fixes - - * Calling `rd_kafka_topic_new()` with a topic config object with - `message.timeout.ms` set could sometimes adjust the global `linger.ms` - property (if not explicitly configured) which was not desired, this is now - fixed and the auto adjustment is only done based on the - `default_topic_conf` at producer creation. - * `rd_kafka_flush()` could previously return `RD_KAFKA_RESP_ERR__TIMED_OUT` - just as the timeout was reached if the messages had been flushed but - there were now no more messages. This has been fixed. - - - - -# librdkafka v1.5.3 - -librdkafka v1.5.3 is a maintenance release. - -## Upgrade considerations - - * CentOS 6 is now EOL and is no longer included in binary librdkafka packages, - such as NuGet. - -## Fixes - -### General fixes - - * Fix a use-after-free crash when certain coordinator requests were retried. - * Coordinator requests could be left uncollected on instance destroy which - could lead to hang. - * Fix rare 1 second stalls by forcing rdkafka main thread wakeup when a new - next-timer-to-be-fired is scheduled. - * Fix additional cases where broker-side automatic topic creation might be - triggered unexpectedly. - * AdminAPI: The operation_timeout (on-broker timeout) previously defaulted to 0, - but now defaults to `socket.timeout.ms` (60s). - * Fix possible crash for Admin API protocol requests that fail at the - transport layer or prior to sending. - - -### Consumer fixes - - * Consumer would not filter out messages for aborted transactions - if the messages were compressed (#3020). - * Consumer destroy without prior `close()` could hang in certain - cgrp states (@gridaphobe, #3127). - * Fix possible null dereference in `Message::errstr()` (#3140). - * The `roundrobin` partition assignment strategy could get stuck in an - endless loop or generate uneven assignments in case the group members - had asymmetric subscriptions (e.g., c1 subscribes to t1,t2 while c2 - subscribes to t2,t3). 
- - - -# librdkafka v1.5.2 - -librdkafka v1.5.2 is a maintenance release. - - -## Upgrade considerations - - * The default value for the producer configuration property `retries` has - been increased from 2 to infinity, effectively limiting Produce retries to - only `message.timeout.ms`. - As the reasons for the automatic internal retries vary (various broker error - codes as well as transport layer issues), it makes little sense to limit - the number of retries for retriable errors; instead, retries are only limited - by the allowed time to produce a message. - * The default value for the producer configuration property - `request.timeout.ms` has been increased from 5 to 30 seconds to match - the Apache Kafka Java producer default. - This change yields increased robustness against broker-side congestion. - - -## Enhancements - - * The generated `CONFIGURATION.md` (through `rd_kafka_conf_properties_show()`) - now includes all properties and values, regardless of whether they were included in - the build, and setting a disabled property or value through - `rd_kafka_conf_set()` now returns `RD_KAFKA_CONF_INVALID` and provides - a more useful error string saying why the property can't be set. - * Consumer configs on producers and vice versa will now be logged with - warning messages on client instantiation. - -## Fixes - -### Security fixes - - * There was an incorrect call to zlib's `inflateGetHeader()` with - uninitialized memory pointers that could lead to the GZIP header of a fetched - message batch being copied to arbitrary memory. - This function call has now been completely removed since the result was - not used. - Reported by Ilja van Sprundel. - - -### General fixes - - * `rd_kafka_topic_opaque()` (used by the C++ API) would cause object - refcounting issues when used on light-weight (error-only) topic objects - such as consumer errors (#2693). - * Handle name resolution failures when formatting IP addresses in error logs, - and increase printed hostname limit to ~256 bytes (was ~60). - * Broker sockets would be closed twice (thus leading to a potential race - condition with fd-reuse in other threads) if a custom `socket_cb` - returned an error. - -### Consumer fixes - - * The `roundrobin` `partition.assignment.strategy` could crash (assert) - for certain combinations of members and partitions. - This is a regression in v1.5.0. (#3024) - * The C++ `KafkaConsumer` destructor did not destroy the underlying - C `rd_kafka_t` instance, causing a leak if `close()` was not used. - * Expose rich error strings for C++ Consumer `Message->errstr()`. - * The consumer could get stuck if an outstanding commit failed during - rebalancing (#2933). - * Topic authorization errors during fetching are now reported only once (#3072). - -### Producer fixes - - * Topic authorization errors are now properly propagated for produced messages, - both through delivery reports and as the `ERR_TOPIC_AUTHORIZATION_FAILED` - return value from `produce*()` (#2215). - * Treat cluster authentication failures as fatal in the transactional - producer (#2994). - * The transactional producer code did not properly reference-count partition - objects, which could in very rare circumstances lead to a use-after-free bug - if a topic was deleted from the cluster while a transaction was using it. - * `ERR_KAFKA_STORAGE_ERROR` is now correctly treated as a retriable - produce error (#3026). - * Messages that timed out locally would not fail the ongoing transaction. - If the application did not take action on failed messages in its delivery - report callback and went on to commit the transaction, the transaction would - be successfully committed, simply omitting the failed messages - (see the delivery report sketch below). - * EndTxnRequests (sent on commit/abort) are only retried in allowed - states (#3041). - Previously the transaction could hang on commit_transaction() if an abortable - error was hit and the EndTxnRequest was to be retried.
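To act on the locally-timed-out-messages fix above, an application needs to observe per-message delivery reports before committing. A minimal sketch, assuming the illustrative application-side flag `txn_delivery_failed` (not part of librdkafka):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative application flag, not a librdkafka facility. */
static volatile int txn_delivery_failed = 0;

/* Delivery report callback: record any failed delivery so the
 * application can abort the transaction instead of committing it.
 * Registered before rd_kafka_new() with:
 *     rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
 */
static void dr_msg_cb(rd_kafka_t *rk,
                      const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err) {
                fprintf(stderr, "Delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
                txn_delivery_failed = 1;
        }
}
```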
- - *Note: there was no v1.5.1 librdkafka release* - - - - -# librdkafka v1.5.0 - -The v1.5.0 release brings usability improvements, enhancements and fixes to -librdkafka. - -## Enhancements - - * Improved broker connection error reporting with more useful information and - hints on the cause of the problem. - * Consumer: Propagate errors when subscribing to unavailable topics (#1540). - * Producer: Add `batch.size` producer configuration property (#638). - * Add `topic.metadata.propagation.max.ms` to allow newly manually created - topics to be propagated throughout the cluster before reporting them - as non-existent. This fixes race issues where CreateTopics() is - quickly followed by produce(). - * Prefer the least idle connection for periodic metadata refreshes and similar - requests, to allow truly idle connections to time out and to avoid load-balancer-killed - idle connection errors (#2845). - * Added `rd_kafka_event_debug_contexts()` to get the debug contexts for - a debug log line (by @wolfchimneyrock). - * Added test scenarios which define the cluster configuration. - * Added MinGW-w64 builds (@ed-alertedh, #2553). - * `./configure --enable-XYZ` now requires the XYZ check to pass, - and `--disable-XYZ` disables the feature altogether (@benesch). - * Added `rd_kafka_produceva()`, which takes an array of produce arguments - for situations where the existing `rd_kafka_producev()` va-arg approach - can't be used. - * Added `rd_kafka_message_broker_id()` to see the broker that a message - was produced or fetched from, or that an error was associated with. - * Added RTT/delay simulation to mock brokers. - - -## Upgrade considerations - - * Subscribing to non-existent and unauthorized topics will now propagate - errors `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` and - `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED` to the application through - the standard consumer error (the err field in the message object). - * The consumer will no longer trigger auto creation of topics; - `allow.auto.create.topics=true` may be used to re-enable the old deprecated - functionality. - * The default consumer pre-fetch queue threshold `queued.max.messages.kbytes` - has been decreased from 1GB to 64MB to avoid excessive network usage for low - and medium throughput consumer applications. High throughput consumer - applications may need to manually set this property to a higher value. - * The default consumer Fetch wait time has been increased from 100ms to 500ms - to avoid excessive network usage for low throughput topics. - * If OpenSSL is linked statically, or `ssl.ca.location=probe` is configured, - librdkafka will probe known CA certificate paths and automatically use the - first one found. This should alleviate the need to configure - `ssl.ca.location` when the statically linked OpenSSL's OPENSSLDIR differs - from the system's CA certificate path. - * The heuristics for handling Apache Kafka < 0.10 brokers have been removed to - improve connection error handling for modern Kafka versions. - Users on brokers 0.9.x or older should already be configuring - `api.version.request=false` and `broker.version.fallback=...`, so there - should be no functional change. - * The default producer batch accumulation time, `linger.ms`, has been changed - from 0.5ms to 5ms to improve batch sizes and throughput while reducing - the per-message protocol overhead. - Applications that require lower produce latency than 5ms will need to - manually set `linger.ms` to a lower value (see the configuration sketch - after this list). - * librdkafka's build tooling now requires Python 3.x (python3 interpreter).
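A hedged configuration sketch for the upgrade considerations above: pinning `linger.ms` explicitly for latency-sensitive producers and opting into CA path probing. The property values are illustrative, not recommendations.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_t *create_producer(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Pin linger.ms explicitly now that the default is 5 ms. */
        if (rd_kafka_conf_set(conf, "linger.ms", "0.5",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* Probe standard CA certificate paths, e.g. for statically
             * linked OpenSSL builds. */
            rd_kafka_conf_set(conf, "ssl.ca.location", "probe",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "Configuration failed: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        /* On success the producer instance takes ownership of conf. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}
```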
- - -## Fixes - -### General fixes - - * The client could crash in rare circumstances on ApiVersion or - SaslHandshake request timeouts (#2326). - * `./configure --LDFLAGS='a=b, c=d'` with arguments containing `=` are now - supported (by @sky92zwq). - * `./configure` arguments now take precedence over cached `configure` variables - from previous invocations. - * Fix theoretical crash on coord request failure. - * Unknown partition error could be triggered for existing partitions when - additional partitions were added to a topic (@benesch, #2915). - * Quickly refresh topic metadata for desired but non-existent partitions. - This shortens the initial discovery delay when new partitions are added - to an existing topic (#2917). - - -### Consumer fixes - - * The roundrobin partition assignor could crash if subscriptions - were asymmetrical (different sets from different members of the group). - Thanks to @ankon and @wilmai for identifying the root cause (#2121). - * The consumer assignors could ignore some topics if there were more subscribed - topics than consumers taking part in the assignment. - * The consumer would connect to all partition leaders of a topic even - for partitions that were not being consumed (#2826). - * Initial consumer group joins should now be a couple of seconds quicker - thanks to expedited query intervals (@benesch). - * Fix crash and/or inconsistent subscriptions when using multiple consumers - (in the same process) with wildcard topics on Windows. - * Don't propagate temporary offset lookup errors to the application. - * Immediately refresh topic metadata when partitions are reassigned to other - brokers, avoiding a fetch stall of up to `topic.metadata.refresh.interval.ms` (#2955). - * Memory for batches containing control messages would not be freed when - using the batch consume APIs (@pf-qiu, #2990). - - -### Producer fixes - - * Proper locking for transaction state in EndTxn handler. - - - -# librdkafka v1.4.4 - -v1.4.4 is a maintenance release with the following fixes and enhancements: - - * The transactional producer could crash on request timeout due to dereferencing - a NULL pointer of a non-existent response object. - * Mark `rd_kafka_send_offsets_to_transaction()` CONCURRENT_TRANSACTION (and - similar) errors as retriable (see the sketch after this list). - * Fix crash on transactional coordinator FindCoordinator request failure. - * Minimize broker re-connect delay when a broker's connection is needed to - send requests. - * Proper locking for transaction state in EndTxn handler. - * `socket.timeout.ms` was ignored when `transactional.id` was set. - * Added RTT/delay simulation to mock brokers.
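Since several of the v1.4.4 fixes concern `rd_kafka_send_offsets_to_transaction()`, here is a minimal sketch of that call. The topic name `HelloTopic`, partition `0` and offset `42` are illustrative only; `rk` is assumed to be a transactional producer and `consumer` the consumer whose offsets are being forwarded.

```c
#include <librdkafka/rdkafka.h>

/* Sketch: forward a consumed offset into the ongoing transaction.
 * Returns NULL on success, or an error object the caller must
 * inspect and destroy. */
static rd_kafka_error_t *send_offsets(rd_kafka_t *rk, rd_kafka_t *consumer) {
        rd_kafka_topic_partition_list_t *offsets =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_consumer_group_metadata_t *cgmd =
                rd_kafka_consumer_group_metadata(consumer);
        rd_kafka_error_t *error;

        /* The committed offset is the next offset to consume; 42 is
         * purely illustrative. */
        rd_kafka_topic_partition_list_add(offsets, "HelloTopic", 0)->offset = 42;

        error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmd, -1);

        rd_kafka_consumer_group_metadata_destroy(cgmd);
        rd_kafka_topic_partition_list_destroy(offsets);
        return error;
}
```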
- -*Note: there was no v1.4.3 librdkafka release* - - - -# librdkafka v1.4.2 - -v1.4.2 is a maintenance release with the following fixes and enhancements: - - * Fix produce/consume hang after partition goes away and comes back, - such as when a topic is deleted and re-created. - * Consumer: Reset the stored offset when partitions are un-assign()ed (fixes #2782). - This fixes the case where a manual offset-less commit() or the auto-committer - would commit a stored offset from a previous assignment before - a new message was consumed by the application. - * Probe known CA cert paths and set default `ssl.ca.location` accordingly - if OpenSSL is statically linked or `ssl.ca.location` is set to `probe`. - * Per-partition OffsetCommit errors were unhandled (fixes #2791). - * Seed the PRNG (random number generator) by default; the application may - override this with `enable.random.seed=false` (#2795). - * Fix stack overwrite (of 1 byte) when SaslHandshake MechCnt is zero. - * Align bundled c11 threads (tinycthreads) constants to glibc and musl (#2681). - * Fix return value of rd_kafka_test_fatal_error() (by @ckb42). - * Ensure CMake sets disabled defines to zero on Windows (@benesch). - - -*Note: there was no v1.4.1 librdkafka release* - - - - - -# Older releases - -See https://github.com/edenhill/librdkafka/releases diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CMakeLists.txt b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CMakeLists.txt deleted file mode 100644 index d1129bce..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CMakeLists.txt +++ /dev/null @@ -1,274 +0,0 @@ -cmake_minimum_required(VERSION 3.2) - -include("packaging/cmake/parseversion.cmake") -parseversion("src/rdkafka.h") - -project(RdKafka VERSION ${RDKAFKA_VERSION}) - -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/packaging/cmake/Modules/") - -# Options. No 'RDKAFKA_' prefix to match old C++ code. { - -# This option does not actually affect the build, only the C code -# (see 'rd_kafka_version_str'). CMake builds usually rely on the build type -# (Debug, Release, etc.) instead.
-option(WITHOUT_OPTIMIZATION "Disable optimization" OFF) - -option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF) -option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF) -set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile") -set(BUILT_WITH "CMAKE") - -# Toolchain { -list(APPEND BUILT_WITH "${CMAKE_C_COMPILER_ID}") -list(APPEND BUILT_WITH "${CMAKE_CXX_COMPILER_ID}") -# } - -# PkgConfig { -find_package(PkgConfig QUIET) -if(PkgConfig_FOUND) - set(WITH_PKGCONFIG ON) - list(APPEND BUILT_WITH "PKGCONFIG") -endif() -# } - -# LIBM { -include(CheckLibraryExists) -check_library_exists(m pow "" WITH_HDRHISTOGRAM) -if(WITH_HDRHISTOGRAM) - list(APPEND BUILT_WITH "HDRHISTOGRAM") -endif() -# } - -# ZLIB { -find_package(ZLIB QUIET) -if(ZLIB_FOUND) - set(with_zlib_default ON) -else() - set(with_zlib_default OFF) -endif() -option(WITH_ZLIB "With ZLIB" ${with_zlib_default}) -if(WITH_ZLIB) - list(APPEND BUILT_WITH "ZLIB") -endif() -# } - -# ZSTD { -find_package(ZSTD QUIET) -if(ZSTD_FOUND) - set(with_zstd_default ON) -else() - set(with_zstd_default OFF) -endif() -option(WITH_ZSTD "With ZSTD" ${with_zstd_default}) -if(WITH_ZSTD) - list(APPEND BUILT_WITH "ZSTD") -endif() -# } - -# LibDL { -try_compile( - WITH_LIBDL - "${CMAKE_CURRENT_BINARY_DIR}/try_compile" - "${TRYCOMPILE_SRC_DIR}/dlopen_test.c" - LINK_LIBRARIES "${CMAKE_DL_LIBS}" -) -if(WITH_LIBDL) - list(APPEND BUILT_WITH "LIBDL") -endif() -# } - -# WITH_PLUGINS { -if(WITH_LIBDL OR WIN32) - set(with_plugins_default ON) -else() - set(with_plugins_default OFF) -endif() -option(WITH_PLUGINS "With plugin support" ${with_plugins_default}) -if(WITH_PLUGINS) - list(APPEND BUILT_WITH "PLUGINS") -endif() -# } - -# OpenSSL { -if(WITH_BUNDLED_SSL) # option from 'h2o' parent project - set(with_ssl_default ON) -else() - find_package(OpenSSL QUIET) - if(OpenSSL_FOUND) - set(with_ssl_default ON) - else() - set(with_ssl_default OFF) - endif() -endif() -option(WITH_SSL "With SSL" ${with_ssl_default}) -if(WITH_SSL) - list(APPEND BUILT_WITH "SSL") -endif() -# } - -# SASL { -if(WIN32) - set(with_sasl_default ON) -else() - if(PkgConfig_FOUND) - pkg_check_modules(SASL libsasl2) - if(SASL_FOUND) - set(with_sasl_default ON) - else() - try_compile( - WITH_SASL_CYRUS_BOOL - "${CMAKE_CURRENT_BINARY_DIR}/try_compile" - "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c" - LINK_LIBRARIES "-lsasl2" - ) - if(WITH_SASL_CYRUS_BOOL) - set(with_sasl_default ON) - set(SASL_LIBRARIES "-lsasl2") - else() - set(with_sasl_default OFF) - endif() - endif() - endif() -endif() -option(WITH_SASL "With SASL" ${with_sasl_default}) -if(WITH_SASL) - if(SASL_FOUND) - link_directories(${SASL_LIBRARY_DIRS}) - endif() - if(WITH_SSL) - set(WITH_SASL_SCRAM ON) - set(WITH_SASL_OAUTHBEARER ON) - list(APPEND BUILT_WITH "SASL_SCRAM SASL_OAUTHBEARER") - endif() - if(NOT WIN32) - set(WITH_SASL_CYRUS ON) - list(APPEND BUILT_WITH "SASL_CYRUS") - endif() -endif() -# } - -# LZ4 { -option(ENABLE_LZ4_EXT "Enable external LZ4 library support" ON) -set(WITH_LZ4_EXT OFF) -if(ENABLE_LZ4_EXT) - find_package(LZ4) - if(LZ4_FOUND) - set(WITH_LZ4_EXT ON) - list(APPEND BUILT_WITH "LZ4_EXT") - else() - message(STATUS "Using bundled LZ4 implementation.") - endif() -endif() -# } - -option(RDKAFKA_BUILD_STATIC "Build static rdkafka library" OFF) -option(RDKAFKA_BUILD_EXAMPLES "Build examples" ON) -option(RDKAFKA_BUILD_TESTS "Build tests" ON) -if(WIN32) - option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON) -endif(WIN32) - -# In: -# * TRYCOMPILE_SRC_DIR -# Out: -# * 
HAVE_ATOMICS_32 -# * HAVE_ATOMICS_32_SYNC -# * HAVE_ATOMICS_64 -# * HAVE_ATOMICS_64_SYNC -# * HAVE_REGEX -# * HAVE_STRNDUP -# * HAVE_PTHREAD_SETNAME_GNU -# * HAVE_PTHREAD_SETNAME_DARWIN -# * HAVE_PTHREAD_SETNAME_FREEBSD -# * WITH_C11THREADS -# * WITH_CRC32C_HW -# * LINK_ATOMIC -include("packaging/cmake/try_compile/rdkafka_setup.cmake") -if(WITH_C11THREADS) - list(APPEND BUILT_WITH "C11THREADS") -endif() -if(WITH_CRC32C_HW) - list(APPEND BUILT_WITH "CRC32C_HW") -endif() - -set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") - -# In: -# * WITHOUT_OPTIMIZATION -# * ENABLE_DEVEL -# * ENABLE_REFCNT_DEBUG -# * HAVE_ATOMICS_32 -# * HAVE_ATOMICS_32_SYNC -# * HAVE_ATOMICS_64 -# * HAVE_ATOMICS_64_SYNC -# * WITH_ZLIB -# * WITH_SSL -# * WITH_SASL -# * HAVE_REGEX -# * HAVE_STRNDUP -# * HAVE_PTHREAD_SETNAME_GNU -# * HAVE_PTHREAD_SETNAME_DARWIN -# * HAVE_PTHREAD_SETNAME_FREEBSD -list(APPEND BUILT_WITH "SNAPPY") -list(APPEND BUILT_WITH "SOCKEM") -string(REPLACE ";" " " BUILT_WITH "${BUILT_WITH}") -configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h") - -# Installation (https://github.com/forexample/package-example) { - -include(GNUInstallDirs) - -set(config_install_dir "lib/cmake/${PROJECT_NAME}") - -set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") - -set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") -set(project_version "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") -set(targets_export_name "${PROJECT_NAME}Targets") -set(namespace "${PROJECT_NAME}::") - -include(CMakePackageConfigHelpers) - -# In: -# * targets_export_name -# * PROJECT_NAME -configure_package_config_file( - "packaging/cmake/Config.cmake.in" - "${project_config}" - INSTALL_DESTINATION "${config_install_dir}" -) - -write_basic_package_version_file( - "${project_version}" - VERSION ${PROJECT_VERSION} - COMPATIBILITY AnyNewerVersion -) - -install( - FILES "${project_config}" "${project_version}" "packaging/cmake/Modules/FindLZ4.cmake" - DESTINATION "${config_install_dir}" -) - -install( - EXPORT "${targets_export_name}" - NAMESPACE "${namespace}" - DESTINATION "${config_install_dir}" -) - -install( - FILES LICENSES.txt - DESTINATION "share/licenses/librdkafka" -) - -add_subdirectory(src) -add_subdirectory(src-cpp) - -if(RDKAFKA_BUILD_EXAMPLES) - add_subdirectory(examples) -endif() - -if(RDKAFKA_BUILD_TESTS) - enable_testing() - add_subdirectory(tests) -endif() diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CODE_OF_CONDUCT.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CODE_OF_CONDUCT.md deleted file mode 100644 index dbbde19c..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONFIGURATION.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONFIGURATION.md deleted file mode 100644 index aea22534..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONFIGURATION.md +++ /dev/null @@ -1,174 +0,0 @@ -# Configuration properties -## Global configuration properties - -Property | C/P | Range | Default | Importance | Description ------------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* -client.id | * | | rdkafka | low | Client identifier.
*Type: string* -metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* -bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* -message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests; the broker will enforce the topic's `max.message.bytes` limit (see Apache Kafka documentation).
*Type: integer* -message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | low | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
*Type: integer* -receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | medium | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.
*Type: integer* -max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | low | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* -max.in.flight | * | 1 .. 1000000 | 1000000 | low | Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* -topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.
*Type: integer* -metadata.max.age.ms | * | 1 .. 86400000 | 900000 | low | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3
*Type: integer* -topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | low | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
*Type: integer* -topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | low | **DEPRECATED** No longer used.
*Type: integer* -topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* -topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
*Type: integer* -topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* -debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* -socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
*Type: integer* -socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used.
*Type: integer* -socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0.
*Type: integer* -socket.receive.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket receive buffer size. System default is used if 0.
*Type: integer* -socket.keepalive.enable | * | true, false | false | low | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets
*Type: boolean* -socket.nagle.disable | * | true, false | false | low | Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.
*Type: boolean* -socket.max.fails | * | 0 .. 1000000 | 1 | low | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker becoming desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.
*Type: integer* -broker.address.ttl | * | 0 .. 86400000 | 1000 | low | How long to cache the broker address resolving results (milliseconds).
*Type: integer* -broker.address.family | * | any, v4, v6 | any | low | Allowed broker IP address families: any, v4, v6
*Type: enum value* -connections.max.idle.ms | * | 0 .. 2147483647 | 0 | medium | Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).
*Type: integer* -reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 0 | low | **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`.
*Type: integer* -reconnect.backoff.ms | * | 0 .. 3600000 | 100 | medium | The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.
*Type: integer* -reconnect.backoff.max.ms | * | 0 .. 3600000 | 10000 | medium | The maximum time to wait before reconnecting to a broker after the connection has been closed.
*Type: integer* -statistics.interval.ms | * | 0 .. 86400000 | 0 | high | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
*Type: integer* -enabled_events | * | 0 .. 2147483647 | 0 | low | See `rd_kafka_conf_set_events()`
*Type: integer* -error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: see dedicated API* -throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: see dedicated API* -stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: see dedicated API* -log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb())
*Type: see dedicated API* -log_level | * | 0 .. 7 | 6 | low | Logging level (syslog(3) levels)
*Type: integer* -log.queue | * | true, false | false | low | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
*Type: boolean* -log.thread.name | * | true, false | true | low | Print internal thread name in log messages (useful for debugging librdkafka internals)
*Type: boolean* -enable.random.seed | * | true, false | true | low | If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().
*Type: boolean* -log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value.
*Type: boolean* -background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb())
*Type: see dedicated API* -socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC
*Type: see dedicated API* -connect_cb | * | | | low | Socket connect callback
*Type: see dedicated API* -closesocket_cb | * | | | low | Socket close callback
*Type: see dedicated API* -open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: see dedicated API* -opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: see dedicated API* -default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: see dedicated API* -internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* -api.version.request | * | true, false | true | high | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
*Type: boolean* -api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests.
*Type: integer* -api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* -broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* -security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers.
*Type: enum value* -ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.
*Type: string* -ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* -ssl.sigalgs.list | * | | | low | The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* -ssl.key.location | * | | | low | Path to client's private key (PEM) used for authentication.
*Type: string* -ssl.key.password | * | | | low | Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)
*Type: string* -ssl.key.pem | * | | | low | Client's private key string (PEM format) used for authentication.
*Type: string* -ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.certificate.location | * | | | low | Path to client's public key (PEM) used for authentication.
*Type: string* -ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication.
*Type: string* -ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
*Type: string* -ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key.
*Type: string* -ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* -ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
*Type: string* -ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* -ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* -ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password.
*Type: string* -ssl.engine.location | * | | | low | Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.
*Type: string* -ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading engine.
*Type: string* -ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
*Type: see dedicated API* -enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
*Type: boolean* -ssl.endpoint.identification.algorithm | * | none, https | none | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* -ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: see dedicated API* -sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* -sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* -sasl.kerberos.service.name | * | | kafka | low | Kerberos principal name that Kafka runs as, not including /hostname@REALM
*Type: string* -sasl.kerberos.principal | * | | kafkaclient | low | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
*Type: string* -sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \|\| kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.
*Type: string* -sasl.kerberos.keytab | * | | | low | Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`.
*Type: string* -sasl.kerberos.min.time.before.relogin | * | 0 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.
*Type: integer* -sasl.username | * | | | high | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
*Type: string* -sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* -sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* -enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* -oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token.
*Type: see dedicated API* -plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* -interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API* -group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* -group.instance.id | C | | | medium | Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.
*Type: string* -partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
*Type: string* -session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* -heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval.
*Type: integer* -group.protocol.type | C | | consumer | low | Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* -coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* -max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information.
*Type: integer* -enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* -auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | medium | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
*Type: integer* -enable.auto.offset.store | C | true, false | true | high | Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.
*Type: boolean* -queued.min.messages | C | 1 .. 10000000 | 100000 | medium | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
*Type: integer* -queued.max.messages.kbytes | C | 1 .. 2097151 | 65536 | medium | Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* -fetch.wait.max.ms | C | 0 .. 300000 | 500 | low | Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.
*Type: integer* -fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | medium | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* -max.partition.fetch.bytes | C | 1 .. 1000000000 | 1048576 | medium | Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* -fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | medium | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
*Type: integer* -fetch.min.bytes | C | 1 .. 100000000 | 1 | low | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
*Type: integer* -fetch.error.backoff.ms | C | 0 .. 300000 | 500 | medium | How long to postpone the next fetch request for a topic+partition in case of a fetch error.
*Type: integer* -offset.store.method | C | none, file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* -isolation.level | C | read_uncommitted, read_committed | read_committed | high | Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.
*Type: enum value* -consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: see dedicated API* -rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
*Type: see dedicated API* -offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: see dedicated API* -enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* -check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* -allow.auto.create.topics | C | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version >= 0.11.0.0; for older broker versions only the broker configuration applies.
*Type: boolean* -client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
*Type: string* -transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
*Type: string* -transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
*Type: integer* -enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
*Type: boolean* -enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
*Type: boolean* -queue.buffering.max.messages | P | 1 .. 10000000 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
*Type: integer* -queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* -queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* -linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* -message.send.max.retries | P | 0 .. 2147483647 | 2147483647 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retries | P | 0 .. 2147483647 | 2147483647 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retry.backoff.ms | P | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request.
*Type: integer* -queue.buffering.backpressure.threshold | P | 1 .. 1000000 | 1 | low | The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
*Type: integer* -compression.codec | P | none, gzip, snappy, lz4, zstd | none | medium | Compression codec to use for compressing message sets. This is the default value for all topics; it may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics; it may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.
*Type: integer* -batch.size | P | 1 .. 2147483647 | 1000000 | medium | Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size; this ensures that messages exceeding batch.size are still produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.
*Type: integer* -delivery.report.only.error | P | true, false | false | low | Only provide delivery reports for failed messages.
*Type: boolean* -dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: see dedicated API* -dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) *Type: see dedicated API*
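In node-rdkafka these C-level callbacks surface as the `delivery-report` event once `dr_cb` is enabled, which is exactly what produces the `delivery-report err/content` lines in this sample's logs. A minimal sketch (the broker address is a placeholder):

```js
const Kafka = require('node-rdkafka');

const producer = new Kafka.Producer({
  'metadata.broker.list': 'localhost:9092', // placeholder
  'dr_cb': true                             // enable per-message delivery reports
  // 'delivery.report.only.error': true     // report failures only
});

producer.setPollInterval(100); // poll regularly so report events actually fire
producer.on('delivery-report', (err, report) => {
  // report carries topic, partition, offset, timestamp and size
  if (err) console.error('delivery failed:', err);
  else console.log('delivered:', report.topic, report.partition, report.offset);
});
producer.connect();
```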
-sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
*Type: integer* - - -## Topic configuration properties - -Property | C/P | Range | Default | Importance | Description ------------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -request.required.acks | P | -1 .. 1000 | -1 | high | This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* -acks | P | -1 .. 1000 | -1 | high | Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* -request.timeout.ms | P | 1 .. 900000 | 30000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* -message.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* -delivery.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured. *Type: integer*
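Topic-level properties such as `request.required.acks` and `message.timeout.ms` are passed to node-rdkafka through the producer constructor's second argument. A sketch with illustrative values; the broker address is a placeholder:

```js
const Kafka = require('node-rdkafka');

const producer = new Kafka.Producer(
  { 'metadata.broker.list': 'localhost:9092' }, // placeholder global config
  {
    'request.required.acks': -1,  // acks=all: wait for all in-sync replicas
    'message.timeout.ms': 300000  // give up on local delivery after 5 minutes
  }
);
```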
-queuing.strategy | P | fifo, lifo | fifo | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages.
*Type: enum value* -produce.offset.report | P | true, false | false | low | **DEPRECATED** No longer used.
*Type: boolean* -partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned). *Type: string*
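For example, a producer that must place keyed messages on the same partitions a Java client would pick can select `murmur2_random` here. A sketch; the broker address, topic name, and key are placeholders:

```js
const Kafka = require('node-rdkafka');

const producer = new Kafka.Producer(
  { 'metadata.broker.list': 'localhost:9092' }, // placeholder
  { 'partitioner': 'murmur2_random' }           // Java-producer-compatible hashing
);

producer.connect();
producer.on('ready', () => {
  // Same key => same partition, so per-key ordering is preserved.
  producer.produce('HelloTopic', null, Buffer.from('v1'), 'user-42', Date.now());
});
```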
-partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: see dedicated API* -msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: see dedicated API* -opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: see dedicated API* -compression.codec | P | none, gzip, snappy, lz4, zstd, inherit | inherit | high | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration.
*Type: enum value* -compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics; it may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -compression.level | P | -1 .. 12 | -1 | medium | Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.
*Type: integer* -auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* -enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* -auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | high | [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage.
*Type: integer* -auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'. *Type: enum value*
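On the consuming side, this property is supplied as a topic-level default. A sketch of a node-rdkafka consumer that starts from the earliest offset when its group has no committed position; the group id, broker address, and topic name are placeholders:

```js
const Kafka = require('node-rdkafka');

const consumer = new Kafka.KafkaConsumer(
  {
    'group.id': 'sample-group',               // placeholder
    'metadata.broker.list': 'localhost:9092'  // placeholder
  },
  { 'auto.offset.reset': 'earliest' }         // topic-level default
);

consumer.connect();
consumer.on('ready', () => {
  consumer.subscribe(['HelloTopic']);
  consumer.consume(); // flowing mode: emit 'data' as messages arrive
});
consumer.on('data', (msg) => console.log(msg.value.toString()));
```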
-offset.store.path | C | | . | low | **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version.
*Type: string* -offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | low | **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version.
*Type: integer* -offset.store.method | C | file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.).
*Type: enum value* -consume.callback.max.messages | C | 0 .. 1000000 | 0 | low | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited)
*Type: integer* - -### C/P legend: C = Consumer, P = Producer, * = both diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONTRIBUTING.md b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONTRIBUTING.md deleted file mode 100644 index b8cb2abf..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/CONTRIBUTING.md +++ /dev/null @@ -1,298 +0,0 @@ -# Contributing to librdkafka - -(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!) - -This document is intended to offer guidelines on how to best contribute to the -librdkafka project. This concerns new features as well as bug fixes and -general improvements. - -### License and copyright - -When contributing code, you agree to put your changes and new code under -the same license librdkafka is already using unless stated and agreed -otherwise. - -When changing existing source code, you do not alter the copyright of the -original file(s). The copyright will still be owned by the original creator(s) -or those who have been assigned copyright by the original author(s). - -By submitting a patch to the librdkafka project, you are assumed to have the right -to the code and to be allowed by your employer (or whoever holds the rights) to hand over that -patch/code to us. We will credit you for your changes as far as possible, to -give credit but also to keep a trace back to who made what changes. Please -always provide us with your full real name when contributing! - -Official librdkafka project maintainer(s) assume ownership of all accepted -submissions. - -## Write a good patch - -### Follow code style - -When writing C code, follow the code style already established in -the project. Consistent style makes code easier to read and mistakes less -likely to happen. - -See the end of this document for the C style guide to use in librdkafka. - - -### Write Separate Changes - -It is annoying when you get a huge patch from someone that is said to fix 511 -odd problems, but discussions and opinions don't agree with 510 of them - or -509 of them were already fixed in a different way. Then the person merging -this change needs to extract the single interesting patch from somewhere -within the huge pile of source, and that gives a lot of extra work. - -Preferably, each fix that corrects a problem should be in its own patch/commit -with its own description/commit message stating exactly what it corrects so -that all changes can be selectively applied by the maintainer or other -interested parties. - -Also, separate changes enable bisecting much better when we track problems -and regressions in the future. - -### Patch Against Recent Sources - -Please try to make your patches against the latest master branch. - -### Test Cases - -Bugfixes should also include a new test case in the regression test suite -that verifies the bug is fixed. -Create a new tests/00-.c file and -try to reproduce the issue in its simplest form. -Verify that the test case fails for earlier versions and passes with your -bugfix in place. - -New features and APIs should also result in an added test case. - -Submitted patches must pass all existing tests.
-For more information on the test suite see [tests/README.md] - - - -## How to get your changes into the main sources - -File a [pull request on github](https://github.com/edenhill/librdkafka/pulls) - -Your change will be reviewed and discussed there and you will be -expected to correct flaws pointed out and update accordingly, or the change -risks stalling and eventually just getting deleted without action. As a submitter -of a change, you are the owner of that change until it has been merged. - -Make sure to monitor your PR on github and answer questions and/or -fix nits/flaws. This is very important. We will take lack of replies as a -sign that you're not very eager to get your patch accepted and we tend to -simply drop such changes. - -When you adjust your pull requests after review, please squash the -commits so that we can review the full updated version more easily -and keep history cleaner. - -For example: - - # Interactive rebase to let you squash/fixup commits - $ git rebase -i master - - # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the - # first column. These will be silently integrated into the - # previous commit, so make sure to move the fixup-commit to - # the line beneath the parent commit. - - # Since this probably rewrote the history of previously pushed - # commits you will need to make a force push, which is usually - # a bad idea but works well for pull requests. - $ git push --force origin your_feature_branch - - -### Write good commit messages - -A short guide to how to write good commit messages. - - ---- start ---- - [area]: [short line describing the main effect] [(#issuenumber)] - -- empty line -- - [full description, no wider than 72 columns, that describes as much as - possible why this change is made, and possibly what things - it fixes and everything else that is related] - ---- stop ---- - -Example: - - cgrp: Restart query timer on all heartbeat failures (#10023) - - If unhandled errors were received in HeartbeatResponse - the cgrp could get stuck in a state where it would not - refresh its coordinator. - - -**Important**: Rebase your PR branch on top of master (`git rebase -i master`) - and squash interim commits (to make a clean and readable git history) - before pushing. Use force push to keep your history clean even after - the initial PR push. - -**Note**: Good PRs with bad commit messages or messy commit history, - such as "fixed review comment", will be squashed up into - a single commit with a proper commit message. - - -### Add changelog - -If the changes in the PR affect the end user in any way, such as for a user-visible -bug fix, new feature, API or doc change, etc., a release changelog item -needs to be added to [CHANGELOG.md](CHANGELOG.md) for the next release. - -Add a single line to the appropriate section (Enhancements, Fixes, ..) -outlining the change, an issue number (if any), and your name or GitHub -user id for attribution. - -E.g.: -``` -## Enhancements - * Improve commit() async parameter documentation (Paul Nit, #123) -``` - - - -# librdkafka C style guide - -## Function and globals naming - -Use self-explanatory hierarchical snake-case naming. -Pretty much all symbols should start with `rd_kafka_`, followed by -their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an -action (e.g., `find`, `get`, `clear`, ..). - - -## Variable naming - -For existing types use the type prefix as the variable name. -The type prefix is typically the first part of struct member fields.
-Example: - - * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker - variable names should be named `rkb` - - -For other types use reasonably concise but descriptive names. -`i` and `j` are typical int iterators. - -## Variable declaration - -Variables must be declared at the head of a scope; no in-line variable -declarations are allowed. - -## Indenting - -Use an 8-space indent, the same as the Linux kernel. -In emacs, use `c-set-style "linux"`. -For C++, use Google's C++ style. - -## Comments - -Use `/* .. */` comments, not `// ..` - -For functions, use doxygen syntax, e.g.: - - /** - * @brief - * .. - * @returns - */ - - -Make sure to comment non-obvious code and situations where the full -context of an operation is not easily graspable. - -Also make sure to update existing comments when the code changes. - - -## Line length - -Try hard to keep line length below 80 characters; when this is not possible, -exceed it with reason. - - -## Braces - -Braces go on the same line as their enveloping statement: - - int some_func (..) { - while (1) { - if (1) { - do something; - .. - } else { - do something else; - .. - } - } - - /* Single line scopes should not have braces */ - if (1) - hi(); - else if (2) - /* Say hello */ - hello(); - else - bye(); - - -## Spaces - -All expression parentheses should be prefixed and suffixed with a single space: - - int some_func (int a) { - - if (1) - ....; - - for (i = 0 ; i < 19 ; i++) { - - - } - } - - -Use space around operators: - - int a = 2; - - if (b >= 3) - c += 2; - -Except for these: - - d++; - --e; - - -## New block on new line - -New blocks should be on a new line: - - if (1) - new(); - else - old(); - - -## Parentheses - -Don't assume the reader knows C operator precedence by heart for complex -statements; add parentheses to ease readability. - - -## ifdef hell - -Avoid ifdefs as much as possible. -Platform support checking should be performed in configure.librdkafka. - - - - - -# librdkafka C++ style guide - -Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html) diff --git a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/Doxyfile b/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/Doxyfile deleted file mode 100644 index 65dcf523..00000000 --- a/quick-start-sample-codes/quick-start-sample-codes-nodejs/kafka-producer-nodejs/src/code/node_modules/node-rdkafka/deps/librdkafka/Doxyfile +++ /dev/null @@ -1,2375 +0,0 @@ -# Doxyfile 1.8.9.1 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag.
Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "librdkafka" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = "The Apache Kafka C/C++ client library" - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -#PROJECT_LOGO = kafka_logo.png - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = staging-docs - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. 
- -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. 
- -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = "locality=@par Thread restriction:" -ALIASES += "locks=@par Lock restriction:" -# Automatically escape @REALM in CONFIGURATION.md -ALIASES += "REALM=\@REALM" - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = YES - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. 
- -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. 
Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = YES - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. 
- -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. 
- -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = NO - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. 
If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. 
- -SHOW_NAMESPACES = NO - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# Treat all warnings as errors. -WARN_AS_ERROR = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. 
- -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. - -INPUT = mainpage.doxy INTRODUCTION.md CONFIGURATION.md STATISTICS.md src/rdkafka.h src-cpp/rdkafkacpp.h - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = NO - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. 
-# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = src - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS = YES
-
-# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# compiled with the --with-libclang option.
-# The default value is: NO.
-
-#CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-#CLANG_OPTIONS =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML the header file that includes any scripts and style sheets
-# that doxygen needs, which is dependent on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries 1 will produce a full collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a full expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME = "librdkafka documentation"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID = se.edenhill.librdkafka
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID = se.edenhill
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME = Magnus Edenhill
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler (hhc.exe). If non-empty,
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated
-# (YES) or that it should be included in the master .chm file (NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated
-# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE = se.edenhill.librdkafka
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID = se.edenhill.librdkafka
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW = YES
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE = 1
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH = 250
-
-# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want to formulas look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/