from __future__ import absolute_import

import atexit
import copy
import logging
import socket
import threading
import time
import weakref

from .. import errors as Errors
from ..client_async import KafkaClient, selectors
from ..metrics import MetricConfig, Metrics
from ..partitioner.default import DefaultPartitioner
from ..protocol.message import Message, MessageSet
from ..serializer import Serializer
from ..structs import TopicPartition
from .future import FutureRecordMetadata, FutureProduceResult
from .record_accumulator import AtomicInteger, RecordAccumulator
from .sender import Sender

log = logging.getLogger(__name__)
PRODUCER_CLIENT_ID_SEQUENCE = AtomicInteger()


class KafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer is thread safe and sharing a single producer instance across
    threads will generally be faster than having multiple instances.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background I/O
    thread that is responsible for turning these records into requests and
    transmitting them to the cluster.

    :meth:`~kafka.KafkaProducer.send` is asynchronous. When called it adds the
    record to a buffer of pending record sends and immediately returns. This
    allows the producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in blocking on the full commit of
    the record, the slowest but most durable setting.

    If the request fails, the producer can automatically retry, unless
    'retries' is configured to 0. Enabling retries also opens up the
    possibility of duplicates (see the documentation on message
    delivery semantics for details:
    http://kafka.apache.org/documentation.html#semantics
    ).

    The producer maintains buffers of unsent records for each partition. These
    buffers are of a size specified by the 'batch_size' config. Making this
    larger can result in more batching, but requires more memory (since we will
    generally have one of these buffers for each active partition).

    By default a buffer is available to send immediately even if there is
    additional unused space in the buffer. However if you want to reduce the
    number of requests you can set 'linger_ms' to something greater than 0.
    This will instruct the producer to wait up to that number of milliseconds
    before sending a request in hope that more records will arrive to fill up
    the same batch. This is analogous to Nagle's algorithm in TCP. Note that
    records that arrive close together in time will generally batch together
    even with linger_ms=0 so under heavy load batching will occur regardless of
    the linger configuration; however setting this to something larger than 0
    can lead to fewer, more efficient requests when not under maximal load at
    the cost of a small amount of latency.

    The buffer_memory controls the total amount of memory available to the
    producer for buffering. If records are sent faster than they can be
    transmitted to the server then this buffer space will be exhausted. When
    the buffer space is exhausted additional send calls will block.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'kafka-python-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:
            0: Producer will not wait for any acknowledgment from the server.
                The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: Wait for leader to write the record to its local log only.
                Broker will respond without awaiting full acknowledgement from
                all followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: Wait for the full set of in-sync replicas to write the record.
                This guarantees that the record will not be lost as long as at
                least one in-sync replica remains alive. This is the strongest
                available guarantee.
            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        retries (int): Setting a value greater than zero will cause the client
            to resend any record whose send fails with a potentially transient
            error. Note that this retry is no different than if the client
            resent the record upon receiving the error. Allowing retries
            without setting max_in_flight_requests_per_connection to 1 will
            potentially change the ordering of records because if two batches
            are sent to a single partition, and the first fails and is retried
            but the second succeeds, then the records in the second batch may
            appear first.
            Default: 0.
        batch_size (int): Requests sent to brokers will contain multiple
            batches, one for each partition with data available to be sent.
            A small batch size will make batching less common and may reduce
            throughput (a batch size of zero will disable batching entirely).
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, rather than immediately sending out a
            record the producer will wait for up to the given delay to allow
            other records to be sent so that the sends can be batched together.
            This can be thought of as analogous to Nagle's algorithm in TCP.
            This setting gives the upper bound on the delay for batching: once
            we get batch_size worth of records for a partition it will be sent
            immediately regardless of this setting, however if we have fewer
            than this many bytes accumulated for this partition we will
            'linger' for the specified time waiting for more records to show
            up. This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
            would have the effect of reducing the number of requests sent but
            would add up to 5ms of latency to records sent in the absence of
            load. Default: 0.
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        buffer_memory (int): The total bytes of memory the producer should use
            to buffer records waiting to be sent to the server. If records are
            sent faster than they can be delivered to the server the producer
            will block up to max_block_ms, raising an exception on timeout.
            In the current implementation, this setting is an approximation.
            Default: 33554432 (32MB)
        max_block_ms (int): Number of milliseconds to block during
            :meth:`~kafka.KafkaProducer.send` and
            :meth:`~kafka.KafkaProducer.partitions_for`. These methods can be
            blocked either because the buffer is full or metadata unavailable.
            Blocking in the user-supplied serializers or partitioner will not be
            counted against this timeout. Default: 60000.
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Note that if this setting is set to be greater
            than 1 and there are failed sends, there is a risk of message
            re-ordering due to retries (i.e., if retries are enabled).
            Default: 5.
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
            Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): flag to configure whether ssl handshake
            should verify that the certificate matches the broker's hostname.
            default: true.
        ssl_cafile (str): optional filename of ca file to use in certificate
            verification. default: none.
        ssl_certfile (str): optional filename of file in pem format containing
            the client certificate, as well as any ca certificates needed to
            establish the certificate's authenticity. default: none.
        ssl_keyfile (str): optional filename containing the client private key.
            default: none.
        ssl_password (str): optional password to be used when loading the
            certificate chain. default: none.
        ssl_crlfile (str): optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            default: none.
        api_version (tuple): Specify which Kafka API version to use. If set to
            None, the client will attempt to infer the broker version by probing
            various APIs. For a full list of supported versions, see
            KafkaClient.API_VERSIONS. Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to throw a
            timeout exception from the constructor when checking the broker
            api version. Only applies if api_version set to 'auto'
        metric_reporters (list): A list of classes to use as metrics reporters.
            Implementing the AbstractMetricsReporter interface allows plugging
            in classes that will be notified of new metric creation. Default: []
        metrics_num_samples (int): The number of samples maintained to compute
            metrics. Default: 2
        metrics_sample_window_ms (int): The maximum age in milliseconds of
            samples used to compute metrics. Default: 30000
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        sasl_mechanism (str): string picking sasl mechanism when security_protocol
            is SASL_PLAINTEXT or SASL_SSL. Currently only PLAIN is supported.
            Default: None
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Default: None
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Default: None

    Note:
        Configuration parameters are described in more detail at
        https://kafka.apache.org/0100/configuration.html#producerconfigs
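
    Example:
        A minimal usage sketch, illustrative only. It assumes a broker is
        reachable at localhost:9092 and that 'my-topic' exists (or topic
        auto-creation is enabled on the cluster)::

            from kafka import KafkaProducer

            producer = KafkaProducer(
                bootstrap_servers='localhost:9092',
                value_serializer=lambda v: v.encode('utf-8'),
                acks='all',    # block on the full ISR commit (most durable)
                linger_ms=5)   # trade up to 5ms latency for larger batches

            future = producer.send('my-topic', 'hello, kafka')
            record_metadata = future.get(timeout=10)  # block until acked or error
            producer.flush()
            producer.close()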
    """
    DEFAULT_CONFIG = {
        'bootstrap_servers': 'localhost',
        'client_id': None,
        'key_serializer': None,
        'value_serializer': None,
        'acks': 1,
        'compression_type': None,
        'retries': 0,
        'batch_size': 16384,
        'linger_ms': 0,
        'partitioner': DefaultPartitioner(),
        'buffer_memory': 33554432,
        'connections_max_idle_ms': 9 * 60 * 1000,
        'max_block_ms': 60000,
        'max_request_size': 1048576,
        'metadata_max_age_ms': 300000,
        'retry_backoff_ms': 100,
        'request_timeout_ms': 30000,
        'receive_buffer_bytes': None,
        'send_buffer_bytes': None,
        'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
        'reconnect_backoff_ms': 50,
        'reconnect_backoff_max_ms': 1000,
        'max_in_flight_requests_per_connection': 5,
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'ssl_crlfile': None,
        'ssl_password': None,
        'api_version': None,
        'api_version_auto_timeout_ms': 2000,
        'metric_reporters': [],
        'metrics_num_samples': 2,
        'metrics_sample_window_ms': 30000,
        'selector': selectors.DefaultSelector,
        'sasl_mechanism': None,
        'sasl_plain_username': None,
        'sasl_plain_password': None,
    }

    def __init__(self, **configs):
        log.debug("Starting the Kafka producer")  # trace
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % configs

        if self.config['client_id'] is None:
            self.config['client_id'] = 'kafka-python-producer-%s' % \
                                       PRODUCER_CLIENT_ID_SEQUENCE.increment()

        if self.config['acks'] == 'all':
            self.config['acks'] = -1

        # api_version was previously a str. accept old format for now
        if isinstance(self.config['api_version'], str):
            deprecated = self.config['api_version']
            if deprecated == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(map(int, deprecated.split('.')))
            log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
                        str(self.config['api_version']), deprecated)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)

        client = KafkaClient(metrics=self._metrics, metric_group_prefix='producer',
                             **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = client.config['api_version']

        if self.config['compression_type'] == 'lz4':
            assert self.config['api_version'] >= (0, 8, 2), 'LZ4 Requires >= Kafka 0.8.2 Brokers'

        message_version = 1 if self.config['api_version'] >= (0, 10) else 0
        self._accumulator = RecordAccumulator(message_version=message_version,
                                              metrics=self._metrics, **self.config)
        self._metadata = client.cluster
        guarantee_message_order = bool(self.config['max_in_flight_requests_per_connection'] == 1)
        self._sender = Sender(client, self._metadata,
                              self._accumulator, self._metrics,
                              guarantee_message_order=guarantee_message_order,
                              **self.config)
        self._sender.daemon = True
        self._sender.start()
        self._closed = False

        self._cleanup = self._cleanup_factory()
        atexit.register(self._cleanup)
        log.debug("Kafka producer started")

    def _cleanup_factory(self):
        """Build a cleanup closure that doesn't increase our ref count"""
        _self = weakref.proxy(self)
        def wrapper():
            try:
                _self.close(timeout=0)
            except (ReferenceError, AttributeError):
                pass
        return wrapper

    def _unregister_cleanup(self):
        if getattr(self, '_cleanup', None):
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self._cleanup)  # pylint: disable=no-member

            # py2 requires removing from private attribute...
            else:
                # ValueError on list.remove() if the exithandler no longer exists
                # but that is fine here
                try:
                    atexit._exithandlers.remove(  # pylint: disable=no-member
                        (self._cleanup, (), {}))
                except ValueError:
                    pass
        self._cleanup = None

    def __del__(self):
        self.close(timeout=0)

    def close(self, timeout=None):
        """Close this producer.

        Arguments:
            timeout (float, optional): timeout in seconds to wait for completion.
        """
        # drop our atexit handler now to avoid leaks
        self._unregister_cleanup()

        if not hasattr(self, '_closed') or self._closed:
            log.info('Kafka producer closed')
            return
        if timeout is None:
            # threading.TIMEOUT_MAX is available in Python3.3+
            timeout = getattr(threading, 'TIMEOUT_MAX', 999999999)
        if getattr(threading, 'TIMEOUT_MAX', False):
            assert 0 <= timeout <= getattr(threading, 'TIMEOUT_MAX')
        else:
            assert timeout >= 0

        log.info("Closing the Kafka producer with %s secs timeout.", timeout)
        #first_exception = AtomicReference() # this will keep track of the first encountered exception
        invoked_from_callback = bool(threading.current_thread() is self._sender)
        if timeout > 0:
            if invoked_from_callback:
                log.warning("Overriding close timeout %s secs to 0 in order to"
                            " prevent useless blocking due to self-join. This"
                            " means you have incorrectly invoked close with a"
                            " non-zero timeout from the producer call-back.",
                            timeout)
            else:
                # Try to close gracefully.
                if self._sender is not None:
                    self._sender.initiate_close()
                    self._sender.join(timeout)

        if self._sender is not None and self._sender.is_alive():
            log.info("Proceeding to force close the producer since pending"
                     " requests could not be completed within timeout %s.",
                     timeout)
            self._sender.force_close()
            # Only join the sender thread when not calling from callback.
            if not invoked_from_callback:
                self._sender.join()

        self._metrics.close()
        try:
            self.config['key_serializer'].close()
        except AttributeError:
            pass
        try:
            self.config['value_serializer'].close()
        except AttributeError:
            pass
        self._closed = True
        log.debug("The Kafka producer has closed.")

    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        max_wait = self.config['max_block_ms'] / 1000.0
        return self._wait_on_metadata(topic, max_wait)

    def send(self, topic, value=None, key=None, partition=None, timestamp_ms=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970 UTC)
                to use as the message timestamp. Defaults to current time.

        Returns:
            FutureRecordMetadata: resolves to RecordMetadata

        Raises:
            KafkaTimeoutError: if unable to fetch topic metadata, or unable
                to obtain memory buffer prior to configured max_block_ms
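
        Example:
            A minimal sketch, illustrative only. It assumes ``producer`` is a
            KafkaProducer created without key/value serializers (so key and
            value are raw bytes) and that KafkaError has been imported from
            kafka.errors::

                future = producer.send('my-topic', key=b'ping', value=b'1234')
                try:
                    record_metadata = future.get(timeout=10)  # block for the ack
                except KafkaError:
                    pass  # delivery failed; decide whether to log or retry
                else:
                    print(record_metadata.topic,
                          record_metadata.partition,
                          record_metadata.offset)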
        """
        assert value is not None or self.config['api_version'] >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), 'Need at least one: key or value'
        key_bytes = value_bytes = None
        try:
            # first make sure the metadata for the topic is
            # available
            self._wait_on_metadata(topic, self.config['max_block_ms'] / 1000.0)

            key_bytes = self._serialize(
                self.config['key_serializer'],
                topic, key)
            value_bytes = self._serialize(
                self.config['value_serializer'],
                topic, value)

            partition = self._partition(topic, partition, key, value,
                                        key_bytes, value_bytes)

            message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
            if key_bytes is not None:
                message_size += len(key_bytes)
            if value_bytes is not None:
                message_size += len(value_bytes)
            self._ensure_valid_record_size(message_size)

            tp = TopicPartition(topic, partition)
            if timestamp_ms is None:
                timestamp_ms = int(time.time() * 1000)
            log.debug("Sending (key=%r value=%r) to %s", key, value, tp)
            result = self._accumulator.append(tp, timestamp_ms,
                                              key_bytes, value_bytes,
                                              self.config['max_block_ms'])
            future, batch_is_full, new_batch_created = result
            if batch_is_full or new_batch_created:
                log.debug("Waking up the sender since %s is either full or"
                          " getting a new batch", tp)
                self._sender.wakeup()

            return future
        # handling exceptions and record the errors;
        # for API exceptions return them in the future,
        # for other exceptions raise directly
        except Errors.KafkaTimeoutError:
            raise
        except AssertionError:
            raise
        except Exception as e:
            log.debug("Exception occurred during message send: %s", e)
            return FutureRecordMetadata(
                FutureProduceResult(TopicPartition(topic, partition)),
                -1, None, None,
                len(key_bytes) if key_bytes is not None else -1,
                len(value_bytes) if value_bytes is not None else -1
            ).failure(e)

    def flush(self, timeout=None):
        """
        Invoking this method makes all buffered records immediately available
        to send (even if linger_ms is greater than 0) and blocks on the
        completion of the requests associated with these records. The
        post-condition of :meth:`~kafka.KafkaProducer.flush` is that any
        previously sent record will have completed
        (e.g. Future.is_done() == True). A request is considered completed when
        either it is successfully acknowledged according to the 'acks'
        configuration for the producer, or it results in an error.

        Other threads can continue sending messages while one thread is blocked
        waiting for a flush call to complete; however, no guarantee is made
        about the completion of messages sent after the flush call begins.

        Arguments:
            timeout (float, optional): timeout in seconds to wait for completion.

        Raises:
            KafkaTimeoutError: failure to flush buffered records within the
                provided timeout
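
        Example:
            A minimal sketch, illustrative only. It assumes ``producer`` is an
            existing KafkaProducer instance whose values need no serializer::

                for i in range(100):
                    producer.send('my-topic', ('msg %d' % i).encode('utf-8'))
                # Block until all 100 sends are acknowledged (or errored);
                # raises KafkaTimeoutError if that takes longer than 30 secs.
                producer.flush(timeout=30)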
        """
        log.debug("Flushing accumulated records in producer.")  # trace
        self._accumulator.begin_flush()
        self._sender.wakeup()
        self._accumulator.await_flush_completion(timeout=timeout)

    def _ensure_valid_record_size(self, size):
        """Validate that the record size isn't too large."""
        if size > self.config['max_request_size']:
            raise Errors.MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % size)
        if size > self.config['buffer_memory']:
            raise Errors.MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the total memory buffer you have configured with the"
                " buffer_memory configuration." % size)

    def _wait_on_metadata(self, topic, max_wait):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for
            max_wait (float): maximum time in secs for waiting on the metadata

        Returns:
            set: partition ids for the topic

        Raises:
            KafkaTimeoutError: if partitions for topic were not obtained before
                specified max_wait timeout
        """
        # add topic to metadata topic list if it is not there already.
        self._sender.add_topic(topic)
        begin = time.time()
        elapsed = 0.0
        metadata_event = None
        while True:
            partitions = self._metadata.partitions_for_topic(topic)
            if partitions is not None:
                return partitions

            if not metadata_event:
                metadata_event = threading.Event()

            log.debug("Requesting metadata update for topic %s", topic)

            metadata_event.clear()
            future = self._metadata.request_update()
            future.add_both(lambda e, *args: e.set(), metadata_event)
            self._sender.wakeup()
            metadata_event.wait(max_wait - elapsed)
            elapsed = time.time() - begin
            if not metadata_event.is_set():
                raise Errors.KafkaTimeoutError(
                    "Failed to update metadata after %.1f secs." % max_wait)
            elif topic in self._metadata.unauthorized_topics:
                raise Errors.TopicAuthorizationFailedError(topic)
            else:
                log.debug("_wait_on_metadata woke after %s secs.", elapsed)

    def _serialize(self, f, topic, data):
        if not f:
            return data
        if isinstance(f, Serializer):
            return f.serialize(topic, data)
        return f(data)

    def _partition(self, topic, partition, key, value,
                   serialized_key, serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), 'Unrecognized partition'
            return partition

        all_partitions = sorted(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self.config['partitioner'](serialized_key,
                                          all_partitions,
                                          available)

    def metrics(self, raw=False):
        """Warning: this is an unstable interface.
        It may change in future releases without warning"""
        if raw:
            return self._metrics.metrics

        metrics = {}
        for k, v in self._metrics.metrics.items():
            if k.group not in metrics:
                metrics[k.group] = {}
            if k.name not in metrics[k.group]:
                metrics[k.group][k.name] = {}
            metrics[k.group][k.name] = v.value()
        return metrics