# coding: utf-8
import logging
import os
from datetime import timedelta
from enum import Enum
from random import random
from time import sleep
from typing import Union, List
import rx
from rx import operators as ops, Observable
from rx.scheduler import ThreadPoolScheduler
from rx.subject import Subject
from influxdb_client import WritePrecision, WriteService
from influxdb_client.client.abstract_client import AbstractClient
from influxdb_client.client.write.point import Point, DEFAULT_WRITE_PRECISION, _ESCAPE_KEY
from influxdb_client.rest import ApiException

logger = logging.getLogger(__name__)


class WriteType(Enum):
batching = 1
asynchronous = 2
    synchronous = 3


class WriteOptions(object):
def __init__(self, write_type: WriteType = WriteType.batching,
batch_size=1_000, flush_interval=1_000,
jitter_interval=0,
retry_interval=1_000,
write_scheduler=ThreadPoolScheduler(max_workers=1)) -> None:
"""
Creates write api configuration.
:param write_type: methods of write (batching, asynchronous, synchronous)
:param batch_size: the number of data point to collect in batch
:param flush_interval: flush data at least in this interval
:param jitter_interval: this is primarily to avoid large write spikes for users running a large number of
client instances ie, a jitter of 5s and flush duration 10s means flushes will happen every 10-15s.
:param retry_interval: the time to wait before retry unsuccessful write
:param write_scheduler:
"""
self.write_type = write_type
self.batch_size = batch_size
self.flush_interval = flush_interval
self.jitter_interval = jitter_interval
self.retry_interval = retry_interval
self.write_scheduler = write_scheduler
def __getstate__(self):
state = self.__dict__.copy()
# Remove write scheduler
del state['write_scheduler']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Init default write Scheduler
        self.write_scheduler = ThreadPoolScheduler(max_workers=1)


SYNCHRONOUS = WriteOptions(write_type=WriteType.synchronous)
ASYNCHRONOUS = WriteOptions(write_type=WriteType.asynchronous)
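

# Example (illustrative sketch, not part of this module): configuring a batching
# write API. The URL, token, and org values below are placeholders.
#
#   from influxdb_client import InfluxDBClient
#
#   client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
#   write_api = client.write_api(write_options=WriteOptions(batch_size=500,
#                                                           flush_interval=10_000,
#                                                           jitter_interval=2_000,
#                                                           retry_interval=5_000))
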
class PointSettings(object):
def __init__(self, **default_tags) -> None:
"""
Creates point settings for write api.
:param default_tags: Default tags which will be added to each point written by api.
"""
self.defaultTags = dict()
for key, val in default_tags.items():
self.add_default_tag(key, val)
@staticmethod
def _get_value(value):
if value.startswith("${env."):
return os.environ.get(value[6:-1])
return value
def add_default_tag(self, key, value) -> None:
self.defaultTags[key] = self._get_value(value)
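
# Example (illustrative): default tags added to every written point; the tag
# names and the DATA_CENTER environment variable are placeholders. Values of
# the form "${env.NAME}" are resolved from the environment by _get_value.
#
#   settings = PointSettings(customer="california-miner", data_center="${env.DATA_CENTER}")
#   write_api = client.write_api(write_options=SYNCHRONOUS, point_settings=settings)

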
class _BatchItem(object):
def __init__(self, key, data, size=1) -> None:
self.key = key
self.data = data
self.size = size
    def __str__(self) -> str:
        return '_BatchItem[key:\'{}\', \'{}\']' \
            .format(str(self.key), str(self.size))


class _BatchItemKey(object):
def __init__(self, bucket, org, precision=DEFAULT_WRITE_PRECISION) -> None:
self.bucket = bucket
self.org = org
self.precision = precision
    def __hash__(self) -> int:
        return hash((self.bucket, self.org, self.precision))

    def __eq__(self, o: object) -> bool:
        return isinstance(o, self.__class__) \
            and self.bucket == o.bucket and self.org == o.org and self.precision == o.precision

    def __str__(self) -> str:
        return '_BatchItemKey[bucket:\'{}\', org:\'{}\', precision:\'{}\']' \
            .format(str(self.bucket), str(self.org), str(self.precision))


class _BatchResponse(object):
def __init__(self, data: _BatchItem, exception=None):
self.data = data
self.exception = exception
    def __str__(self) -> str:
        return '_BatchResponse[status:\'{}\', \'{}\']' \
            .format("failed" if self.exception else "success", str(self.data))


def _body_reduce(batch_items):
return b'\n'.join(map(lambda batch_item: batch_item.data, batch_items))
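
# For instance, two batch items with data b'm1 f=1' and b'm2 f=2' reduce to the
# single write body b'm1 f=1\nm2 f=2'.

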
class WriteApi(AbstractClient):
def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions(),
point_settings: PointSettings = PointSettings()) -> None:
self._influxdb_client = influxdb_client
self._write_service = WriteService(influxdb_client.api_client)
self._write_options = write_options
self._point_settings = point_settings
if influxdb_client.default_tags:
for key, value in influxdb_client.default_tags.items():
self._point_settings.add_default_tag(key, value)
if self._write_options.write_type is WriteType.batching:
            # Define a Subject that listens to incoming data and produces writes into InfluxDB
self._subject = Subject()
self._disposable = self._subject.pipe(
                # Split incoming data into windows by batch_size or flush_interval
ops.window_with_time_or_count(count=write_options.batch_size,
timespan=timedelta(milliseconds=write_options.flush_interval)),
# Map window into groups defined by 'organization', 'bucket' and 'precision'
ops.flat_map(lambda window: window.pipe(
# Group window by 'organization', 'bucket' and 'precision'
ops.group_by(lambda batch_item: batch_item.key),
                    # Create a batch (concatenate line protocols with \n)
ops.map(lambda group: group.pipe(
ops.to_iterable(),
ops.map(lambda xs: _BatchItem(key=group.key, data=_body_reduce(xs), size=len(xs))))),
ops.merge_all())),
                # Write data into InfluxDB (with the possibility to retry if it fails)
ops.filter(lambda batch: batch.size > 0),
ops.map(mapper=lambda batch: self._retryable(data=batch, delay=self._jitter_delay())),
ops.merge_all()) \
.subscribe(self._on_next, self._on_error, self._on_complete)
else:
self._subject = None
self._disposable = None
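
    # Note (illustrative summary): in batching mode the pipeline above windows
    # incoming _BatchItem objects by batch_size/flush_interval, groups each
    # window by its _BatchItemKey (bucket, org, precision), joins every group
    # into one line protocol body, and writes each non-empty body with retries.
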
def write(self, bucket: str, org: str = None,
record: Union[
str, List['str'], Point, List['Point'], dict, List['dict'], bytes, List['bytes'], Observable] = None,
write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> None:
"""
Writes time-series data into influxdb.
:param str org: specifies the destination organization for writes; take either the ID or Name interchangeably; if both orgID and org are specified, org takes precedence. (required)
:param str bucket: specifies the destination bucket for writes (required)
:param WritePrecision write_precision: specifies the precision for the unix timestamps within the body line-protocol
:param record: Points, line protocol, Pandas DataFrame, RxPY Observable to write
:param data_frame_measurement_name: name of measurement for writing Pandas DataFrame
:param data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
"""
if org is None:
org = self._influxdb_client.org
        if self._point_settings.defaultTags and record is not None:
            for key, val in self._point_settings.defaultTags.items():
                # apply the default tags to every supported record shape
                if isinstance(record, dict):
                    record.setdefault("tags", {})[key] = val
                elif isinstance(record, Point):
                    record.tag(key, val)
                else:
                    for r in record:
                        if isinstance(r, dict):
                            r.setdefault("tags", {})[key] = val
                        elif isinstance(r, Point):
                            r.tag(key, val)
if self._write_options.write_type is WriteType.batching:
return self._write_batching(bucket, org, record,
write_precision, **kwargs)
final_string = self._serialize(record, write_precision, **kwargs)
        _async_req = self._write_options.write_type == WriteType.asynchronous
return self._post_write(_async_req, bucket, org, final_string, write_precision)
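
    # Example (illustrative): the record shapes accepted by write(). The bucket
    # and measurement/tag/field names below are placeholders.
    #
    #   from influxdb_client import Point
    #
    #   write_api.write(bucket="my-bucket", record="h2o,location=prague level=1.5")
    #   write_api.write(bucket="my-bucket",
    #                   record=Point("h2o").tag("location", "prague").field("level", 1.5))
    #   write_api.write(bucket="my-bucket",
    #                   record={"measurement": "h2o", "tags": {"location": "prague"},
    #                           "fields": {"level": 1.5}, "time": 1588878820})
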
def flush(self):
# TODO
pass
def __del__(self):
if self._subject:
self._subject.on_completed()
self._subject.dispose()
self._subject = None
            # Wait for writing to finish
while not self._disposable.is_disposed:
sleep(0.1)
if self._disposable:
self._disposable = None
def _serialize(self, record, write_precision, **kwargs) -> bytes:
_result = b''
if isinstance(record, bytes):
_result = record
elif isinstance(record, str):
_result = record.encode("utf-8")
elif isinstance(record, Point):
_result = self._serialize(record.to_line_protocol(), write_precision, **kwargs)
elif isinstance(record, dict):
_result = self._serialize(Point.from_dict(record, write_precision=write_precision),
write_precision, **kwargs)
elif 'DataFrame' in type(record).__name__:
_data = self._data_frame_to_list_of_points(record, precision=write_precision, **kwargs)
_result = self._serialize(_data, write_precision, **kwargs)
elif isinstance(record, list):
_result = b'\n'.join([self._serialize(item, write_precision,
**kwargs) for item in record])
return _result
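
    # For instance (illustrative), a dict record such as
    #   {"measurement": "h2o", "tags": {"location": "prague"}, "fields": {"level": 1.5}, "time": 1}
    # serializes to the line protocol bytes b'h2o,location=prague level=1.5 1'.
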
def _write_batching(self, bucket, org, data,
precision=DEFAULT_WRITE_PRECISION,
**kwargs):
_key = _BatchItemKey(bucket, org, precision)
if isinstance(data, bytes):
self._subject.on_next(_BatchItem(key=_key, data=data))
elif isinstance(data, str):
self._write_batching(bucket, org, data.encode("utf-8"),
precision, **kwargs)
elif isinstance(data, Point):
self._write_batching(bucket, org, data.to_line_protocol(),
precision, **kwargs)
elif isinstance(data, dict):
self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision),
precision, **kwargs)
elif 'DataFrame' in type(data).__name__:
self._write_batching(bucket, org, self._data_frame_to_list_of_points(data, precision, **kwargs),
precision, **kwargs)
elif isinstance(data, list):
for item in data:
self._write_batching(bucket, org, item, precision, **kwargs)
elif isinstance(data, Observable):
data.subscribe(lambda it: self._write_batching(bucket, org, it, precision, **kwargs))
return None
def _itertuples(self, data_frame):
cols = [data_frame.iloc[:, k] for k in range(len(data_frame.columns))]
return zip(data_frame.index, *cols)
def _data_frame_to_list_of_points(self, data_frame, precision, **kwargs):
from ..extras import pd, np
if not isinstance(data_frame, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(data_frame)))
if 'data_frame_measurement_name' not in kwargs:
raise TypeError('"data_frame_measurement_name" is a Required Argument')
if isinstance(data_frame.index, pd.PeriodIndex):
data_frame.index = data_frame.index.to_timestamp()
else:
data_frame.index = pd.to_datetime(data_frame.index)
if data_frame.index.tzinfo is None:
data_frame.index = data_frame.index.tz_localize('UTC')
measurement_name = kwargs.get('data_frame_measurement_name')
data_frame_tag_columns = kwargs.get('data_frame_tag_columns')
data_frame_tag_columns = set(data_frame_tag_columns or [])
tags = []
fields = []
if self._point_settings.defaultTags:
for key, value in self._point_settings.defaultTags.items():
data_frame[key] = value
data_frame_tag_columns.add(key)
for index, (key, value) in enumerate(data_frame.dtypes.items()):
key = str(key).translate(_ESCAPE_KEY)
if key in data_frame_tag_columns:
tags.append(f"{key}={{p[{index + 1}].translate(_ESCAPE_KEY)}}")
elif issubclass(value.type, np.integer):
fields.append(f"{key}={{p[{index + 1}]}}i")
            # np.floating covers all float dtypes (np.float is a deprecated alias of the builtin float)
            elif issubclass(value.type, (np.floating, np.bool_)):
fields.append(f"{key}={{p[{index + 1}]}}")
else:
fields.append(f"{key}=\"{{p[{index + 1}].translate(_ESCAPE_KEY)}}\"")
fmt = (f'{measurement_name}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
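        # 'fmt' is an f-string template over a row tuple 'p' (p[0] is the timestamp);
        # eval compiles it once, so each row maps to line protocol with a single format call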
f = eval("lambda p: f'{}'".format(''.join(fmt)))
return list(map(f, self._itertuples(data_frame)))
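
    # Example (illustrative): writing a DataFrame; names are placeholders. Each
    # row becomes one line of line protocol with the DatetimeIndex as timestamp.
    #
    #   import pandas as pd
    #
    #   df = pd.DataFrame(data={"level": [1.5, 2.5], "location": ["prague", "brno"]},
    #                     index=pd.to_datetime(["2020-04-05 00:00", "2020-04-05 01:00"]))
    #   write_api.write(bucket="my-bucket", record=df,
    #                   data_frame_measurement_name="h2o",
    #                   data_frame_tag_columns=["location"])
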
def _http(self, batch_item: _BatchItem):
logger.debug("Write time series data into InfluxDB: %s", batch_item)
self._post_write(False, batch_item.key.bucket, batch_item.key.org, batch_item.data,
batch_item.key.precision)
logger.debug("Write request finished %s", batch_item)
return _BatchResponse(data=batch_item)
def _post_write(self, _async_req, bucket, org, body, precision):
return self._write_service.post_write(org=org, bucket=bucket, body=body, precision=precision,
async_req=_async_req, content_encoding="identity",
content_type="text/plain; charset=utf-8")
def _retryable(self, data: str, delay: timedelta):
return rx.of(data).pipe(
ops.subscribe_on(self._write_options.write_scheduler),
            # use the delay if it's specified
ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
# invoke http call
ops.map(lambda x: self._http(x)),
            # if there is an error, then retry
ops.catch(handler=lambda exception, source: self._retry_handler(exception, source, data)),
)
def _retry_handler(self, exception, source, data):
if isinstance(exception, ApiException):
if exception.status == 429 or exception.status == 503:
_delay = self._jitter_delay() + timedelta(milliseconds=self._write_options.retry_interval)
return self._retryable(data, delay=_delay)
return rx.just(_BatchResponse(exception=exception, data=data))
def _jitter_delay(self):
return timedelta(milliseconds=random() * self._write_options.jitter_interval)
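
    # For instance, with retry_interval=5_000 and jitter_interval=2_000 a write
    # rejected with HTTP 429/503 is retried after random() * 2s + 5s, i.e. 5-7s.
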
@staticmethod
def _on_next(response: _BatchResponse):
if response.exception:
logger.error("The batch item wasn't processed successfully because: %s", response.exception)
else:
logger.debug("The batch item: %s was processed successfully.", response)
@staticmethod
def _on_error(ex):
logger.error("unexpected error during batching: %s", ex)
def _on_complete(self):
self._disposable.dispose()
logger.info("the batching processor was disposed")
def __getstate__(self):
state = self.__dict__.copy()
# Remove rx
del state['_subject']
del state['_disposable']
del state['_write_service']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Init Rx
self.__init__(self._influxdb_client, self._write_options, self._point_settings)