#!/usr/bin/env python
#
# This script shows the basics of getting data out of Sysdig Monitor by creating a
# very simple request that has no filter and no segmentation.
#
# The request queries for the average CPU across all of the instrumented hosts for
# the last 10 minutes, with 1-minute data granularity.
#
import sys
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
    print('usage: %s <sysdig-token>' % sys.argv[0])
    print('You can find your token at https://app.sysdigcloud.com/#/settings/user')
    sys.exit(1)

sdc_token = sys.argv[1]
sdclient = SdcClient(sdc_token)
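# The client defaults to the SaaS backend (app.sysdigcloud.com). For an on-premises
# installation you would also point it at your own API endpoint, roughly as in the
# commented sketch below (placeholder URL; check the SdcClient constructor for the
# exact parameter name):
# sdclient = SdcClient(sdc_token, sdc_url='https://sysdig.example.com')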
#
# List of metrics to export. Imagine a SQL data table with key columns and value columns:
# you just need to specify the ID for keys, and the ID with an aggregation for values.
#
metrics = [
# {"id": "container.id"},
# {"id": "agent.tag.env", "aggregations": {"time": "concat", "group": "concat"}},
{"id": "cpu.used.percent", "aggregations": {"time": "timeAvg", "group": "avg"}}
]
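# Uncommenting a key above (e.g. "container.id") would segment the CPU metric,
# returning one set of values per container instead of a single aggregated series.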
#
# Data filter or None if you want to see "everything"
#
filter = None
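# As an illustration only (hypothetical hostname), a filter restricting the query
# to a single host could look like:
# filter = "host.hostName = 'my-host-01'"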
#
# Time window:
# - for "from A to B": start is equal to A, end is equal to B (expressed in seconds)
# - for "last X seconds": start is equal to -X, end is equal to 0
#
start = -600
end = 0
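# For an absolute "from A to B" window you would instead pass Unix timestamps
# in seconds, e.g. (illustrative values):
# start = 1609459200   # 2021-01-01 00:00:00 UTC
# end = 1609459800     # ten minutes later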
#
# Sampling time:
# - for time series: sampling is equal to the "width" of each data point (expressed in seconds)
# - for aggregated data (similar to bar charts, pie charts, tables, etc.): sampling is equal to 0
#
sampling = 60
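# Setting sampling = 0 instead would collapse the whole window into a single
# aggregated value rather than a 1-minute time series.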
#
# Load data
#
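# get_data returns a pair: a success flag, plus either the parsed JSON response
# or an error message (printed at the bottom of this script when the call fails).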
ok, res = sdclient.get_data(metrics, start, end, sampling, filter=filter)
#
# Show the result
#
if ok:
    #
    # Read response. The JSON looks like this:
    #
    # {
    #    start: timestamp,
    #    end: timestamp,
    #    data: [
    #     {
    #        t: timestamp,
    #        d: [ value1, value2, value3, ... ]
    #     },
    #     ...
    #    ]
    # }
    #
    colLen = 25

    #
    # Print summary (what, when)
    #
    start = res['start']
    end = res['end']
    data = res['data']
    print('Data for %s from %d to %d' % (filter if filter else 'everything', start, end))
    print('')

    #
    # Print table headers
    #
    dataToPrint = ' '.join(
        [str(x['id']).ljust(colLen) if len(str(x['id'])) < colLen
         else str(x['id'])[:(colLen - 3)].ljust(colLen - 3) + '...'
         for x in metrics])
    print('%s %s' % ('timestamp'.ljust(colLen), dataToPrint) if sampling > 0 else dataToPrint)
    print('')

    #
    # Print table body
    #
    for d in data:
        timestamp = d['t'] if sampling > 0 else start
        values = d['d']
        dataToPrint = ' '.join(
            [str(x).ljust(colLen) if len(str(x)) < colLen
             else str(x)[:(colLen - 3)].ljust(colLen - 3) + '...'
             for x in values])
        print('%s %s' % (('<t: %d>' % timestamp).ljust(colLen), dataToPrint) if sampling > 0 else dataToPrint)
else:
    print(res)
    sys.exit(1)