hbase-journal {

  # Class name of the HBase journal plugin
  class = "akka.persistence.hbase.journal.HBaseAsyncWriteJournal"

  # Partitions are used to avoid the "hot write region" problem.
  # Set this to a number greater than the expected number of regions of your table.
  # WARNING: Changing the partition count once data has already been written
  # to a table is not supported (some records could be missed during replay).
  partition.count = 50

  # All these settings will be set on the underlying Hadoop Configuration
  hadoop-pass-through {
    hbase.zookeeper.quorum = "localhost:2181"
    zookeeper.znode.parent = "/hbase"
  }
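  # Any other HBase/Hadoop client property can be added to hadoop-pass-through
  # in the same way. For example (illustrative values, not plugin defaults --
  # tune for your own cluster):
  #
  #   hbase.client.retries.number = 10
  #   hbase.rpc.timeout = 60000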
  # Name of the table to be used by the journal
  table = "akka_messages"

  # Name of the column family to be used by the journal
  family = "message"

  # When performing scans, how many items do we want to obtain per next(N) call.
  # This most notably affects the speed at which message replay progresses;
  # fine-tune this to match your cluster.
  scan-batch-size = 200

  # Dispatcher for fetching and replaying messages
  replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"

  # Default dispatcher used by the plugin
  plugin-dispatcher = "akka-hbase-persistence-dispatcher"

  # Internal events published to the eventStream (useful for testing)
  publish-testing-events = off
  # Write batch sizes are configured by akka-persistence itself, refer to:
  # http://doc.akka.io/docs/akka/2.3.0-RC1/scala/persistence.html#batch-writes
  # Settings are defined per operation:
  #   akka.persistence.journal.max-message-batch-size
  #   akka.persistence.journal.max-confirmation-batch-size
  #   akka.persistence.journal.max-deletion-batch-size
  #
  # A flush occurs after every batch operation; for example, for a batch of
  # 200 msgs we'll flush right away, but flushes may also happen more often.
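  # For example, those batch sizes could be overridden in your application.conf
  # like this (values below are illustrative, not recommendations):
  #
  #   akka.persistence.journal.max-message-batch-size = 200
  #   akka.persistence.journal.max-confirmation-batch-size = 200
  #   akka.persistence.journal.max-deletion-batch-size = 200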
}
hadoop-snapshot-store {

  # Class name of the snapshot store plugin
  class = "akka.persistence.hbase.snapshot.HadoopSnapshotStore"

  # Select your preferred implementation based on your needs:
  #
  # * HBase - snapshots are stored in HBase, alongside the journal's messages;
  #   snapshot size is (currently) limited to Int.MaxValue bytes.
  #   Impl class is "akka.persistence.hbase.snapshot.HBaseSnapshotter"
  #
  # * HDFS *deprecated, will be moved to a separate project* - can handle HUGE snapshots,
  #   which can easily be copied to the local filesystem using the Hadoop CLI tools.
  #   Impl class is "akka.persistence.hbase.snapshot.HdfsSnapshotter"
  mode = "hbase"
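  # To use the (deprecated) HDFS implementation instead, the mode would be
  # switched like this:
  #
  #   mode = "hdfs"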
  hbase {
    # Name of the table to be used by the snapshot store
    table = "akka_snapshots"

    # Name of the column family to be used by the snapshot store
    family = "snapshot"
  }

  # Directory on HDFS where snapshots are stored (used when mode = "hdfs")
  snapshot-dir = "/akka-persistence-snapshots"
}
akka-hbase-persistence-dispatcher {
  type = Dispatcher
  executor = "thread-pool-executor"
  thread-pool-executor {
    core-pool-size-min = 2
    core-pool-size-factor = 2.0
    core-pool-size-max = 10
  }
  throughput = 200
}
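
# To actually use these plugins, point akka-persistence at them from your
# application.conf. A minimal sketch using the standard akka-persistence 2.3.x
# keys (verify against your Akka version):
#
#   akka.persistence.journal.plugin = "hbase-journal"
#   akka.persistence.snapshot-store.plugin = "hadoop-snapshot-store"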