Coverage Summary for Class: DataSourceWithCache (org.ethereum.datasource)
Class               | Class, %  | Method, %  | Line, %
DataSourceWithCache | 0% (0/1)  | 0% (0/20)  | 0% (0/97)
/*
 * This file is part of RskJ
 * Copyright (C) 2018 RSK Labs Ltd.
 * (derived from ethereumJ library, Copyright (c) 2016 <ether.camp>)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package org.ethereum.datasource;

import co.rsk.util.FormatUtils;
import co.rsk.util.MaxSizeHashMap;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.util.ByteUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;

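/**
 * A {@link KeyValueDataSource} decorator that layers two caches over a base store:
 * a bounded, LRU-style committed cache of values known to match the underlying
 * store (a null value records that a key is known to be absent), and an
 * uncommitted write-behind cache that buffers puts and deletes until
 * {@link #flush()} pushes them to the base store in a single batch.
 */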
public class DataSourceWithCache implements KeyValueDataSource {
    private static final Logger logger = LoggerFactory.getLogger(DataSourceWithCache.class);

    private final int cacheSize;
    private final KeyValueDataSource base;
    private final Map<ByteArrayWrapper, byte[]> uncommittedCache;
    private final Map<ByteArrayWrapper, byte[]> committedCache;

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public DataSourceWithCache(KeyValueDataSource base, int cacheSize) {
        this.cacheSize = cacheSize;
        this.base = base;
        this.uncommittedCache = new LinkedHashMap<>(cacheSize / 8, 0.75f, false);
        this.committedCache = Collections.synchronizedMap(new MaxSizeHashMap<>(cacheSize, true));
    }

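    /**
     * Read-through lookup: committed cache first, then the uncommitted cache,
     * and finally the underlying store, whose answer (including a miss) is
     * recorded in the committed cache.
     */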
    @Override
    public byte[] get(byte[] key) {
        Objects.requireNonNull(key);
        ByteArrayWrapper wrappedKey = ByteUtil.wrap(key);
        byte[] value;

        this.lock.readLock().lock();

        try {
            if (committedCache.containsKey(wrappedKey)) {
                return committedCache.get(wrappedKey);
            }

            if (uncommittedCache.containsKey(wrappedKey)) {
                return uncommittedCache.get(wrappedKey);
            }

            value = base.get(key);

            // a null value is expected here: caching it records that the key is
            // absent from the underlying store. committedCache is a synchronized
            // map, so this write is safe even under the read lock.
            committedCache.put(wrappedKey, value);
        }
        finally {
            this.lock.readLock().unlock();
        }

        return value;
    }

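    /**
     * Buffers the write in the uncommitted cache; it only reaches the
     * underlying store on the next {@link #flush()}.
     */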
    @Override
    public byte[] put(byte[] key, byte[] value) {
        ByteArrayWrapper wrappedKey = ByteUtil.wrap(key);

        return put(wrappedKey, value);
    }

    private byte[] put(ByteArrayWrapper wrappedKey, byte[] value) {
        Objects.requireNonNull(value);

        this.lock.writeLock().lock();

        try {
            // skip the write entirely if the committed cache already holds an
            // identical value for this key
            byte[] priorValue = committedCache.get(wrappedKey);

            if (priorValue != null && Arrays.equals(priorValue, value)) {
                return value;
            }

            committedCache.remove(wrappedKey);
            this.putKeyValue(wrappedKey, value);
        }
        finally {
            this.lock.writeLock().unlock();
        }

        return value;
    }

    private void putKeyValue(ByteArrayWrapper key, byte[] value) {
        uncommittedCache.put(key, value);

        if (uncommittedCache.size() > cacheSize) {
            this.flush();
        }
    }

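    /**
     * Deletes are buffered as null tombstones in the uncommitted cache and
     * applied to the underlying store on the next {@link #flush()}.
     */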
    @Override
    public void delete(byte[] key) {
        delete(ByteUtil.wrap(key));
    }

    private void delete(ByteArrayWrapper wrappedKey) {
        this.lock.writeLock().lock();

        try {
            // always mark for deletion if we don't know the state in the underlying store
            if (!committedCache.containsKey(wrappedKey)) {
                this.putKeyValue(wrappedKey, null);
                return;
            }

            byte[] valueToRemove = committedCache.get(wrappedKey);

            // a null value means we know for a fact that the key doesn't exist in the
            // underlying store, so this is a noop
            if (valueToRemove != null) {
                this.putKeyValue(wrappedKey, null);
                committedCache.remove(wrappedKey);
            }
        }
        finally {
            this.lock.writeLock().unlock();
        }
    }

    @Override
    public Set<byte[]> keys() {
        Set<ByteArrayWrapper> knownKeys;

        this.lock.readLock().lock();

        try {
            Stream<ByteArrayWrapper> baseKeys = base.keys().stream().map(ByteArrayWrapper::new);
            Stream<ByteArrayWrapper> committedKeys = committedCache.entrySet().stream()
                    .filter(e -> e.getValue() != null)
                    .map(Map.Entry::getKey);
            Stream<ByteArrayWrapper> uncommittedKeys = uncommittedCache.entrySet().stream()
                    .filter(e -> e.getValue() != null)
                    .map(Map.Entry::getKey);
            Set<ByteArrayWrapper> uncommittedKeysToRemove = uncommittedCache.entrySet().stream()
                    .filter(e -> e.getValue() == null)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toSet());

            // the streams over the caches are lazy, so they must be collected
            // while the read lock is still held
            knownKeys = Stream.concat(Stream.concat(baseKeys, committedKeys), uncommittedKeys)
                    .collect(Collectors.toSet());
            knownKeys.removeAll(uncommittedKeysToRemove);
        }
        finally {
            this.lock.readLock().unlock();
        }

        // byte[] has no value-based equals/hashCode, so the set is built over
        // ByteArrayWrapper and unwrapped only at the end
        return knownKeys.stream()
                .map(ByteArrayWrapper::getData)
                .collect(Collectors.toSet());
    }

    @Override
    public void updateBatch(Map<ByteArrayWrapper, byte[]> rows, Set<ByteArrayWrapper> keysToRemove) {
        if (rows.containsKey(null) || rows.containsValue(null)) {
            throw new IllegalArgumentException("Cannot update null values");
        }

        // remove overlapping entries so that a key queued for removal is not also written
        rows.keySet().removeAll(keysToRemove);

        this.lock.writeLock().lock();

        try {
            rows.forEach(this::put);
            keysToRemove.forEach(this::delete);
        }
        finally {
            this.lock.writeLock().unlock();
        }
    }

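    /**
     * Pushes all buffered writes and deletes to the underlying store in a
     * single batch, then promotes the buffered entries to the committed cache.
     */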
    @Override
    public void flush() {
        Map<ByteArrayWrapper, byte[]> uncommittedBatch = new LinkedHashMap<>();

        this.lock.writeLock().lock();

        try {
            long saveTime = System.nanoTime();

            this.uncommittedCache.forEach((key, value) -> {
                if (value != null) {
                    uncommittedBatch.put(key, value);
                }
            });

            Set<ByteArrayWrapper> uncommittedKeysToRemove = uncommittedCache.entrySet().stream()
                    .filter(e -> e.getValue() == null)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toSet());
            base.updateBatch(uncommittedBatch, uncommittedKeysToRemove);
            // null tombstones are promoted too: they record keys known to be absent
            committedCache.putAll(uncommittedCache);
            uncommittedCache.clear();

            long totalTime = System.nanoTime() - saveTime;

            if (logger.isTraceEnabled()) {
                logger.trace("datasource flush: [{}]seconds", FormatUtils.formatNanosecondsToSeconds(totalTime));
            }
        }
        finally {
            this.lock.writeLock().unlock();
        }
    }

    @Override
    public String getName() {
        return base.getName() + "-with-uncommittedCache";
    }

    @Override
    public void init() {
        base.init();
    }

    @Override
    public boolean isAlive() {
        return base.isAlive();
    }

    @Override
    public void close() {
        this.lock.writeLock().lock();

        try {
            flush();
            base.close();
            uncommittedCache.clear();
            committedCache.clear();
        }
        finally {
            this.lock.writeLock().unlock();
        }
    }
}
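
The summary above reports 0% class, method, and line coverage. A minimal JUnit 4 sketch that would begin to exercise the put/get/flush/delete paths is shown below; it assumes an in-memory KeyValueDataSource implementation is available (HashMapDB is used here as a stand-in and may differ from the project's actual test fixtures):

package org.ethereum.datasource;

import org.junit.Assert;
import org.junit.Test;

public class DataSourceWithCacheSketchTest {

    @Test
    public void putGetFlushDeleteRoundTrip() {
        // HashMapDB is assumed to be an in-memory KeyValueDataSource; swap in
        // whatever in-memory implementation the test suite actually provides
        KeyValueDataSource base = new HashMapDB();
        DataSourceWithCache dataSource = new DataSourceWithCache(base, 100);

        byte[] key = new byte[]{1, 2, 3};
        byte[] value = new byte[]{4, 5, 6};

        // the write is buffered: visible through the cache, not yet in the base store
        dataSource.put(key, value);
        Assert.assertArrayEquals(value, dataSource.get(key));

        // flush pushes the buffered write to the underlying store
        dataSource.flush();
        Assert.assertArrayEquals(value, base.get(key));

        // delete buffers a null tombstone; flushing applies it to the base store
        dataSource.delete(key);
        dataSource.flush();
        Assert.assertNull(dataSource.get(key));
        Assert.assertNull(base.get(key));
    }
}

Flushing twice this way checks both halves of the write-behind design: the batch that carries buffered values, and the null tombstones that carry buffered deletions.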