Coverage Summary for Class: LevelDbDataSource (org.ethereum.datasource)

Class             | Class, %  | Method, %  | Line, %
------------------|-----------|------------|------------
LevelDbDataSource | 0% (0/1)  | 0% (0/15)  | 0% (0/173)
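Nothing in the class is currently exercised by tests: every method and line in the listing below is uncovered. A smoke test along the lines of the sketch below would cover the basic lifecycle (init, put, get, delete, close). This is only a sketch: it assumes JUnit 4 on the classpath and a writable temporary directory, and the test class LevelDbDataSourceTest is a hypothetical name, not an existing file in the repository.

package org.ethereum.datasource;

import org.junit.Assert;
import org.junit.Test;

import java.nio.file.Files;
import java.nio.file.Path;

public class LevelDbDataSourceTest {

    @Test
    public void putGetDeleteRoundTrip() throws Exception {
        // Open the database under a fresh temporary directory.
        Path tempDir = Files.createTempDirectory("leveldb-test");
        LevelDbDataSource ds = new LevelDbDataSource("test-db", tempDir.toString());
        ds.init();
        try {
            byte[] key = {1, 2, 3};
            byte[] value = {4, 5, 6};

            ds.put(key, value);
            Assert.assertArrayEquals(value, ds.get(key));

            ds.delete(key);
            Assert.assertNull(ds.get(key)); // get() returns null for absent keys
        } finally {
            ds.close();
        }
    }
}

The uncovered source of LevelDbDataSource follows.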
/*
 * This file is part of RskJ
 * Copyright (C) 2017 RSK Labs Ltd.
 * (derived from ethereumJ library, Copyright (c) 2016 <ether.camp>)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package org.ethereum.datasource;

import co.rsk.metrics.profilers.Metric;
import co.rsk.metrics.profilers.Profiler;
import co.rsk.metrics.profilers.ProfilerFactory;
import co.rsk.panic.PanicProcessor;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.util.ByteUtil;
import org.iq80.leveldb.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static java.lang.System.getProperty;
import static org.fusesource.leveldbjni.JniDBFactory.factory;

public class LevelDbDataSource implements KeyValueDataSource {

    private static final Logger logger = LoggerFactory.getLogger("db");
    private static final Profiler profiler = ProfilerFactory.getInstance();
    private static final PanicProcessor panicProcessor = new PanicProcessor();

    private final String databaseDir;
    private final String name;
    private DB db;
    private boolean alive;

    // The native LevelDB insert/update/delete operations are normally thread-safe.
    // The close operation, however, is not thread-safe and may lead to a native crash
    // when a closed DB is accessed.
    // The leveldbJNI lib guards against access to a closed DB, but that guard is not synchronized.
    // This ReadWriteLock permits concurrent execution of insert/update/delete operations
    // but blocks them during init/close/delete operations.
    private final ReadWriteLock resetDbLock = new ReentrantReadWriteLock();

    public LevelDbDataSource(String name, String databaseDir) {
        this.databaseDir = databaseDir;
        this.name = name;
        logger.debug("New LevelDbDataSource: {}", name);
    }

    public static KeyValueDataSource makeDataSource(Path datasourcePath) {
        KeyValueDataSource ds = new LevelDbDataSource(datasourcePath.getFileName().toString(), datasourcePath.getParent().toString());
        ds.init();
        return ds;
    }

    @Override
    public void init() {
        resetDbLock.writeLock().lock();
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.LEVEL_DB_INIT);
        try {
            logger.debug("~> LevelDbDataSource.init(): {}", name);

            if (isAlive()) {
                return;
            }

            Objects.requireNonNull(name, "no name set to the db");

            Options options = new Options();
            options.createIfMissing(true);
            options.compressionType(CompressionType.NONE);
            options.blockSize(10 * 1024 * 1024);
            options.writeBufferSize(10 * 1024 * 1024);
            options.cacheSize(0);
            options.paranoidChecks(true);
            options.verifyChecksums(true);

            try {
                logger.debug("Opening database");
                Path dbPath = getPathForName(name, databaseDir);

                Files.createDirectories(dbPath.getParent());

                logger.debug("Initializing new or existing database: '{}'", name);
                db = factory.open(dbPath.toFile(), options);

                alive = true;
            } catch (IOException ioe) {
                logger.error(ioe.getMessage(), ioe);
                panicProcessor.panic("leveldb", ioe.getMessage());
                // Keep the cause so the underlying I/O failure remains diagnosable.
                throw new RuntimeException("Can't initialize database", ioe);
            }
            logger.debug("<~ LevelDbDataSource.init(): {}", name);
        } finally {
            profiler.stop(metric);
            resetDbLock.writeLock().unlock();
        }
    }

    public static Path getPathForName(String name, String databaseDir) {
        if (Paths.get(databaseDir).isAbsolute()) {
            return Paths.get(databaseDir, name);
        } else {
            return Paths.get(getProperty("user.dir"), databaseDir, name);
        }
    }

    @Override
    public boolean isAlive() {
        // Acquire the lock before entering the try block so the finally clause
        // never unlocks a lock that was not successfully acquired.
        resetDbLock.readLock().lock();
        try {
            return alive;
        } finally {
            resetDbLock.readLock().unlock();
        }
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public byte[] get(byte[] key) {
        Objects.requireNonNull(key);
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_READ);
        resetDbLock.readLock().lock();
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("~> LevelDbDataSource.get(): {}, key: {}", name, ByteUtil.toHexString(key));
            }

            try {
                byte[] ret = db.get(key);
                if (logger.isTraceEnabled()) {
                    logger.trace("<~ LevelDbDataSource.get(): {}, key: {}, return length: {}", name, ByteUtil.toHexString(key), (ret == null ? "null" : ret.length));
                }

                return ret;
            } catch (DBException e) {
                // Retry the read once; a single DBException may be transient.
                logger.error("Exception. Retrying again...", e);
                try {
                    byte[] ret = db.get(key);
                    if (logger.isTraceEnabled()) {
                        logger.trace("<~ LevelDbDataSource.get(): {}, key: {}, return length: {}", name, ByteUtil.toHexString(key), (ret == null ? "null" : ret.length));
                    }

                    return ret;
                } catch (DBException e2) {
                    logger.error("Exception. Not retrying.", e2);
                    panicProcessor.panic("leveldb", String.format("Exception. Not retrying. %s", e2.getMessage()));
                    throw e2;
                }
            }
        } finally {
            resetDbLock.readLock().unlock();
            profiler.stop(metric);
        }
    }

    @Override
    public byte[] put(byte[] key, byte[] value) {
        Objects.requireNonNull(key);
        Objects.requireNonNull(value);

        Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_WRITE);
        resetDbLock.readLock().lock();
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("~> LevelDbDataSource.put(): {}, key: {}, value length: {}", name, ByteUtil.toHexString(key), value.length);
            }

            db.put(key, value);
            if (logger.isTraceEnabled()) {
                logger.trace("<~ LevelDbDataSource.put(): {}, key: {}, value length: {}", name, ByteUtil.toHexString(key), value.length);
            }

            return value;
        } finally {
            resetDbLock.readLock().unlock();
            profiler.stop(metric);
        }
    }

    @Override
    public void delete(byte[] key) {
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_WRITE);
        resetDbLock.readLock().lock();
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("~> LevelDbDataSource.delete(): {}, key: {}", name, ByteUtil.toHexString(key));
            }

            db.delete(key);
            if (logger.isTraceEnabled()) {
                logger.trace("<~ LevelDbDataSource.delete(): {}, key: {}", name, ByteUtil.toHexString(key));
            }
        } finally {
            resetDbLock.readLock().unlock();
            profiler.stop(metric);
        }
    }

    @Override
    public Set<byte[]> keys() {
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_READ);
        resetDbLock.readLock().lock();
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("~> LevelDbDataSource.keys(): {}", name);
            }

            try (DBIterator iterator = db.iterator()) {
                Set<byte[]> result = new HashSet<>();
                for (iterator.seekToFirst(); iterator.hasNext(); iterator.next()) {
                    result.add(iterator.peekNext().getKey());
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("<~ LevelDbDataSource.keys(): {}, {}", name, result.size());
                }

                return result;
            } catch (IOException e) {
                logger.error("Unexpected", e);
                panicProcessor.panic("leveldb", String.format("Unexpected %s", e.getMessage()));
                throw new RuntimeException(e);
            }
        } finally {
            resetDbLock.readLock().unlock();
            profiler.stop(metric);
        }
    }

    private void updateBatchInternal(Map<ByteArrayWrapper, byte[]> rows, Set<ByteArrayWrapper> deleteKeys) throws IOException {
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_WRITE);
        if (rows.containsKey(null) || rows.containsValue(null)) {
            profiler.stop(metric);
            throw new IllegalArgumentException("Cannot update null values");
        }
        // Note that this is not atomic.
        try (WriteBatch batch = db.createWriteBatch()) {
            for (Map.Entry<ByteArrayWrapper, byte[]> entry : rows.entrySet()) {
                batch.put(entry.getKey().getData(), entry.getValue());
            }
            for (ByteArrayWrapper deleteKey : deleteKeys) {
                batch.delete(deleteKey.getData());
            }
            db.write(batch);
        } finally {
            // Stop the metric even when the write fails, so profiling data stays consistent.
            profiler.stop(metric);
        }
    }

    @Override
    public void updateBatch(Map<ByteArrayWrapper, byte[]> rows, Set<ByteArrayWrapper> deleteKeys) {
        if (rows.containsKey(null)) {
            throw new IllegalArgumentException("Cannot update null values");
        }
        resetDbLock.readLock().lock();
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("~> LevelDbDataSource.updateBatch(): {}, {}", name, rows.size());
            }

            try {
                updateBatchInternal(rows, deleteKeys);
                if (logger.isTraceEnabled()) {
                    logger.trace("<~ LevelDbDataSource.updateBatch(): {}, {}", name, rows.size());
                }
            } catch (IllegalArgumentException iae) {
                throw iae;
            } catch (Exception e) {
                logger.error("Error, retrying one more time...", e);
                // try one more time
                try {
                    updateBatchInternal(rows, deleteKeys);
                    if (logger.isTraceEnabled()) {
                        logger.trace("<~ LevelDbDataSource.updateBatch(): {}, {}", name, rows.size());
                    }
                } catch (IllegalArgumentException iae) {
                    throw iae;
                } catch (Exception e1) {
                    // Report the exception from the retry, not the first attempt.
                    logger.error("Error", e1);
                    panicProcessor.panic("leveldb", String.format("Error %s", e1.getMessage()));
                    throw new RuntimeException(e1);
                }
            }
        } finally {
            resetDbLock.readLock().unlock();
        }
    }

    @Override
    public void close() {
        Metric metric = profiler.start(Profiler.PROFILING_TYPE.LEVEL_DB_CLOSE);
        resetDbLock.writeLock().lock();
        try {
            if (!isAlive()) {
                return;
            }

            try {
                logger.debug("Close db: {}", name);
                db.close();

                alive = false;
            } catch (IOException e) {
                logger.error("Failed to close the db: {}", name, e);
                panicProcessor.panic("leveldb", String.format("Failed to close the db: %s", name));
            }
        } finally {
            resetDbLock.writeLock().unlock();
            profiler.stop(metric);
        }
    }

    @Override
    public void flush() {
        // All is flushed immediately: there is no uncommittedCache to flush
    }

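    /**
     * Merges the contents of the origin databases into the destination database.
     * When the same key appears in more than one origin, the value from the origin
     * listed last wins, because all entries are collected into a single in-memory
     * map before being written in one batch.
     */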
    public static void mergeDataSources(Path destinationPath, List<Path> originPaths) {
        Map<ByteArrayWrapper, byte[]> mergedStores = new HashMap<>();
        for (Path originPath : originPaths) {
            KeyValueDataSource singleOriginDataSource = makeDataSource(originPath);
            for (byte[] key : singleOriginDataSource.keys()) {
                mergedStores.put(ByteUtil.wrap(key), singleOriginDataSource.get(key));
            }
            singleOriginDataSource.close();
        }
        KeyValueDataSource destinationDataSource = makeDataSource(destinationPath);
        destinationDataSource.updateBatch(mergedStores, Collections.emptySet());
        destinationDataSource.close();
    }
}
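
For reference, a typical lifecycle of the data source through its public API might look like the sketch below. The path, key, and value are illustrative assumptions; makeDataSource() treats the last path segment as the database name and the parent directory as the database directory, exactly as in the listing above.

import org.ethereum.datasource.KeyValueDataSource;
import org.ethereum.datasource.LevelDbDataSource;

import java.nio.file.Paths;

public class LevelDbDataSourceUsage {
    public static void main(String[] args) {
        // Hypothetical location: the last segment ("blocks") becomes the db name,
        // "/tmp/rskj" becomes the database directory; the db is opened by init().
        KeyValueDataSource ds = LevelDbDataSource.makeDataSource(Paths.get("/tmp/rskj/blocks"));

        byte[] key = "block-1".getBytes();
        ds.put(key, "payload".getBytes());

        byte[] stored = ds.get(key); // null when the key is absent
        System.out.println(stored == null ? "missing" : new String(stored));

        ds.close(); // releases the native handle; required before reopening the same directory
    }
}

mergeDataSources() follows the same pattern at a larger scale: it opens each origin with makeDataSource(), copies every key/value pair into an in-memory map, and writes the result to the destination in a single updateBatch() call.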