# Copyright (c) 2014-2018, The Monero Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
#    conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
#    of conditions and the following disclaimer in the documentation and/or other
#    materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
#    used to endorse or promote products derived from this software without specific
#    prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

if (WIN32 OR STATIC)
  add_definitions(-DSTATICLIB)
  # miniupnp changed their static define
  add_definitions(-DMINIUPNP_STATICLIB)
endif ()

function (monero_private_headers group)
  source_group("${group}\\Private"
    FILES
      ${ARGN})
endfunction ()

function (monero_install_headers subdir)
  install(
    FILES ${ARGN}
    DESTINATION "include/${subdir}"
    COMPONENT development)
endfunction ()
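
# Illustrative usage (assumed, not taken from this file): a component's own
# CMakeLists.txt would typically group its private headers and install its
# public ones via these helpers, e.g.
#   monero_private_headers(wallet ${wallet_private_headers})
#   monero_install_headers(wallet wallet2_api.h)
# The component name and header lists above are hypothetical placeholders.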

function (enable_stack_trace target)
  if(STACK_TRACE)
    set_property(TARGET ${target}
      APPEND PROPERTY COMPILE_DEFINITIONS "STACK_TRACE")
    if (STATIC)
      set_property(TARGET "${target}"
        APPEND PROPERTY LINK_FLAGS "-Wl,--wrap=__cxa_throw")
    endif()
  endif()
endfunction()

function (monero_add_executable name)
  source_group("${name}"
    FILES
      ${ARGN})

  add_executable("${name}"
    ${ARGN})
  target_link_libraries("${name}"
    PRIVATE
      ${EXTRA_LIBRARIES})
  set_property(TARGET "${name}"
    PROPERTY
      FOLDER "prog")
  set_property(TARGET "${name}"
    PROPERTY
      RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
  enable_stack_trace("${name}")
endfunction ()
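
# Illustrative usage (assumed, not taken from this file): a subdirectory would
# typically declare its binary with this helper and then add its own link
# dependencies, e.g.
#   monero_add_executable(my_tool main.cpp tool_utils.cpp)
#   target_link_libraries(my_tool PRIVATE common)
# The target name and source files above are hypothetical placeholders.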

function (monero_add_library name)
  monero_add_library_with_deps(NAME "${name}" SOURCES ${ARGN})
endfunction()

function (monero_add_library_with_deps)
  cmake_parse_arguments(MONERO_ADD_LIBRARY "" "NAME" "DEPENDS;SOURCES" ${ARGN})
  source_group("${MONERO_ADD_LIBRARY_NAME}" FILES ${MONERO_ADD_LIBRARY_SOURCES})

  # Define a ("virtual") object library and an actual library that links those
  # objects together. The virtual libraries can be arbitrarily combined to link
  # any subset of objects into one library archive. This is used for releasing
  # libwallet, which combines multiple components.
  set(objlib obj_${MONERO_ADD_LIBRARY_NAME})
  add_library(${objlib} OBJECT ${MONERO_ADD_LIBRARY_SOURCES})
  add_library("${MONERO_ADD_LIBRARY_NAME}" $<TARGET_OBJECTS:${objlib}>)
  if (MONERO_ADD_LIBRARY_DEPENDS)
    add_dependencies(${objlib} ${MONERO_ADD_LIBRARY_DEPENDS})
  endif()
  set_property(TARGET "${MONERO_ADD_LIBRARY_NAME}" PROPERTY FOLDER "libs")
  target_compile_definitions(${objlib}
    PRIVATE $<TARGET_PROPERTY:${MONERO_ADD_LIBRARY_NAME},INTERFACE_COMPILE_DEFINITIONS>)
endfunction ()
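
# Sketch (assumed, not taken from this file): because every monero_add_library
# call also creates an obj_<name> object library, a combined release archive
# can in principle be assembled from any subset of those objects, e.g.
#   add_library(wallet_merged STATIC
#     $<TARGET_OBJECTS:obj_wallet>
#     $<TARGET_OBJECTS:obj_multisig>)
# The merged target and object-library names above are illustrative placeholders.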

include(Version)
monero_add_library(version SOURCES ${CMAKE_BINARY_DIR}/version.cpp DEPENDS genversion)

add_subdirectory(common)
add_subdirectory(crypto)
add_subdirectory(ringct)
add_subdirectory(checkpoints)
add_subdirectory(cryptonote_basic)
add_subdirectory(cryptonote_core)
add_subdirectory(multisig)
add_subdirectory(net)
if(NOT IOS)
  add_subdirectory(blockchain_db)
endif()
add_subdirectory(mnemonics)
if(NOT IOS)
  add_subdirectory(rpc)
  add_subdirectory(serialization)
endif()
add_subdirectory(wallet)
if(NOT IOS)
  add_subdirectory(p2p)
endif()
add_subdirectory(cryptonote_protocol)
if(NOT IOS)
  add_subdirectory(simplewallet)
  add_subdirectory(gen_multisig)
  add_subdirectory(daemonizer)
  add_subdirectory(daemon)
  add_subdirectory(blockchain_utilities)
endif()

if(CMAKE_BUILD_TYPE STREQUAL Debug)
  add_subdirectory(debug_utilities)
endif()

if(PER_BLOCK_CHECKPOINT)
  add_subdirectory(blocks)
endif()
|
2018-02-20 09:01:27 -07:00
|
|
|
|
|
|
|
add_subdirectory(device)
|
2018-08-23 15:50:53 -06:00
|
|
|
add_subdirectory(device_trezor)
|