From f49130a2f59cf9cefc6b47fa3b31e3aae44d33fb Mon Sep 17 00:00:00 2001 From: Al Liu Date: Thu, 31 Oct 2024 18:05:37 +0800 Subject: [PATCH] 0.5.0 (#24) - Refactor the project to make all WALs based on the generic implementation. - Support different memtables based on [`crossbeam-skiplist`](https://github.com/crossbeam-rs/crossbeam) or [`skl`](https://github.com/al8n/skl) - More user-friendly APIs - Support `no-std` environment --- .codecov.yml | 10 +- .github/workflows/ci.yml | 93 +- CHANGELOG.md | 9 +- Cargo.toml | 51 +- README-zh_CN.md | 53 +- README.md | 30 +- ci/miri_tb.sh | 11 +- ci/sanitizer.sh | 6 +- examples/generic_not_sized.rs | 29 +- examples/multiple_version.rs | 36 + examples/zero_cost.rs | 27 +- src/batch.rs | 226 +++ src/buffer.rs | 29 - src/builder.rs | 823 ++-------- src/builder/memmap.rs | 710 +++++++++ src/entry.rs | 601 ------- src/error.rs | 263 +++- src/lib.rs | 80 +- src/memtable.rs | 280 ++++ src/memtable/alternative.rs | 289 ++++ src/memtable/alternative/multiple_version.rs | 362 +++++ src/memtable/alternative/table.rs | 215 +++ src/memtable/arena.rs | 102 ++ src/memtable/arena/multiple_version.rs | 358 +++++ src/memtable/arena/table.rs | 204 +++ src/memtable/linked.rs | 7 + src/memtable/linked/multiple_version.rs | 350 +++++ src/memtable/linked/table.rs | 213 +++ src/options.rs | 493 +----- src/options/memmap.rs | 475 ++++++ src/pointer.rs | 195 --- src/sealed.rs | 1393 +++++++++++++++++ src/swmr.rs | 141 +- src/swmr/generic.rs | 1155 -------------- src/swmr/generic/builder.rs | 906 ----------- src/swmr/generic/iter.rs | 111 -- src/swmr/generic/reader.rs | 207 --- src/swmr/generic/tests.rs | 180 --- src/swmr/generic/tests/constructor.rs | 285 ---- src/swmr/generic/tests/get.rs | 562 ------- src/swmr/generic/tests/insert.rs | 1158 -------------- src/swmr/reader.rs | 65 + src/swmr/tests.rs | 344 ++++ src/swmr/tests/constructor.rs | 113 ++ src/swmr/tests/get.rs | 254 +++ src/swmr/tests/insert.rs | 451 ++++++ src/swmr/{generic => }/tests/iters.rs | 305 ++-- .../tests/multiple_version_constructor.rs | 113 ++ src/swmr/tests/multiple_version_get.rs | 724 +++++++++ src/swmr/tests/multiple_version_insert.rs | 549 +++++++ src/swmr/tests/multiple_version_iters.rs | 556 +++++++ src/swmr/wal.rs | 423 +---- src/swmr/wal/iter.rs | 258 --- src/swmr/wal/reader.rs | 192 --- src/swmr/wal/tests.rs | 15 - src/swmr/wal/tests/constructor.rs | 3 - src/swmr/wal/tests/get.rs | 3 - src/swmr/wal/tests/insert.rs | 5 - src/swmr/wal/tests/iter.rs | 3 - src/swmr/writer.rs | 121 ++ src/tests.rs | 1151 -------------- src/types.rs | 199 +++ src/types/base.rs | 296 ++++ src/types/multiple_version.rs | 525 +++++++ src/unsync.rs | 372 ----- src/unsync/c.rs | 48 - src/unsync/iter.rs | 234 --- src/unsync/tests.rs | 15 - src/unsync/tests/constructor.rs | 3 - src/unsync/tests/get.rs | 3 - src/unsync/tests/insert.rs | 5 - src/unsync/tests/iter.rs | 3 - src/utils.rs | 88 +- src/wal.rs | 494 +----- src/wal/base.rs | 687 ++++++++ src/wal/base/iter.rs | 476 ++++++ src/wal/batch.rs | 219 --- src/wal/iter.rs | 1 + src/wal/multiple_version.rs | 1134 ++++++++++++++ src/wal/multiple_version/iter.rs | 881 +++++++++++ src/wal/pointer.rs | 249 +++ src/wal/query.rs | 107 ++ src/wal/sealed.rs | 601 ------- 83 files changed, 14086 insertions(+), 10930 deletions(-) create mode 100644 examples/multiple_version.rs create mode 100644 src/batch.rs delete mode 100644 src/buffer.rs create mode 100644 src/builder/memmap.rs delete mode 100644 src/entry.rs create mode 100644 src/memtable.rs create mode 100644 
src/memtable/alternative.rs create mode 100644 src/memtable/alternative/multiple_version.rs create mode 100644 src/memtable/alternative/table.rs create mode 100644 src/memtable/arena.rs create mode 100644 src/memtable/arena/multiple_version.rs create mode 100644 src/memtable/arena/table.rs create mode 100644 src/memtable/linked.rs create mode 100644 src/memtable/linked/multiple_version.rs create mode 100644 src/memtable/linked/table.rs create mode 100644 src/options/memmap.rs delete mode 100644 src/pointer.rs create mode 100644 src/sealed.rs delete mode 100644 src/swmr/generic.rs delete mode 100644 src/swmr/generic/builder.rs delete mode 100644 src/swmr/generic/iter.rs delete mode 100644 src/swmr/generic/reader.rs delete mode 100644 src/swmr/generic/tests.rs delete mode 100644 src/swmr/generic/tests/constructor.rs delete mode 100644 src/swmr/generic/tests/get.rs delete mode 100644 src/swmr/generic/tests/insert.rs create mode 100644 src/swmr/reader.rs create mode 100644 src/swmr/tests.rs create mode 100644 src/swmr/tests/constructor.rs create mode 100644 src/swmr/tests/get.rs create mode 100644 src/swmr/tests/insert.rs rename src/swmr/{generic => }/tests/iters.rs (56%) create mode 100644 src/swmr/tests/multiple_version_constructor.rs create mode 100644 src/swmr/tests/multiple_version_get.rs create mode 100644 src/swmr/tests/multiple_version_insert.rs create mode 100644 src/swmr/tests/multiple_version_iters.rs delete mode 100644 src/swmr/wal/iter.rs delete mode 100644 src/swmr/wal/reader.rs delete mode 100644 src/swmr/wal/tests.rs delete mode 100644 src/swmr/wal/tests/constructor.rs delete mode 100644 src/swmr/wal/tests/get.rs delete mode 100644 src/swmr/wal/tests/insert.rs delete mode 100644 src/swmr/wal/tests/iter.rs create mode 100644 src/swmr/writer.rs delete mode 100644 src/tests.rs create mode 100644 src/types.rs create mode 100644 src/types/base.rs create mode 100644 src/types/multiple_version.rs delete mode 100644 src/unsync.rs delete mode 100644 src/unsync/c.rs delete mode 100644 src/unsync/iter.rs delete mode 100644 src/unsync/tests.rs delete mode 100644 src/unsync/tests/constructor.rs delete mode 100644 src/unsync/tests/get.rs delete mode 100644 src/unsync/tests/insert.rs delete mode 100644 src/unsync/tests/iter.rs create mode 100644 src/wal/base.rs create mode 100644 src/wal/base/iter.rs delete mode 100644 src/wal/batch.rs create mode 100644 src/wal/iter.rs create mode 100644 src/wal/multiple_version.rs create mode 100644 src/wal/multiple_version/iter.rs create mode 100644 src/wal/pointer.rs create mode 100644 src/wal/query.rs delete mode 100644 src/wal/sealed.rs diff --git a/.codecov.yml b/.codecov.yml index 074b20b..c50b306 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -5,15 +5,9 @@ ignore: - "**/integration/" - "**/examples/" - "**/benches/" - - "src/tests.rs" - "src/error.rs" - - "src/swmr/generic/tests.rs" - - "src/swmr/generic/tests/" - - "src/swmr/wal/tests.rs" - - "src/swmr/wal/tests/" - - "src/wal/type/" - - "src/unsync/tests.rs" - - "src/unsync/tests/" + - "src/swmr/tests.rs" + - "src/swmr/tests/" coverage: status: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 17860b0..6aa1dd6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,9 +82,9 @@ jobs: - powerpc64-unknown-linux-gnu # - mips64-unknown-linux-gnuabi64 - riscv64gc-unknown-linux-gnu - # - wasm32-unknown-unknown - # - wasm32-unknown-emscripten - # - wasm32-wasi + - wasm32-unknown-unknown + - wasm32-unknown-emscripten + - wasm32-wasi runs-on: ubuntu-latest steps: - 
uses: actions/checkout@v4 @@ -100,22 +100,10 @@ jobs: ${{ runner.os }}-cross- - name: Install Rust run: rustup update stable && rustup default stable - - name: cross build --target ${{ matrix.target }} + - name: cargo build --target ${{ matrix.target }} run: | - cargo install cross - cross build --target ${{ matrix.target }} --all-features - # if: matrix.target != 'wasm32-unknown-unknown' && matrix.target != 'wasm32-wasi' - # # WASM support - # - name: cargo build --target ${{ matrix.target }} - # run: | - # rustup target add ${{ matrix.target }} - # cargo build --target ${{ matrix.target }} - # if: matrix.target == 'wasm32-unknown-unknown' || matrix.target == 'wasm32-wasi' - # - name: cargo build --target ${{ matrix.target }} - # run: | - # rustup target add ${{ matrix.target }} - # cargo +nightly build --no-default-features --features alloc --target ${{ matrix.target }} -Z build-std=core,alloc - # if: matrix.target == 'mips64-unknown-linux-gnuabi64' + rustup target add ${{ matrix.target }} + cargo build --target ${{ matrix.target }} build: name: build @@ -150,7 +138,47 @@ jobs: key: ${{ runner.os }}-coverage-dotcargo - name: Run build run: cargo hack build --feature-powerset --exclude-no-default-features --group-features xxhash64,xxhash3 --exclude-features tracing - + no-std: + name: no-std + strategy: + matrix: + os: + - ubuntu-latest + - macos-latest + - windows-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Cache cargo build and registry + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-test-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-test- + - name: Install Rust + # --no-self-update is necessary because the windows environment cannot self-update rustup.exe. 
+ run: rustup update stable --no-self-update && rustup default stable + - name: Install cargo-hack + run: cargo install cargo-hack + - name: Cache ~/.cargo + uses: actions/cache@v4 + with: + path: ~/.cargo + key: ${{ runner.os }}-coverage-dotcargo + - name: Run test (Unix) + run: RUSTFLAGS="--cfg all_orderwal_tests" cargo test --no-default-features --features alloc + if: matrix.os != 'windows-latest' + - name: Run test (Windows) + shell: pwsh + run: | + $env:RUSTFLAGS="--cfg all_orderwal_tests" + cargo test --no-default-features --features alloc + if: matrix.os == 'windows-latest' + test: name: test strategy: @@ -183,12 +211,12 @@ jobs: path: ~/.cargo key: ${{ runner.os }}-coverage-dotcargo - name: Run test (Unix) - run: RUSTFLAGS="--cfg all_tests" cargo test --all-features + run: RUSTFLAGS="--cfg all_orderwal_tests" cargo test --all-features if: matrix.os != 'windows-latest' - name: Run test (Windows) shell: pwsh run: | - $env:RUSTFLAGS="--cfg all_tests" + $env:RUSTFLAGS="--cfg all_orderwal_tests" cargo test --all-features if: matrix.os == 'windows-latest' @@ -238,18 +266,14 @@ jobs: - x86_64-apple-darwin - aarch64-apple-darwin cfg: - - unsync_insert - - unsync_iters - - unsync_get - - unsync_constructor - swmr_insert - swmr_iters - swmr_get - swmr_constructor - - swmr_generic_insert - - swmr_generic_iters - - swmr_generic_get - - swmr_generic_constructor + - swmr_multiple_version_insert + - swmr_multiple_version_iters + - swmr_multiple_version_get + - swmr_multiple_version_constructor # Exclude invalid combinations exclude: - os: ubuntu-latest @@ -303,10 +327,10 @@ jobs: # - swmr_iters # - swmr_get # - swmr_constructor - # - swmr_generic_insert - # - swmr_generic_iters - # - swmr_generic_get - # - swmr_generic_constructor + # - swmr_insert + # - swmr_iters + # - swmr_get + # - swmr_constructor # # Exclude invalid combinations # exclude: # - os: ubuntu-latest @@ -408,6 +432,7 @@ jobs: - build - cross - test + - no-std - sanitizer steps: - uses: actions/checkout@v4 @@ -433,7 +458,7 @@ jobs: - name: Run tarpaulin uses: actions-rs/cargo@v1 env: - RUSTFLAGS: "--cfg all_tests" + RUSTFLAGS: "--cfg all_orderwal_tests" with: command: tarpaulin args: --all-features --run-types tests --run-types doctests --workspace --out xml diff --git a/CHANGELOG.md b/CHANGELOG.md index 4290633..a1a3629 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,17 @@ # Releases +## 0.5.0 (Oct 27th, 2024) + +- Refactor the project to make all of the WALs based on the generic implementation. +- Support different memtables based on [`crossbeam-skiplist`](https://github.com/crossbeam-rs/crossbeam) or [`skl`](https://github.com/al8n/skl) +- More user-friendly APIs +- Support `no-std` environment + ## 0.4.0 (Sep 30th, 2024) FEATURES -- Support `K: ?Sized` and `V: ?Sized` for `GenericOrderWal`. +- Support `K: ?Sized` and `V: ?Sized` for `OrderWal`. - Use `flush_header_and_range` instead of `flush_range` during insertion. ## 0.1.0 (Sep 14th, 2024) diff --git a/Cargo.toml b/Cargo.toml index 85ed467..27c41ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "orderwal" -version = "0.4.1" +version = "0.5.0" edition = "2021" repository = "https://github.com/al8n/orderwal" homepage = "https://github.com/al8n/orderwal" documentation = "https://docs.rs/orderwal" -description = "A generic-purpose, atomic, ordered, zero-copy, Write-Ahead Log implementation for Rust."
+description = "A generic-purpose, atomic, ordered, zero-copy read, zero-cost (in-place) write, Write-Ahead Log implementation for Rust." license = "MIT OR Apache-2.0" -rust-version = "1.80" -categories = ["filesystem", "database-implementations", "development-tools", "data-structures"] +rust-version = "1.81.0" +categories = ["filesystem", "database-implementations", "development-tools", "data-structures", "no-std"] keywords = ["wal", "write-ahead-log", "append-only", "append-only-log", "bitcask"] [[bench]] @@ -17,8 +17,10 @@ name = "foo" harness = false [features] -default = ["std"] -std = ["rarena-allocator/default", "crossbeam-skiplist/default", "bitflags/std", "dbutils/default", "among/default", "faststr?/default", "bytes?/default", "smol_str?/default"] +default = ["memmap"] +alloc = ["rarena-allocator/alloc", "skl/alloc", "dbutils/alloc"] +std = ["rarena-allocator/default", "crossbeam-skiplist/default", "bitflags/std", "dbutils/default", "among/default", "faststr?/default", "bytes?/default", "smol_str?/default", "skl/std"] +memmap = ["std", "rarena-allocator/memmap", "skl/memmap"] xxhash3 = ["dbutils/xxhash3", "std"] xxhash64 = ["dbutils/xxhash64", "std"] @@ -28,12 +30,14 @@ tracing = ["dep:tracing", "dbutils/tracing"] [dependencies] among = { version = "0.1", default-features = false, features = ["either"] } bitflags = { version = "2", default-features = false } -dbutils = { version = "0.6", default-features = false, features = ["crc32fast"] } +dbutils = { version = "0.9", default-features = false, features = ["crc32fast"] } +derive-where = "1" ref-cast = "1" -rarena-allocator = { version = "0.4", default-features = false, features = ["memmap"] } +rarena-allocator = { version = "0.4", default-features = false } crossbeam-skiplist = { version = "0.1", default-features = false, package = "crossbeam-skiplist-pr1132" } +crossbeam-skiplist-mvcc = "0.2" +skl = { version = "0.19", default-features = false, features = ["alloc"] } paste = "1" -thiserror = "1" bytes = { version = "1", default-features = false, optional = true } smallvec = { version = "1", default-features = false, optional = true, features = ["const_generics"] } @@ -68,17 +72,28 @@ rustdoc-args = ["--cfg", "docsrs"] rust_2018_idioms = "warn" single_use_lifetimes = "warn" unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(all_tests)', - 'cfg(test_unsync_constructor)', - 'cfg(test_unsync_insert)', - 'cfg(test_unsync_iters)', - 'cfg(test_unsync_get)', + 'cfg(all_orderwal_tests)', 'cfg(test_swmr_constructor)', 'cfg(test_swmr_insert)', 'cfg(test_swmr_iters)', 'cfg(test_swmr_get)', - 'cfg(test_swmr_generic_constructor)', - 'cfg(test_swmr_generic_insert)', - 'cfg(test_swmr_generic_iters)', - 'cfg(test_swmr_generic_get)', + 'cfg(test_swmr_multiple_version_constructor)', + 'cfg(test_swmr_multiple_version_insert)', + 'cfg(test_swmr_multiple_version_iters)', + 'cfg(test_swmr_multiple_version_get)', ] } + +[[example]] +name = "zero_cost" +path = "examples/zero_cost.rs" +required-features = ["memmap"] + +[[example]] +name = "multiple_version" +path = "examples/multiple_version.rs" +required-features = ["memmap"] + +[[example]] +name = "generic_not_sized" +path = "examples/generic_not_sized.rs" +required-features = ["memmap"] diff --git a/README-zh_CN.md b/README-zh_CN.md index ae4c4a8..01e25a3 100644 --- a/README-zh_CN.md +++ b/README-zh_CN.md @@ -3,7 +3,7 @@
-An ordered, zero-copy, Write-Ahead Log implementation for Rust. +A generic-purpose, atomic, ordered, zero-copy read, zero-cost (in-place) write, Write-Ahead Log implementation for Rust. [github][Github-url] LoC @@ -15,16 +15,49 @@ An ordered, zero-copy, Write-Ahead Log implementation for Rust. [crates.io][crates-url] license -[English][en-url] | 简体中文 +English | [简体中文][zh-cn-url]
+## Introduction + +`orderwal` is a generic-purpose, atomic, ordered, zero-copy read, zero-cost (in-place) write, concurrent-safe, pre-allocate style (memory map) write-ahead log for developing databases. + +`orderwal` also supports generic structured key and value types, so it is not limited to just bytes like other implementations. + ## Installation -```toml -[dependencies] -orderwal = "0.3" -``` +- Default (with on-disk support) + + ```toml + [dependencies] + orderwal = "0.5" + ``` + +- `std` only (without on-disk support) + + ```toml + [dependencies] + orderwal = { version = "0.5", default-features = false, features = ["std"] } + ``` + +- `no-std` (`alloc` required) + + ```toml + [dependencies] + orderwal = { version = "0.5", default-features = false, features = ["alloc"] } + ``` + +## Example + +See [examples](./examples/) for more information. + +## Related projects + +- [`aol`](https://github.com/al8n/aol): Yet another generic-purpose, append-only write-ahead log implementation based on `std::fs::File`. +- [`skl`](https://github.com/al8n/skl): A lock-free, ARENA-based skiplist implementation, which supports in-memory and on-disk modes, suitable for a frozen durable data file or the memtable of an LSM database. +- [`valog`](https://github.com/al8n/valog): A lock-free, generic, lightweight value log for WiscKey or Bitcask architecture databases. +- [`dtlog`](https://github.com/al8n/dtlog): A log for tracking discard stats of multi-file databases. #### License `orderwal` is under the terms of both the MIT license and the Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. Copyright (c) 2024 Al Liu. [Github-url]: https://github.com/al8n/orderwal/ -[CI-url]: https://github.com/al8n/template/actions/workflows/template.yml +[CI-url]: https://github.com/al8n/orderwal/actions/workflows/ci.yml [doc-url]: https://docs.rs/orderwal [crates-url]: https://crates.io/crates/orderwal [codecov-url]: https://app.codecov.io/gh/al8n/orderwal/ -[license-url]: https://opensource.org/licenses/Apache-2.0 -[rustc-url]: https://github.com/rust-lang/rust/blob/master/RELEASES.md -[license-apache-url]: https://opensource.org/licenses/Apache-2.0 -[license-mit-url]: https://opensource.org/licenses/MIT -[en-url]: https://github.com/al8n/orderwal/tree/main/README.md +[zh-cn-url]: https://github.com/al8n/orderwal/tree/main/README-zh_CN.md diff --git a/README.md b/README.md index 1288103..01e25a3 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ 
-A generic-purpose, atomic, ordered, zero-copy, Write-Ahead Log implementation for Rust. +A generic-purpose, atomic, ordered, zero-copy read, zero-cost (in-place) write, Write-Ahead Log implementation for Rust. [github][Github-url] LoC [Build][CI-url] [codecov][codecov-url] [docs.rs][doc-url] [crates.io][crates-url] [crates.io][crates-url] license English | [简体中文][zh-cn-url] 
 ## Introduction -`orderwal` is generic-purpose, atomic, ordered, zero-copy, concurrent-safe, pre-allocate style (memory map) write-ahead-log for developing databases. +`orderwal` is a generic-purpose, atomic, ordered, zero-copy read, zero-cost (in-place) write, concurrent-safe, pre-allocate style (memory map) write-ahead log for developing databases. `orderwal` also supports generic structured key and value types, so it is not limited to just bytes like other implementations. ## Installation -```toml -[dependencies] -orderwal = "0.4" -``` +- Default (with on-disk support) + + ```toml + [dependencies] + orderwal = "0.5" + ``` + +- `std` only (without on-disk support) + + ```toml + [dependencies] + orderwal = { version = "0.5", default-features = false, features = ["std"] } + ``` + +- `no-std` (`alloc` required) + + ```toml + [dependencies] + orderwal = { version = "0.5", default-features = false, features = ["alloc"] } + ``` ## Example See [examples](./examples/) for more information. ## Related projects - [`aol`](https://github.com/al8n/aol): Yet another generic-purpose, append-only write-ahead log implementation based on `std::fs::File`. - [`skl`](https://github.com/al8n/skl): A lock-free, ARENA-based skiplist implementation, which supports in-memory and on-disk modes, suitable for a frozen durable data file or the memtable of an LSM database. +- [`valog`](https://github.com/al8n/valog): A lock-free, generic, lightweight value log for WiscKey or Bitcask architecture databases. +- [`dtlog`](https://github.com/al8n/dtlog): A log for tracking discard stats of multi-file databases. 
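For a quick taste of the 0.5.0 API, here is a minimal sketch distilled from `examples/generic_not_sized.rs` in this release; the `tempfile` dev-dependency, the default `memmap` feature, and the file name are assumptions of the sketch, and the `OrderWal<str, [u8]>` type parameters simply follow the `&str` keys and `&[u8]` values used below:

```rust
use orderwal::{
  base::{OrderWal, Reader, Writer},
  Builder,
};

fn main() {
  let dir = tempfile::tempdir().unwrap();
  let path = dir.path().join("quickstart.wal");

  // `map_mut` is unsafe because the WAL is backed by a memory-mapped file:
  // the caller must guarantee the file is not modified out of process.
  let mut wal = unsafe {
    Builder::new()
      .with_capacity(1024 * 1024)
      .with_create_new(true)
      .with_read(true)
      .with_write(true)
      .map_mut::<OrderWal<str, [u8]>, _>(&path)
      .unwrap()
  };

  // Zero-cost (in-place) write, zero-copy read.
  wal.insert("a", b"a1".as_slice()).unwrap();
  assert_eq!(wal.get("a").unwrap().value(), b"a1");
}
```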
#### License diff --git a/ci/miri_tb.sh b/ci/miri_tb.sh index 93fa21a..fb34df0 100755 --- a/ci/miri_tb.sh +++ b/ci/miri_tb.sh @@ -1,5 +1,10 @@ #!/bin/bash -set -e +set -euxo pipefail +IFS=$'\n\t' + +# We need 'ts' for the per-line timing +sudo apt-get -y install moreutils +echo # Check if TARGET and CONFIG_FLAGS are provided, otherwise panic if [ -z "$1" ]; then @@ -19,9 +24,9 @@ rustup toolchain install nightly --component miri rustup override set nightly cargo miri setup +# Zmiri-ignore-leaks needed because of https://github.com/crossbeam-rs/crossbeam/issues/579 export MIRIFLAGS="-Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-tree-borrows -Zmiri-ignore-leaks" - export RUSTFLAGS="--cfg test_$CONFIG_FLAGS" -cargo miri test --tests --target $TARGET --lib +cargo miri test --tests --target $TARGET --lib 2>&1 | ts -i '%.s ' diff --git a/ci/sanitizer.sh b/ci/sanitizer.sh index 72cf179..aa26fa9 100755 --- a/ci/sanitizer.sh +++ b/ci/sanitizer.sh @@ -5,15 +5,15 @@ set -ex export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" # Run address sanitizer -RUSTFLAGS="-Z sanitizer=address --cfg all_tests" \ +RUSTFLAGS="-Z sanitizer=address --cfg all_orderwal_tests" \ cargo test -Z build-std --all --release --tests --target x86_64-unknown-linux-gnu --all-features --exclude benchmarks -- --test-threads=1 # Run memory sanitizer -RUSTFLAGS="-Z sanitizer=memory --cfg all_tests" \ +RUSTFLAGS="-Z sanitizer=memory --cfg all_orderwal_tests" \ cargo test -Z build-std --all --release --tests --target x86_64-unknown-linux-gnu --all-features --exclude benchmarks -- --test-threads=1 # Run thread sanitizer cargo clean TSAN_OPTIONS="suppressions=$(pwd)/ci/tsan" \ -RUSTFLAGS="${RUSTFLAGS:-} -Z sanitizer=thread --cfg all_tests" \ +RUSTFLAGS="${RUSTFLAGS:-} -Z sanitizer=thread --cfg all_orderwal_tests" \ cargo test -Z build-std --all --release --target x86_64-unknown-linux-gnu --all-features --tests --exclude benchmarks -- --test-threads=1 \ No newline at end of file diff --git a/examples/generic_not_sized.rs b/examples/generic_not_sized.rs index f328e4d..25cd293 100644 --- a/examples/generic_not_sized.rs +++ b/examples/generic_not_sized.rs @@ -1 +1,28 @@ -fn main() {} +use orderwal::{ + base::{OrderWal, Reader, Writer}, + Builder, +}; + +fn main() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("not_sized.wal"); + + let mut wal = unsafe { + Builder::new() + .with_capacity(1024 * 1024) + .with_create_new(true) + .with_read(true) + .with_write(true) + .map_mut::, _>(&path) + .unwrap() + }; + + wal.insert("a", b"a1".as_slice()).unwrap(); + wal.insert("c", b"c1".as_slice()).unwrap(); + + let a = wal.get("a").unwrap(); + let c = wal.get("c").unwrap(); + + assert_eq!(a.value(), b"a1"); + assert_eq!(c.value(), b"c1"); +} diff --git a/examples/multiple_version.rs b/examples/multiple_version.rs new file mode 100644 index 0000000..d57adc4 --- /dev/null +++ b/examples/multiple_version.rs @@ -0,0 +1,36 @@ +use orderwal::{ + multiple_version::{OrderWal, Reader, Writer}, + Builder, +}; + +fn main() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("not_sized.wal"); + + let mut wal = unsafe { + Builder::new() + .with_capacity(1024 * 1024) + .with_create_new(true) + .with_read(true) + .with_write(true) + .map_mut::, _>(&path) + .unwrap() + }; + + wal.insert(1, "a", b"a1".as_slice()).unwrap(); + wal.insert(3, "a", b"a3".as_slice()).unwrap(); + wal.insert(1, "c", b"c1".as_slice()).unwrap(); + wal.insert(3, "c", b"c3".as_slice()).unwrap(); + + let a = wal.get(2, 
"a").unwrap(); + let c = wal.get(2, "c").unwrap(); + + assert_eq!(a.value(), b"a1"); + assert_eq!(c.value(), b"c1"); + + let a = wal.get(3, "a").unwrap(); + let c = wal.get(3, "c").unwrap(); + + assert_eq!(a.value(), b"a3"); + assert_eq!(c.value(), b"c3"); +} diff --git a/examples/zero_cost.rs b/examples/zero_cost.rs index c6b09d6..0ae38b8 100644 --- a/examples/zero_cost.rs +++ b/examples/zero_cost.rs @@ -1,8 +1,10 @@ use std::{cmp, sync::Arc, thread::spawn}; +use dbutils::leb128::{decode_u64_varint, encode_u64_varint, encoded_u64_varint_len}; use orderwal::{ - swmr::generic::{Comparable, Equivalent, GenericBuilder, KeyRef, Type, TypeRef}, - utils::*, + base::{OrderWal, Reader, Writer}, + types::{KeyRef, Type, TypeRef}, + Builder, Comparable, Equivalent, }; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -20,7 +22,7 @@ impl Person { } } -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] struct PersonRef<'a> { id: u64, name: &'a str, @@ -35,7 +37,7 @@ impl PartialEq for PersonRef<'_> { impl Eq for PersonRef<'_> {} impl PartialOrd for PersonRef<'_> { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } @@ -56,7 +58,7 @@ impl Equivalent for PersonRef<'_> { } impl Comparable for PersonRef<'_> { - fn compare(&self, key: &Person) -> std::cmp::Ordering { + fn compare(&self, key: &Person) -> core::cmp::Ordering { self.id.cmp(&key.id).then_with(|| self.name.cmp(&key.name)) } } @@ -68,7 +70,7 @@ impl Equivalent> for Person { } impl Comparable> for Person { - fn compare(&self, key: &PersonRef<'_>) -> std::cmp::Ordering { + fn compare(&self, key: &PersonRef<'_>) -> core::cmp::Ordering { self .id .cmp(&key.id) @@ -79,7 +81,7 @@ impl Comparable> for Person { impl<'a> KeyRef<'a, Person> for PersonRef<'a> { fn compare(&self, a: &Q) -> cmp::Ordering where - Q: ?Sized + Ord + Comparable, + Q: ?Sized + Comparable, { Comparable::compare(a, self).reverse() } @@ -115,7 +117,10 @@ impl Type for Person { } #[inline] - fn encode_to_buffer(&self, buf: &mut orderwal::VacantBuffer<'_>) -> Result { + fn encode_to_buffer( + &self, + buf: &mut orderwal::types::VacantBuffer<'_>, + ) -> Result { let id_size = buf.put_u64_varint(self.id)?; buf.put_slice_unchecked(self.name.as_bytes()); Ok(id_size + self.name.len()) @@ -137,18 +142,18 @@ fn main() { let people = (0..100) .map(|_| { let p = Person::random(); - let v = format!("My name is {}", p.name); + let v = std::format!("My name is {}", p.name); (p, v) }) .collect::>(); let mut wal = unsafe { - GenericBuilder::new() + Builder::new() .with_capacity(1024 * 1024) .with_create_new(true) .with_read(true) .with_write(true) - .map_mut::(&path) + .map_mut::, _>(&path) .unwrap() }; diff --git a/src/batch.rs b/src/batch.rs new file mode 100644 index 0000000..ce55883 --- /dev/null +++ b/src/batch.rs @@ -0,0 +1,226 @@ +use crate::{ + memtable::BaseTable, + wal::{KeyPointer, ValuePointer}, +}; + +use super::{ + sealed::{WithVersion, WithoutVersion}, + types::{BufWriter, EncodedEntryMeta, EntryFlags}, +}; + +/// An entry can be inserted into the WALs through [`Batch`]. +pub struct BatchEntry { + pub(crate) key: K, + pub(crate) value: Option, + pub(crate) flag: EntryFlags, + pub(crate) meta: EncodedEntryMeta, + pointers: Option<(KeyPointer, Option>)>, + pub(crate) version: Option, +} + +impl BatchEntry +where + M: BaseTable, + for<'a> M::Item<'a>: WithoutVersion, +{ + /// Creates a new entry. 
+ #[inline] + pub const fn new(key: K, value: V) -> Self { + Self { + key, + value: Some(value), + flag: EntryFlags::empty(), + meta: EncodedEntryMeta::batch_zero(false), + pointers: None, + version: None, + } + } + + /// Creates a tombstone entry. + #[inline] + pub const fn tombstone(key: K) -> Self { + Self { + key, + value: None, + flag: EntryFlags::REMOVED, + meta: EncodedEntryMeta::batch_zero(false), + pointers: None, + version: None, + } + } +} + +impl BatchEntry +where + M: BaseTable, + for<'a> M::Item<'a>: WithVersion, +{ + /// Creates a new entry with version. + #[inline] + pub fn with_version(version: u64, key: K, value: V) -> Self { + Self { + key, + value: Some(value), + flag: EntryFlags::empty() | EntryFlags::VERSIONED, + meta: EncodedEntryMeta::batch_zero(true), + pointers: None, + version: Some(version), + } + } + + /// Creates a tombstone entry with version. + #[inline] + pub fn tombstone_with_version(version: u64, key: K) -> Self { + Self { + key, + value: None, + flag: EntryFlags::REMOVED | EntryFlags::VERSIONED, + meta: EncodedEntryMeta::batch_zero(true), + pointers: None, + version: Some(version), + } + } + + /// Returns the version of the entry. + #[inline] + pub const fn version(&self) -> u64 { + match self.version { + Some(version) => version, + None => unreachable!(), + } + } + + /// Set the version of the entry. + #[inline] + pub fn set_version(&mut self, version: u64) { + self.version = Some(version); + } +} + +impl BatchEntry +where + M: BaseTable, +{ + /// Returns the length of the key. + #[inline] + pub fn key_len(&self) -> usize + where + K: BufWriter, + { + self.key.encoded_len() + } + + /// Returns the length of the value. + #[inline] + pub fn value_len(&self) -> usize + where + V: BufWriter, + { + self.value.as_ref().map_or(0, |v| v.encoded_len()) + } + + /// Returns the key. + #[inline] + pub const fn key(&self) -> &K { + &self.key + } + + /// Returns the value. + #[inline] + pub const fn value(&self) -> Option<&V> { + self.value.as_ref() + } + + /// Consumes the entry and returns the key and value. + #[inline] + pub fn into_components(self) -> (K, Option) { + (self.key, self.value) + } + + #[inline] + pub(crate) fn encoded_key_len(&self) -> usize + where + K: BufWriter, + V: BufWriter, + { + self.key.encoded_len() + } + + #[inline] + pub(crate) const fn internal_version(&self) -> Option { + self.version + } + + #[inline] + pub(crate) fn take_pointer( + &mut self, + ) -> Option<(KeyPointer, Option>)> { + self.pointers.take() + } + + #[inline] + pub(crate) fn set_pointer(&mut self, kp: KeyPointer, vp: Option>) { + self.pointers = Some((kp, vp)); + } + + #[inline] + pub(crate) fn set_encoded_meta(&mut self, meta: EncodedEntryMeta) { + self.meta = meta; + } + + #[inline] + pub(crate) fn encoded_meta(&self) -> &EncodedEntryMeta { + &self.meta + } +} + +/// A trait for batch insertions. +pub trait Batch { + /// Any type that can be converted into a key. + type Key; + /// Any type that can be converted into a value. + type Value; + + /// The iterator type. + type IterMut<'a>: Iterator> + where + Self: 'a, + Self::Key: 'a, + Self::Value: 'a, + M: 'a; + + /// Returns an iterator over the keys and values. 
+ fn iter_mut<'a>(&'a mut self) -> Self::IterMut<'a> + where + Self: 'a, + Self::Key: 'a, + Self::Value: 'a, + M: 'a; +} + +impl Batch for T +where + M: BaseTable, + for<'a> &'a mut T: IntoIterator>, +{ + type Key = K; + type Value = V; + + type IterMut<'a> + = <&'a mut T as IntoIterator>::IntoIter + where + Self: 'a, + Self::Key: 'a, + Self::Value: 'a, + M: 'a; + + fn iter_mut<'a>(&'a mut self) -> Self::IterMut<'a> + where + Self: 'a, + Self::Key: 'a, + Self::Value: 'a, + M: 'a, + { + IntoIterator::into_iter(self) + } +} diff --git a/src/buffer.rs b/src/buffer.rs deleted file mode 100644 index 87f99ae..0000000 --- a/src/buffer.rs +++ /dev/null @@ -1,29 +0,0 @@ -pub use dbutils::{buffer::VacantBuffer, builder}; - -macro_rules! builder_ext { - ($($name:ident),+ $(,)?) => { - $( - paste::paste! { - impl $name { - #[doc = "Creates a new `" $name "` with the given size and builder closure which requires `FnOnce`."] - #[inline] - pub const fn once(size: u32, f: F) -> Self - where - F: for<'a> FnOnce(&mut VacantBuffer<'a>) -> Result<(), E>, - { - Self { size, f } - } - } - } - )* - }; -} - -builder!( - /// A value builder for the wal, which requires the value size for accurate allocation and a closure to build the value. - pub ValueBuilder(u32); - /// A key builder for the wal, which requires the key size for accurate allocation and a closure to build the key. - pub KeyBuilder(u32); -); - -builder_ext!(ValueBuilder, KeyBuilder,); diff --git a/src/builder.rs b/src/builder.rs index 5bd32db..c866133 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1,69 +1,91 @@ -use checksum::BuildChecksumer; -use options::ArenaOptionsExt; -use wal::{sealed::Constructor, Wal}; +use dbutils::checksum::Crc32; +use skl::KeySize; -use super::*; +use super::{ + error::Error, + memtable::BaseTable, + options::{arena_options, Options}, + sealed::Constructable, +}; + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] +mod memmap; /// A write-ahead log builder. -pub struct Builder { +pub struct Builder +where + M: BaseTable, +{ pub(super) opts: Options, - pub(super) cmp: C, pub(super) cks: S, + pub(super) memtable_opts: M::Options, } -impl Default for Builder { +impl Default for Builder +where + M: BaseTable, + M::Options: Default, +{ #[inline] fn default() -> Self { Self::new() } } -impl Builder { +impl Builder +where + M: BaseTable, + M::Options: Default, +{ /// Returns a new write-ahead log builder with the given options. 
#[inline] pub fn new() -> Self { Self { opts: Options::default(), - cmp: Ascend, cks: Crc32::default(), + memtable_opts: M::Options::default(), } } } -impl Builder { - /// Returns a new write-ahead log builder with the new comparator +impl Builder +where + M: BaseTable, +{ + /// Returns a new write-ahead log builder with the new checksumer /// /// ## Example /// /// ```rust - /// use orderwal::{Builder, Ascend}; + /// use orderwal::{Builder, Crc32, multiple_version::DefaultTable}; /// - /// let opts = Builder::new().with_comparator(Ascend); + /// let opts = Builder::>::new().with_checksumer(Crc32::new()); /// ``` #[inline] - pub fn with_comparator(self, cmp: NC) -> Builder { + pub fn with_checksumer(self, cks: NS) -> Builder { Builder { opts: self.opts, - cmp, - cks: self.cks, + cks, + memtable_opts: self.memtable_opts, } } - /// Returns a new write-ahead log builder with the new checksumer + /// Returns a new write-ahead log builder with the new options /// /// ## Example /// /// ```rust - /// use orderwal::{Builder, Crc32}; + /// use orderwal::{Builder, Options, multiple_version::DefaultTable}; /// - /// let opts = Builder::new().with_checksumer(Crc32::new()); + /// let opts = Builder::>::new().with_options(Options::default()); /// ``` #[inline] - pub fn with_checksumer(self, cks: NS) -> Builder { - Builder { - opts: self.opts, - cmp: self.cmp, - cks, + pub fn with_options(self, opts: Options) -> Self { + Self { + opts, + cks: self.cks, + memtable_opts: self.memtable_opts, } } @@ -72,102 +94,101 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::{Builder, Options}; + /// use orderwal::{Builder, multiple_version::{ArenaTable, ArenaTableOptions}}; /// - /// let opts = Builder::new().with_options(Options::default()); + /// let opts = Builder::>::new().with_memtable_options(ArenaTableOptions::default()); /// ``` #[inline] - pub fn with_options(self, opts: Options) -> Self { + pub fn with_memtable_options(self, opts: M::Options) -> Self { Self { - opts, - cmp: self.cmp, + opts: self.opts, cks: self.cks, + memtable_opts: opts, } } - /// Set the reserved bytes of the WAL. - /// - /// The `reserved` is used to configure the start position of the WAL. This is useful - /// when you want to add some bytes as your own WAL's header. - /// - /// The default reserved is `0`. + /// Returns a new write-ahead log builder with the new memtable. /// /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::{DefaultTable, ArenaTable}}; /// - /// let opts = Builder::new().with_reserved(8); + /// let opts = Builder::>::new().change_memtable::>(); /// ``` #[inline] - pub const fn with_reserved(mut self, reserved: u32) -> Self { - self.opts = self.opts.with_reserved(reserved); - self + pub fn change_memtable(self) -> Builder + where + NM: BaseTable, + NM::Options: Default, + { + Builder { + opts: self.opts, + cks: self.cks, + memtable_opts: NM::Options::default(), + } } - /// Get the reserved of the WAL. - /// - /// The `reserved` is used to configure the start position of the WAL. This is useful - /// when you want to add some bytes as your own WAL's header. - /// - /// The default reserved is `0`. 
+ /// Returns a new write-ahead log builder with the new memtable and its options /// /// ## Example /// /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_reserved(8); + /// use orderwal::{Builder, multiple_version::{DefaultTable, ArenaTable, ArenaTableOptions}}; /// - /// assert_eq!(opts.reserved(), 8); + /// let opts = Builder::>::new().change_memtable_with_options::>(ArenaTableOptions::default().with_capacity(1000)); /// ``` #[inline] - pub const fn reserved(&self) -> u32 { - self.opts.reserved() + pub fn change_memtable_with_options(self, opts: NM::Options) -> Builder + where + NM: BaseTable, + { + Builder { + opts: self.opts, + cks: self.cks, + memtable_opts: opts, + } } - /// Set if lock the meta of the WAL in the memory to prevent OS from swapping out the header of WAL. - /// When using memory map backed WAL, the meta of the WAL - /// is in the header, meta is frequently accessed, - /// lock (`mlock` on the header) the meta can reduce the page fault, - /// but yes, this means that one WAL will have one page are locked in memory, - /// and will not be swapped out. So, this is a trade-off between performance and memory usage. + /// Set the reserved bytes of the WAL. /// - /// Default is `true`. + /// The `reserved` is used to configure the start position of the WAL. This is useful + /// when you want to add some bytes as your own WAL's header. /// - /// This configuration has no effect on windows and vec backed WAL. + /// The default reserved is `0`. /// /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let opts = Builder::new().with_lock_meta(false); + /// let opts = Builder::>::new().with_reserved(8); /// ``` #[inline] - pub const fn with_lock_meta(mut self, lock_meta: bool) -> Self { - self.opts.lock_meta = lock_meta; + pub const fn with_reserved(mut self, reserved: u32) -> Self { + self.opts = self.opts.with_reserved(reserved); self } - /// Get if lock the meta of the WAL in the memory to prevent OS from swapping out the header of WAL. - /// When using memory map backed WAL, the meta of the WAL - /// is in the header, meta is frequently accessed, - /// lock (`mlock` on the header) the meta can reduce the page fault, - /// but yes, this means that one WAL will have one page are locked in memory, - /// and will not be swapped out. So, this is a trade-off between performance and memory usage. + /// Get the reserved of the WAL. + /// + /// The `reserved` is used to configure the start position of the WAL. This is useful + /// when you want to add some bytes as your own WAL's header. + /// + /// The default reserved is `0`. /// /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; + /// + /// let opts = Builder::>::new().with_reserved(8); /// - /// let opts = Builder::new().with_lock_meta(false); - /// assert_eq!(opts.lock_meta(), false); + /// assert_eq!(opts.reserved(), 8); /// ``` #[inline] - pub const fn lock_meta(&self) -> bool { - self.opts.lock_meta + pub const fn reserved(&self) -> u32 { + self.opts.reserved() } /// Returns the magic version. 
@@ -177,9 +198,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_magic_version(1); + /// let options = Builder::>::new().with_magic_version(1); /// assert_eq!(options.magic_version(), 1); /// ``` #[inline] @@ -194,9 +215,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_capacity(1000); + /// let options = Builder::>::new().with_capacity(1000); /// assert_eq!(options.capacity(), 1000); /// ``` #[inline] @@ -211,13 +232,13 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, KeySize, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); + /// let options = Builder::>::new().with_maximum_key_size(KeySize::with(1024)); + /// assert_eq!(options.maximum_key_size(), KeySize::with(1024)); /// ``` #[inline] - pub const fn maximum_key_size(&self) -> u32 { + pub const fn maximum_key_size(&self) -> KeySize { self.opts.maximum_key_size() } @@ -228,9 +249,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_maximum_value_size(1024); + /// let options = Builder::>::new().with_maximum_value_size(1024); /// assert_eq!(options.maximum_value_size(), 1024); /// ``` #[inline] @@ -245,9 +266,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new(); + /// let options = Builder::>::new(); /// assert_eq!(options.sync(), true); /// ``` #[inline] @@ -264,9 +285,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_capacity(100); + /// let options = Builder::>::new().with_capacity(100); /// assert_eq!(options.capacity(), 100); /// ``` #[inline] @@ -280,13 +301,13 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, KeySize, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); + /// let options = Builder::>::new().with_maximum_key_size(KeySize::with(1024)); + /// assert_eq!(options.maximum_key_size(), KeySize::with(1024)); /// ``` #[inline] - pub const fn with_maximum_key_size(mut self, size: u32) -> Self { + pub const fn with_maximum_key_size(mut self, size: KeySize) -> Self { self.opts = self.opts.with_maximum_key_size(size); self } @@ -296,9 +317,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_maximum_value_size(1024); + /// let options = Builder::>::new().with_maximum_value_size(1024); /// assert_eq!(options.maximum_value_size(), 1024); /// ``` #[inline] @@ -314,9 +335,9 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; + /// use orderwal::{Builder, multiple_version::DefaultTable}; /// - /// let options = Builder::new().with_sync(false); + /// let options = Builder::>::new().with_sync(false); 
/// assert_eq!(options.sync(), false); /// ``` #[inline] @@ -332,9 +353,10 @@ impl Builder { /// ## Example /// /// ```rust - /// use orderwal::Builder; /// - /// let options = Builder::new().with_magic_version(1); + /// use orderwal::{Builder, multiple_version::DefaultTable}; + /// + /// let options = Builder::>::new().with_magic_version(1); /// assert_eq!(options.magic_version(), 1); /// ``` #[inline] @@ -344,609 +366,36 @@ impl Builder { } } -impl Builder { - /// Sets the option for read access. - /// - /// This option, when true, will indicate that the file should be - /// `read`-able if opened. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_read(true); - /// ``` - #[inline] - pub fn with_read(mut self, read: bool) -> Self { - self.opts.read = read; - self - } - - /// Sets the option for write access. - /// - /// This option, when true, will indicate that the file should be - /// `write`-able if opened. - /// - /// If the file already exists, any write calls on it will overwrite its - /// contents, without truncating it. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_write(true); - /// ``` - #[inline] - pub fn with_write(mut self, write: bool) -> Self { - self.opts.write = write; - self - } - - /// Sets the option for the append mode. - /// - /// This option, when true, means that writes will append to a file instead - /// of overwriting previous contents. - /// Note that setting `.write(true).append(true)` has the same effect as - /// setting only `.append(true)`. - /// - /// For most filesystems, the operating system guarantees that all writes are - /// atomic: no writes get mangled because another process writes at the same - /// time. - /// - /// One maybe obvious note when using append-mode: make sure that all data - /// that belongs together is written to the file in one operation. This - /// can be done by concatenating strings before passing them to [`write()`], - /// or using a buffered writer (with a buffer of adequate size), - /// and calling [`flush()`] when the message is complete. - /// - /// If a file is opened with both read and append access, beware that after - /// opening, and after every write, the position for reading may be set at the - /// end of the file. So, before writing, save the current position (using - /// [seek]\([SeekFrom](std::io::SeekFrom)::[Current]\(opts))), and restore it before the next read. - /// - /// ## Note - /// - /// This function doesn't create the file if it doesn't exist. Use the - /// [`Options::with_create`] method to do so. - /// - /// [`write()`]: std::io::Write::write "io::Write::write" - /// [`flush()`]: std::io::Write::flush "io::Write::flush" - /// [seek]: std::io::Seek::seek "io::Seek::seek" - /// [Current]: std::io::SeekFrom::Current "io::SeekFrom::Current" - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_append(true); - /// ``` - #[inline] - pub fn with_append(mut self, append: bool) -> Self { - self.opts.write = true; - self.opts.append = append; - self - } - - /// Sets the option for truncating a previous file. - /// - /// If a file is successfully opened with this option set it will truncate - /// the file to opts length if it already exists. - /// - /// The file must be opened with write access for truncate to work. 
- /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_write(true).with_truncate(true); - /// ``` - #[inline] - pub fn with_truncate(mut self, truncate: bool) -> Self { - self.opts.truncate = truncate; - self.opts.write = true; - self - } - - /// Sets the option to create a new file, or open it if it already exists. - /// If the file does not exist, it is created and set the lenght of the file to the given size. - /// - /// In order for the file to be created, [`Options::with_write`] or - /// [`Options::with_append`] access must be used. - /// - /// See also [`std::fs::write()`][std::fs::write] for a simple function to - /// create a file with some given data. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_write(true).with_create(true); - /// ``` - #[inline] - pub fn with_create(mut self, val: bool) -> Self { - self.opts.create = val; - self - } - - /// Sets the option to create a new file and set the file length to the given value, failing if it already exists. - /// - /// No file is allowed to exist at the target location, also no (dangling) symlink. In this - /// way, if the call succeeds, the file returned is guaranteed to be new. - /// - /// This option is useful because it is atomic. Otherwise between checking - /// whether a file exists and creating a new one, the file may have been - /// created by another process (a TOCTOU race condition / attack). - /// - /// If `.with_create_new(true)` is set, [`.with_create()`] and [`.with_truncate()`] are - /// ignored. - /// - /// The file must be opened with write or append access in order to create - /// a new file. - /// - /// [`.with_create()`]: Builder::with_create - /// [`.with_truncate()`]: Builder::with_truncate - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new() - /// .with_write(true) - /// .with_create_new(true); - /// ``` - #[inline] - pub fn with_create_new(mut self, val: bool) -> Self { - self.opts.create_new = val; - self - } - - /// Configures the anonymous memory map to be suitable for a process or thread stack. - /// - /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on file-backed memory maps and vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Builder; - /// - /// let stack = Builder::new().with_stack(true); - /// ``` - #[inline] - pub fn with_stack(mut self, stack: bool) -> Self { - self.opts.stack = stack; - self - } - - /// Configures the anonymous memory map to be allocated using huge pages. - /// - /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows. - /// - /// The size of the requested page can be specified in page bits. If not provided, the system - /// default is requested. The requested length should be a multiple of this, or the mapping - /// will fail. - /// - /// This option has no effect on file-backed memory maps and vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_huge(Some(8)); - /// ``` - #[inline] - pub fn with_huge(mut self, page_bits: Option) -> Self { - self.opts.huge = page_bits; - self - } - - /// Populate (prefault) page tables for a mapping. - /// - /// For a file mapping, this causes read-ahead on the file. 
This will help to reduce blocking on page faults later. - /// - /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_populate(true); - /// ``` - #[inline] - pub fn with_populate(mut self, populate: bool) -> Self { - self.opts.populate = populate; - self - } -} - -impl Builder { - /// Returns `true` if the file should be opened with read access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_read(true); - /// assert_eq!(opts.read(), true); - /// ``` - #[inline] - pub const fn read(&self) -> bool { - self.opts.read - } - - /// Returns `true` if the file should be opened with write access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_write(true); - /// assert_eq!(opts.write(), true); - /// ``` - #[inline] - pub const fn write(&self) -> bool { - self.opts.write - } - - /// Returns `true` if the file should be opened with append access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_append(true); - /// assert_eq!(opts.append(), true); - /// ``` - #[inline] - pub const fn append(&self) -> bool { - self.opts.append - } - - /// Returns `true` if the file should be opened with truncate access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_truncate(true); - /// assert_eq!(opts.truncate(), true); - /// ``` - #[inline] - pub const fn truncate(&self) -> bool { - self.opts.truncate - } - - /// Returns `true` if the file should be created if it does not exist. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_create(true); - /// assert_eq!(opts.create(), true); - /// ``` - #[inline] - pub const fn create(&self) -> bool { - self.opts.create - } - - /// Returns `true` if the file should be created if it does not exist and fail if it does. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_create_new(true); - /// assert_eq!(opts.create_new(), true); - /// ``` - #[inline] - pub const fn create_new(&self) -> bool { - self.opts.create_new - } - - /// Returns `true` if the memory map should be suitable for a process or thread stack. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_stack(true); - /// assert_eq!(opts.stack(), true); - /// ``` - #[inline] - pub const fn stack(&self) -> bool { - self.opts.stack - } - - /// Returns the page bits of the memory map. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_huge(Some(8)); - /// assert_eq!(opts.huge(), Some(8)); - /// ``` - #[inline] - pub const fn huge(&self) -> Option { - self.opts.huge - } - - /// Returns `true` if the memory map should populate (prefault) page tables for a mapping. 
- /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Builder; - /// - /// let opts = Builder::new().with_populate(true); - /// assert_eq!(opts.populate(), true); - /// ``` - #[inline] - pub const fn populate(&self) -> bool { - self.opts.populate - } -} - -impl Builder { +impl Builder +where + M: BaseTable, +{ /// Creates a new in-memory write-ahead log backed by an aligned vec. /// /// ## Example /// /// ```rust - /// use orderwal::{swmr::OrderWal, Builder}; + /// + /// use orderwal::{base::OrderWal, Builder}; /// /// let wal = Builder::new() /// .with_capacity(1024) - /// .alloc::() + /// .alloc::>() /// .unwrap(); /// ``` - pub fn alloc(self) -> Result + pub fn alloc(self) -> Result> where - W: Wal, + W: Constructable, { - let Self { opts, cmp, cks } = self; + let Self { + opts, + cks, + memtable_opts, + } = self; arena_options(opts.reserved()) .with_capacity(opts.capacity()) .alloc() .map_err(Error::from_insufficient_space) - .and_then(|arena| >::new_in(arena, opts, cmp, cks).map(W::from_core)) - } - - /// Creates a new in-memory write-ahead log but backed by an anonymous mmap. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::OrderWal, Builder}; - /// - /// let wal = Builder::new() - /// .with_capacity(1024) - /// .map_anon::() - /// .unwrap(); - /// ``` - pub fn map_anon(self) -> Result - where - W: Wal, - { - let Self { opts, cmp, cks } = self; - arena_options(opts.reserved()) - .merge(&opts) - .map_anon() - .map_err(Into::into) - .and_then(|arena| >::new_in(arena, opts, cmp, cks).map(W::from_core)) - } - - /// Opens a write-ahead log backed by a file backed memory map in read-only mode. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::OrderWal, Builder}; - /// - /// # let dir = tempfile::tempdir().unwrap(); - /// # let path = dir.path().join("map.wal"); - /// - /// # let wal = unsafe { - /// # Builder::new() - /// # .with_capacity(1000).with_create(true).with_read(true).with_write(true) - /// # .map_mut::(&path) - /// # .unwrap() - /// # }; - /// - /// let wal = unsafe { - /// Builder::new() - /// .map::(&path) - /// .unwrap() - /// }; - pub unsafe fn map(self, path: P) -> Result - where - C: Comparator + CheapClone + 'static, - S: BuildChecksumer, - P: AsRef, - W: Wal, - { - self - .map_with_path_builder::(|| Ok(path.as_ref().to_path_buf())) - .map_err(|e| e.unwrap_right()) - } - - /// Opens a write-ahead log backed by a file backed memory map in read-only mode. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::OrderWal, Builder}; - /// - /// # let dir = tempfile::tempdir().unwrap(); - /// # let path = dir.path().join("map_with_path_builder.wal"); - /// - /// # let wal = unsafe { - /// # Builder::new() - /// # .with_capacity(1000).with_create(true).with_read(true).with_write(true) - /// # .map_mut::(&path) - /// # .unwrap() - /// # }; - /// - /// let wal = unsafe { - /// Builder::new() - /// .map_with_path_builder::(|| Ok(path)) - /// .unwrap() - /// }; - pub unsafe fn map_with_path_builder( - self, - path_builder: PB, - ) -> Result> - where - PB: FnOnce() -> Result, - C: Comparator + CheapClone + 'static, - S: BuildChecksumer, - W: Wal, - W::Pointer: Ord + 'static, - { - let Self { opts, cmp, cks } = self; - - arena_options(opts.reserved()) - .merge(&opts) - .with_read(true) - .map_with_path_builder(path_builder) - .map_err(|e| e.map_right(Into::into)) - .and_then(|arena| { - >::replay(arena, Options::new(), true, cmp, cks) - .map(>::from_core) - .map_err(Either::Right) - }) - } - - /// Opens a write-ahead log backed by a file backed memory map. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::OrderWal, Builder}; - /// - /// let dir = tempfile::tempdir().unwrap(); - /// let path = dir.path().join("map_mut_with_path_builder_example.wal"); - /// - /// let wal = unsafe { - /// Builder::new() - /// .with_create_new(true) - /// .with_read(true) - /// .with_write(true) - /// .with_capacity(1000) - /// .map_mut::(&path) - /// .unwrap() - /// }; - /// ``` - pub unsafe fn map_mut(self, path: P) -> Result - where - C: Comparator + CheapClone + 'static, - S: BuildChecksumer, - P: AsRef, - W: Wal, - { - self - .map_mut_with_path_builder::(|| Ok(path.as_ref().to_path_buf())) - .map_err(|e| e.unwrap_right()) - } - - /// Opens a write-ahead log backed by a file backed memory map. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. 
-  ///
-  /// ## Example
-  ///
-  /// ```rust
-  /// use orderwal::{swmr::OrderWal, Builder};
-  ///
-  /// let dir = tempfile::tempdir().unwrap();
-  ///
-  /// let wal = unsafe {
-  ///   Builder::new()
-  ///     .with_create_new(true)
-  ///     .with_read(true)
-  ///     .with_write(true)
-  ///     .with_capacity(1000)
-  ///     .map_mut_with_path_builder::(
-  ///       || Ok(dir.path().join("map_mut_with_path_builder_example.wal")),
-  ///     )
-  ///     .unwrap()
-  /// };
-  /// ```
-  pub unsafe fn map_mut_with_path_builder(
-    self,
-    path_builder: PB,
-  ) -> Result>
-  where
-    PB: FnOnce() -> Result,
-    C: Comparator + CheapClone + 'static,
-    S: BuildChecksumer,
-    W: Wal,
-  {
-    let path = path_builder().map_err(Either::Left)?;
-    let exist = path.exists();
-    let Self { opts, cmp, cks } = self;
-
-    arena_options(opts.reserved())
-      .merge(&opts)
-      .map_mut(path)
-      .map_err(Into::into)
-      .and_then(|arena| {
-        if !exist {
-          >::new_in(arena, opts, cmp, cks).map(W::from_core)
-        } else {
-          >::replay(arena, opts, false, cmp, cks).map(W::from_core)
-        }
-      })
-      .map_err(Either::Right)
+      .and_then(|arena| W::new_in(arena, opts, memtable_opts, cks).map(W::from_core))
   }
 }
diff --git a/src/builder/memmap.rs b/src/builder/memmap.rs
new file mode 100644
index 0000000..5895d6d
--- /dev/null
+++ b/src/builder/memmap.rs
@@ -0,0 +1,710 @@
+use super::*;
+use crate::{options::ArenaOptionsExt, sealed::Immutable};
+
+use dbutils::{
+  checksum::BuildChecksumer,
+  types::{KeyRef, Type},
+};
+use skl::either::Either;
+
+impl Builder
+where
+  M: BaseTable,
+{
+  /// Sets whether to lock the meta of the WAL in memory (`mlock` on the header),
+  /// preventing the OS from swapping out the header of the WAL.
+  /// When using a memory-map backed WAL, the meta lives in the header and is
+  /// accessed frequently, so locking it reduces page faults. The cost is that
+  /// each WAL keeps one page locked in memory that will never be swapped out,
+  /// so this is a trade-off between performance and memory usage.
+  ///
+  /// Default is `true`.
+  ///
+  /// This configuration has no effect on Windows and vec backed WAL.
+  ///
+  /// ## Example
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_lock_meta(false);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn with_lock_meta(mut self, lock_meta: bool) -> Self {
+    self.opts.lock_meta = lock_meta;
+    self
+  }
+
+  /// Sets the option for read access.
+  ///
+  /// This option, when true, will indicate that the file should be
+  /// `read`-able if opened.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_read(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_read(mut self, read: bool) -> Self {
+    self.opts.read = read;
+    self
+  }
+
+  /// Sets the option for write access.
+  ///
+  /// This option, when true, will indicate that the file should be
+  /// `write`-able if opened.
+  ///
+  /// If the file already exists, any write calls on it will overwrite its
+  /// contents, without truncating it.
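This overwrite-without-truncate behavior is inherited from `std::fs::OpenOptions`; a plain-`std` sketch of what it means for existing bytes (file name and contents are illustrative only):

```rust
use std::fs::{self, OpenOptions};
use std::io::Write;

let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("overwrite.bin");
fs::write(&path, b"0123456789").unwrap();

// `write(true)` alone starts at offset 0 and overwrites in place...
let mut f = OpenOptions::new().write(true).open(&path).unwrap();
f.write_all(b"ab").unwrap();
drop(f);

// ...so the tail of the file survives; removing it would require
// `truncate(true)` as well.
assert_eq!(fs::read(&path).unwrap(), b"ab23456789");
```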
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_write(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_write(mut self, write: bool) -> Self {
+    self.opts.write = write;
+    self
+  }
+
+  /// Sets the option for the append mode.
+  ///
+  /// This option, when true, means that writes will append to a file instead
+  /// of overwriting previous contents.
+  /// Note that setting `.write(true).append(true)` has the same effect as
+  /// setting only `.append(true)`.
+  ///
+  /// For most filesystems, the operating system guarantees that all writes are
+  /// atomic: no writes get mangled because another process writes at the same
+  /// time.
+  ///
+  /// One maybe obvious note when using append-mode: make sure that all data
+  /// that belongs together is written to the file in one operation. This
+  /// can be done by concatenating strings before passing them to [`write()`],
+  /// or using a buffered writer (with a buffer of adequate size),
+  /// and calling [`flush()`] when the message is complete.
+  ///
+  /// If a file is opened with both read and append access, beware that after
+  /// opening, and after every write, the position for reading may be set at the
+  /// end of the file. So, before writing, save the current position (using
+  /// [seek]\([SeekFrom](std::io::SeekFrom)::[Current]\(0))), and restore it before the next read.
+  ///
+  /// ## Note
+  ///
+  /// This function doesn't create the file if it doesn't exist. Use the
+  /// [`Options::with_create`] method to do so.
+  ///
+  /// [`write()`]: std::io::Write::write "io::Write::write"
+  /// [`flush()`]: std::io::Write::flush "io::Write::flush"
+  /// [seek]: std::io::Seek::seek "io::Seek::seek"
+  /// [Current]: std::io::SeekFrom::Current "io::SeekFrom::Current"
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_append(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_append(mut self, append: bool) -> Self {
+    self.opts.write = true;
+    self.opts.append = append;
+    self
+  }
+
+  /// Sets the option for truncating a previous file.
+  ///
+  /// If a file is successfully opened with this option set, it will truncate
+  /// the file to 0 length if it already exists.
+  ///
+  /// The file must be opened with write access for truncate to work.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_write(true).with_truncate(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_truncate(mut self, truncate: bool) -> Self {
+    self.opts.truncate = truncate;
+    self.opts.write = true;
+    self
+  }
+
+  /// Sets the option to create a new file, or open it if it already exists.
+  /// If the file does not exist, it is created and its length is set to the given size.
+  ///
+  /// In order for the file to be created, [`Options::with_write`] or
+  /// [`Options::with_append`] access must be used.
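The append-mode note above recommends saving the read position before a write and restoring it afterwards; a minimal `std` sketch of that pattern (the helper name and record contents are illustrative):

```rust
use std::fs::OpenOptions;
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;

fn append_then_keep_reading(path: &Path) -> std::io::Result<()> {
    let mut f = OpenOptions::new().read(true).append(true).open(path)?;

    // Save the read position: the upcoming append moves the cursor to EOF.
    let pos = f.stream_position()?;
    f.write_all(b"one complete record\n")?;

    // Restore it so the next read continues where it left off.
    f.seek(SeekFrom::Start(pos))?;
    let mut buf = [0u8; 16];
    let _ = f.read(&mut buf)?;
    Ok(())
}
```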
+ /// + /// See also [`std::fs::write()`][std::fs::write] for a simple function to + /// create a file with some given data. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_write(true).with_create(true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_create(mut self, val: bool) -> Self { + self.opts.create = val; + self + } + + /// Sets the option to create a new file and set the file length to the given value, failing if it already exists. + /// + /// No file is allowed to exist at the target location, also no (dangling) symlink. In this + /// way, if the call succeeds, the file returned is guaranteed to be new. + /// + /// This option is useful because it is atomic. Otherwise between checking + /// whether a file exists and creating a new one, the file may have been + /// created by another process (a TOCTOU race condition / attack). + /// + /// If `.with_create_new(true)` is set, [`.with_create()`] and [`.with_truncate()`] are + /// ignored. + /// + /// The file must be opened with write or append access in order to create + /// a new file. + /// + /// [`.with_create()`]: Builder::with_create + /// [`.with_truncate()`]: Builder::with_truncate + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new() + /// .with_write(true) + /// .with_create_new(true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_create_new(mut self, val: bool) -> Self { + self.opts.create_new = val; + self + } + + /// Configures the anonymous memory map to be suitable for a process or thread stack. + /// + /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows. + /// + /// This option has no effect on file-backed memory maps and vec backed `Wal`. + /// + /// ## Example + /// + /// ``` + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let stack = Builder::>::new().with_stack(true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_stack(mut self, stack: bool) -> Self { + self.opts.stack = stack; + self + } + + /// Configures the anonymous memory map to be allocated using huge pages. + /// + /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows. + /// + /// The size of the requested page can be specified in page bits. If not provided, the system + /// default is requested. The requested length should be a multiple of this, or the mapping + /// will fail. + /// + /// This option has no effect on file-backed memory maps and vec backed `Wal`. + /// + /// ## Example + /// + /// ``` + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_huge(Some(8)); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_huge(mut self, page_bits: Option) -> Self { + self.opts.huge = page_bits; + self + } + + /// Populate (prefault) page tables for a mapping. 
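Because `with_create_new` carries the same atomic create-or-fail semantics as `std::fs::OpenOptions::create_new`, the race it closes can be demonstrated with plain `std` (the temporary path is illustrative):

```rust
use std::fs::OpenOptions;

let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("create_new.wal");

// The first `create_new` open succeeds and owns the freshly created file.
assert!(OpenOptions::new().write(true).create_new(true).open(&path).is_ok());

// A second `create_new` open fails instead of silently reusing the file,
// which is exactly what closes the check-then-create (TOCTOU) window.
assert!(OpenOptions::new().write(true).create_new(true).open(&path).is_err());
```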
+  ///
+  /// For a file mapping, this causes read-ahead on the file. This will help to reduce blocking on page faults later.
+  ///
+  /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows.
+  ///
+  /// This option has no effect on vec backed `Wal`.
+  ///
+  /// ## Example
+  ///
+  /// ```
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_populate(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_populate(mut self, populate: bool) -> Self {
+    self.opts.populate = populate;
+    self
+  }
+}
+
+impl Builder
+where
+  M: BaseTable,
+{
+  /// Returns whether the meta of the WAL will be locked in memory (`mlock` on the header),
+  /// preventing the OS from swapping out the header of the WAL.
+  /// When using a memory-map backed WAL, the meta lives in the header and is
+  /// accessed frequently, so locking it reduces page faults. The cost is that
+  /// each WAL keeps one page locked in memory that will never be swapped out,
+  /// so this is a trade-off between performance and memory usage.
+  ///
+  /// ## Example
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_lock_meta(false);
+  /// assert_eq!(opts.lock_meta(), false);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn lock_meta(&self) -> bool {
+    self.opts.lock_meta
+  }
+
+  /// Returns `true` if the file should be opened with read access.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_read(true);
+  /// assert_eq!(opts.read(), true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn read(&self) -> bool {
+    self.opts.read
+  }
+
+  /// Returns `true` if the file should be opened with write access.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_write(true);
+  /// assert_eq!(opts.write(), true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn write(&self) -> bool {
+    self.opts.write
+  }
+
+  /// Returns `true` if the file should be opened with append access.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::{Builder, multiple_version::LinkedTable};
+  ///
+  /// let opts = Builder::>::new().with_append(true);
+  /// assert_eq!(opts.append(), true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn append(&self) -> bool {
+    self.opts.append
+  }
+
+  /// Returns `true` if the file should be opened with truncate access.
+ /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_truncate(true); + /// assert_eq!(opts.truncate(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn truncate(&self) -> bool { + self.opts.truncate + } + + /// Returns `true` if the file should be created if it does not exist. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_create(true); + /// assert_eq!(opts.create(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn create(&self) -> bool { + self.opts.create + } + + /// Returns `true` if the file should be created if it does not exist and fail if it does. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_create_new(true); + /// assert_eq!(opts.create_new(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn create_new(&self) -> bool { + self.opts.create_new + } + + /// Returns `true` if the memory map should be suitable for a process or thread stack. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_stack(true); + /// assert_eq!(opts.stack(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn stack(&self) -> bool { + self.opts.stack + } + + /// Returns the page bits of the memory map. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_huge(Some(8)); + /// assert_eq!(opts.huge(), Some(8)); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn huge(&self) -> Option { + self.opts.huge + } + + /// Returns `true` if the memory map should populate (prefault) page tables for a mapping. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::{Builder, multiple_version::LinkedTable}; + /// + /// let opts = Builder::>::new().with_populate(true); + /// assert_eq!(opts.populate(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn populate(&self) -> bool { + self.opts.populate + } +} + +impl Builder +where + M: BaseTable, +{ + /// Creates a new in-memory write-ahead log but backed by an anonymous mmap. 
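One note on `with_huge` above: the page size is given in page *bits*, i.e. the log2 of the byte size, matching the `MAP_HUGETLB` encoding on Linux. A sketch; `LinkedTable<[u8], [u8]>` is an assumed instantiation, since the surrounding doc tests elide the memtable's generic arguments:

```rust
use orderwal::{Builder, multiple_version::LinkedTable};

// 2^21 bytes = 2 MiB huge pages, the common x86_64 size; `Some(30)` would
// request 1 GiB pages, and `None` falls back to the system default.
// The memtable type parameter here is an assumption for the sketch.
let opts = Builder::<LinkedTable<[u8], [u8]>>::new().with_huge(Some(21));
assert_eq!(opts.huge(), Some(21));
```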
+ /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWal, Builder}; + /// + /// let wal = Builder::new() + /// .with_capacity(1024) + /// .map_anon::>() + /// .unwrap(); + /// ``` + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn map_anon(self) -> Result> + where + W: Constructable, + { + let Self { + opts, + cks, + memtable_opts, + } = self; + arena_options(opts.reserved()) + .merge(&opts) + .map_anon() + .map_err(Into::into) + .and_then(|arena| W::new_in(arena, opts, memtable_opts, cks).map(W::from_core)) + } + + /// Opens a write-ahead log backed by a file backed memory map in read-only mode. + /// + /// ## Safety + /// + /// All file-backed memory map constructors are marked `unsafe` because of the potential for + /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or + /// out of process. Applications must consider the risk and take appropriate precautions when + /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. + /// unlinked) files exist but are platform specific and limited. + /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWalReader, Builder}; + /// + /// # let dir = tempfile::tempdir().unwrap(); + /// # let path = dir.path().join("map.wal"); + /// + /// # let wal = unsafe { + /// # Builder::new() + /// # .with_capacity(1000).with_create(true).with_read(true).with_write(true) + /// # .map_mut::, _>(&path) + /// # .unwrap() + /// # }; + /// + /// let wal = unsafe { + /// Builder::new() + /// .map::, _>(&path) + /// .unwrap() + /// }; + /// ``` + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub unsafe fn map<'a, W, P>(self, path: P) -> Result> + where + S: BuildChecksumer, + P: AsRef, + W: Constructable + Immutable, + M::Key: Type + Ord + 'static, + ::Ref<'a>: KeyRef<'a, M::Key>, + { + self + .map_with_path_builder::(|| Ok(path.as_ref().to_path_buf())) + .map_err(Either::unwrap_right) + } + + /// Opens a write-ahead log backed by a file backed memory map in read-only mode. + /// + /// ## Safety + /// + /// All file-backed memory map constructors are marked `unsafe` because of the potential for + /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or + /// out of process. Applications must consider the risk and take appropriate precautions when + /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. + /// unlinked) files exist but are platform specific and limited. 
+ /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWalReader, Builder}; + /// + /// # let dir = tempfile::tempdir().unwrap(); + /// # let path = dir.path().join("map_with_path_builder.wal"); + /// + /// # let wal = unsafe { + /// # Builder::new() + /// # .with_capacity(1000).with_create(true).with_read(true).with_write(true) + /// # .map_mut::, _>(&path) + /// # .unwrap() + /// # }; + /// + /// let wal = unsafe { + /// Builder::new() + /// .map_with_path_builder::, _, ()>(|| Ok(path)) + /// .unwrap() + /// }; + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub unsafe fn map_with_path_builder<'a, W, PB, E>( + self, + path_builder: PB, + ) -> Result>> + where + PB: FnOnce() -> Result, + S: BuildChecksumer, + W: Constructable + Immutable, + M::Key: Type + Ord + 'static, + ::Ref<'a>: KeyRef<'a, M::Key>, + { + let Self { + opts, + cks, + memtable_opts, + } = self; + + arena_options(opts.reserved()) + .merge(&opts) + .with_read(true) + .map_with_path_builder(path_builder) + .map_err(|e| e.map_right(Into::into)) + .and_then(|arena| { + W::replay(arena, Options::new(), memtable_opts, true, cks) + .map(Constructable::from_core) + .map_err(Either::Right) + }) + } + + /// Opens a write-ahead log backed by a file backed memory map. + /// + /// ## Safety + /// + /// All file-backed memory map constructors are marked `unsafe` because of the potential for + /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or + /// out of process. Applications must consider the risk and take appropriate precautions when + /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. + /// unlinked) files exist but are platform specific and limited. + /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWal, Builder}; + /// + /// let dir = tempfile::tempdir().unwrap(); + /// let path = dir.path().join("map_mut_with_path_builder_example.wal"); + /// + /// let wal = unsafe { + /// Builder::new() + /// .with_create_new(true) + /// .with_read(true) + /// .with_write(true) + /// .with_capacity(1000) + /// .map_mut::, _>(&path) + /// .unwrap() + /// }; + /// ``` + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub unsafe fn map_mut<'a, W, P>(self, path: P) -> Result> + where + S: BuildChecksumer, + P: AsRef, + W: Constructable, + M::Key: Type + Ord + 'static, + ::Ref<'a>: KeyRef<'a, M::Key>, + { + self + .map_mut_with_path_builder::(|| Ok(path.as_ref().to_path_buf())) + .map_err(Either::unwrap_right) + } + + /// Opens a write-ahead log backed by a file backed memory map. + /// + /// ## Safety + /// + /// All file-backed memory map constructors are marked `unsafe` because of the potential for + /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or + /// out of process. Applications must consider the risk and take appropriate precautions when + /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. + /// unlinked) files exist but are platform specific and limited. 
+ /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWal, Builder}; + /// + /// let dir = tempfile::tempdir().unwrap(); + /// + /// let wal = unsafe { + /// Builder::new() + /// .with_create_new(true) + /// .with_read(true) + /// .with_write(true) + /// .with_capacity(1000) + /// .map_mut_with_path_builder::, _, ()>( + /// || Ok(dir.path().join("map_mut_with_path_builder_example.wal")), + /// ) + /// .unwrap() + /// }; + /// ``` + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub unsafe fn map_mut_with_path_builder<'a, W, PB, E>( + self, + path_builder: PB, + ) -> Result>> + where + PB: FnOnce() -> Result, + S: BuildChecksumer, + W: Constructable, + M::Key: Type + Ord + 'static, + ::Ref<'a>: KeyRef<'a, M::Key>, + { + let path = path_builder().map_err(Either::Left)?; + let exist = path.exists(); + let Self { + opts, + cks, + memtable_opts, + } = self; + + arena_options(opts.reserved()) + .merge(&opts) + .map_mut(path) + .map_err(Into::into) + .and_then(|arena| { + if !exist { + W::new_in(arena, opts, memtable_opts, cks).map(W::from_core) + } else { + W::replay(arena, opts, memtable_opts, false, cks).map(W::from_core) + } + }) + .map_err(Either::Right) + } +} diff --git a/src/entry.rs b/src/entry.rs deleted file mode 100644 index 41e1fd2..0000000 --- a/src/entry.rs +++ /dev/null @@ -1,601 +0,0 @@ -use core::borrow::Borrow; - -use crossbeam_skiplist::set::Entry as SetEntry; -use dbutils::{ - buffer::VacantBuffer, - equivalent::{Comparable, Equivalent}, - traits::{KeyRef, Type, TypeRef}, -}; -use rarena_allocator::either::Either; - -use super::{ - pointer::{GenericPointer, Pointer}, - KeyBuilder, ValueBuilder, -}; - -pub(crate) struct BatchEncodedEntryMeta { - /// The output of `merge_lengths(klen, vlen)` - pub(crate) kvlen: u64, - /// the length of `encoded_u64_varint(merge_lengths(klen, vlen))` - pub(crate) kvlen_size: usize, - pub(crate) klen: usize, - pub(crate) vlen: usize, -} - -impl BatchEncodedEntryMeta { - #[inline] - pub(crate) const fn new(klen: usize, vlen: usize, kvlen: u64, kvlen_size: usize) -> Self { - Self { - klen, - vlen, - kvlen, - kvlen_size, - } - } - - #[inline] - const fn zero() -> Self { - Self { - klen: 0, - vlen: 0, - kvlen: 0, - kvlen_size: 0, - } - } -} - -/// An entry which can be inserted into the [`Wal`](crate::wal::Wal). -pub struct Entry { - pub(crate) key: K, - pub(crate) value: V, - pub(crate) pointer: Option>, - pub(crate) meta: BatchEncodedEntryMeta, -} - -impl Entry -where - K: Borrow<[u8]>, - V: Borrow<[u8]>, -{ - /// Returns the length of the value. - #[inline] - pub fn key_len(&self) -> usize { - self.key.borrow().len() - } - - /// Returns the length of the value. - #[inline] - pub fn value_len(&self) -> usize { - self.value.borrow().len() - } -} - -impl Entry { - /// Creates a new entry. - #[inline] - pub const fn new(key: K, value: V) -> Self { - Self { - key, - value, - pointer: None, - meta: BatchEncodedEntryMeta::zero(), - } - } - - /// Returns the key. - #[inline] - pub const fn key(&self) -> &K { - &self.key - } - - /// Returns the value. - #[inline] - pub const fn value(&self) -> &V { - &self.value - } - - /// Consumes the entry and returns the key and value. - #[inline] - pub fn into_components(self) -> (K, V) { - (self.key, self.value) - } -} - -/// An entry builder which can build an [`Entry`] to be inserted into the [`Wal`](crate::wal::Wal). 
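The tail of `map_mut_with_path_builder` above is where reopening becomes recovery: whether the file existed is captured *before* the map is created, and the arena is then routed to `new_in` for a fresh file or `replay` for an existing one. A miniature of that branch in plain `std`, with placeholder helpers standing in for `new_in`/`replay` (they are not orderwal APIs):

```rust
use std::fs::{File, OpenOptions};
use std::path::Path;

// Placeholders for this sketch only.
fn initialize_new(_file: &File) -> std::io::Result<()> { Ok(()) }
fn replay_existing(_file: &File) -> std::io::Result<()> { Ok(()) }

fn open_wal(path: &Path) -> std::io::Result<()> {
    // Check existence first: `create(true)` below creates the file as a
    // side effect, so checking afterwards would always say "exists".
    let existed = path.exists();
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(path)?;
    if existed {
        replay_existing(&file) // rebuild in-memory state from disk
    } else {
        initialize_new(&file) // write a fresh header
    }
}
```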
-pub struct EntryWithKeyBuilder { - pub(crate) kb: KeyBuilder, - pub(crate) value: V, - pub(crate) pointer: Option

, - pub(crate) meta: BatchEncodedEntryMeta, -} - -impl EntryWithKeyBuilder -where - V: Borrow<[u8]>, -{ - /// Returns the length of the value. - #[inline] - pub(crate) fn value_len(&self) -> usize { - self.value.borrow().len() - } -} - -impl EntryWithKeyBuilder { - /// Creates a new entry. - #[inline] - pub const fn new(kb: KeyBuilder, value: V) -> Self { - Self { - kb, - value, - pointer: None, - meta: BatchEncodedEntryMeta::zero(), - } - } - - /// Returns the key. - #[inline] - pub const fn key_builder(&self) -> &KeyBuilder { - &self.kb - } - - /// Returns the value. - #[inline] - pub const fn value(&self) -> &V { - &self.value - } - - /// Returns the length of the key. - #[inline] - pub const fn key_len(&self) -> usize { - self.kb.size() as usize - } - - /// Consumes the entry and returns the key and value. - #[inline] - pub fn into_components(self) -> (KeyBuilder, V) { - (self.kb, self.value) - } -} - -/// An entry builder which can build an [`Entry`] to be inserted into the [`Wal`](crate::wal::Wal). -pub struct EntryWithValueBuilder { - pub(crate) key: K, - pub(crate) vb: ValueBuilder, - pub(crate) pointer: Option

, - pub(crate) meta: BatchEncodedEntryMeta, -} - -impl EntryWithValueBuilder -where - K: Borrow<[u8]>, -{ - /// Returns the length of the key. - #[inline] - pub(crate) fn key_len(&self) -> usize { - self.key.borrow().len() - } -} - -impl EntryWithValueBuilder { - /// Creates a new entry. - #[inline] - pub const fn new(key: K, vb: ValueBuilder) -> Self { - Self { - key, - vb, - pointer: None, - meta: BatchEncodedEntryMeta::zero(), - } - } - - /// Returns the key. - #[inline] - pub const fn value_builder(&self) -> &ValueBuilder { - &self.vb - } - - /// Returns the value. - #[inline] - pub const fn key(&self) -> &K { - &self.key - } - - /// Returns the length of the value. - #[inline] - pub const fn value_len(&self) -> usize { - self.vb.size() as usize - } - - /// Consumes the entry and returns the key and value. - #[inline] - pub fn into_components(self) -> (K, ValueBuilder) { - (self.key, self.vb) - } -} - -/// A wrapper around a generic type that can be used to construct a [`GenericEntry`]. -#[repr(transparent)] -pub struct Generic<'a, T: ?Sized> { - data: Either<&'a T, &'a [u8]>, -} - -impl<'a, T: 'a> PartialEq for Generic<'a, T> -where - T: ?Sized + PartialEq + Type + for<'b> Equivalent>, -{ - #[inline] - fn eq(&self, other: &T) -> bool { - match &self.data { - Either::Left(val) => (*val).eq(other), - Either::Right(val) => { - let ref_ = unsafe { as TypeRef<'_>>::from_slice(val) }; - other.equivalent(&ref_) - } - } - } -} - -impl<'a, T: 'a> PartialEq for Generic<'a, T> -where - T: ?Sized + PartialEq + Type + for<'b> Equivalent>, -{ - #[inline] - fn eq(&self, other: &Self) -> bool { - match (&self.data, &other.data) { - (Either::Left(val), Either::Left(other_val)) => val.eq(other_val), - (Either::Right(val), Either::Right(other_val)) => val.eq(other_val), - (Either::Left(val), Either::Right(other_val)) => { - let ref_ = unsafe { as TypeRef<'_>>::from_slice(other_val) }; - val.equivalent(&ref_) - } - (Either::Right(val), Either::Left(other_val)) => { - let ref_ = unsafe { as TypeRef<'_>>::from_slice(val) }; - other_val.equivalent(&ref_) - } - } - } -} - -impl<'a, T: 'a> Eq for Generic<'a, T> where T: ?Sized + Eq + Type + for<'b> Equivalent> {} - -impl<'a, T: 'a> PartialOrd for Generic<'a, T> -where - T: ?Sized + Ord + Type + for<'b> Comparable>, - for<'b> T::Ref<'b>: Comparable + Ord, -{ - #[inline] - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl<'a, T: 'a> PartialOrd for Generic<'a, T> -where - T: ?Sized + PartialOrd + Type + for<'b> Comparable>, -{ - #[inline] - fn partial_cmp(&self, other: &T) -> Option { - match &self.data { - Either::Left(val) => (*val).partial_cmp(other), - Either::Right(val) => { - let ref_ = unsafe { as TypeRef<'_>>::from_slice(val) }; - Some(other.compare(&ref_).reverse()) - } - } - } -} - -impl<'a, T: 'a> Ord for Generic<'a, T> -where - T: ?Sized + Ord + Type + for<'b> Comparable>, - for<'b> T::Ref<'b>: Comparable + Ord, -{ - #[inline] - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - match (&self.data, &other.data) { - (Either::Left(val), Either::Left(other_val)) => (*val).cmp(other_val), - (Either::Right(val), Either::Right(other_val)) => { - let this = unsafe { as TypeRef<'_>>::from_slice(val) }; - let other = unsafe { as TypeRef<'_>>::from_slice(other_val) }; - this.cmp(&other) - } - (Either::Left(val), Either::Right(other_val)) => { - let other = unsafe { as TypeRef<'_>>::from_slice(other_val) }; - other.compare(*val).reverse() - } - (Either::Right(val), Either::Left(other_val)) => { - let this = unsafe { as 
TypeRef<'_>>::from_slice(val) }; - this.compare(*other_val) - } - } - } -} - -impl<'a, T: 'a + Type + ?Sized> Generic<'a, T> { - /// Returns the encoded length. - #[inline] - pub fn encoded_len(&self) -> usize { - match &self.data { - Either::Left(val) => val.encoded_len(), - Either::Right(val) => val.len(), - } - } - - /// Encodes the generic into the buffer. - /// - /// ## Panics - /// - if the buffer is not large enough. - #[inline] - pub fn encode(&self, buf: &mut [u8]) -> Result { - match &self.data { - Either::Left(val) => val.encode(buf), - Either::Right(val) => { - buf.copy_from_slice(val); - Ok(buf.len()) - } - } - } - - /// Encodes the generic into the given buffer. - /// - /// ## Panics - /// - if the buffer is not large enough. - #[inline] - pub fn encode_to_buffer(&self, buf: &mut VacantBuffer<'_>) -> Result { - match &self.data { - Either::Left(val) => val.encode_to_buffer(buf), - Either::Right(val) => { - buf.put_slice_unchecked(val); - Ok(buf.len()) - } - } - } -} - -impl<'a, T: 'a + ?Sized> Generic<'a, T> { - /// Returns the value contained in the generic. - #[inline] - pub const fn data(&self) -> Either<&T, &'a [u8]> { - self.data - } - - /// Creates a new generic from bytes for querying or inserting into the [`GenericOrderWal`](crate::swmr::GenericOrderWal). - /// - /// ## Safety - /// - the `slice` must the same as the one returned by [`T::encode`](Type::encode). - #[inline] - pub const unsafe fn from_slice(slice: &'a [u8]) -> Self { - Self { - data: Either::Right(slice), - } - } -} - -impl<'a, T: 'a + ?Sized> From<&'a T> for Generic<'a, T> { - #[inline] - fn from(value: &'a T) -> Self { - Self { - data: Either::Left(value), - } - } -} - -/// An entry in the [`GenericOrderWal`](crate::swmr::GenericOrderWal). -pub struct GenericEntry<'a, K: ?Sized, V: ?Sized> { - pub(crate) key: Generic<'a, K>, - pub(crate) value: Generic<'a, V>, - pub(crate) pointer: Option>, - pub(crate) meta: BatchEncodedEntryMeta, -} - -impl<'a, K: ?Sized, V: ?Sized> GenericEntry<'a, K, V> { - /// Creates a new entry. - #[inline] - pub fn new(key: impl Into>, value: impl Into>) -> Self { - Self { - key: key.into(), - value: value.into(), - pointer: None, - meta: BatchEncodedEntryMeta::zero(), - } - } - - /// Returns the length of the key. - #[inline] - pub fn key_len(&self) -> usize - where - K: Type, - { - match self.key.data() { - Either::Left(val) => val.encoded_len(), - Either::Right(val) => val.len(), - } - } - - /// Returns the length of the value. - #[inline] - pub fn value_len(&self) -> usize - where - V: Type, - { - match self.value.data() { - Either::Left(val) => val.encoded_len(), - Either::Right(val) => val.len(), - } - } - - /// Returns the key. - #[inline] - pub const fn key(&self) -> Either<&K, &[u8]> { - self.key.data() - } - - /// Returns the value. - #[inline] - pub const fn value(&self) -> Either<&V, &[u8]> { - self.value.data() - } - - /// Consumes the entry and returns the key and value. - #[inline] - pub fn into_components(self) -> (Generic<'a, K>, Generic<'a, V>) { - (self.key, self.value) - } -} - -/// An entry builder which can build an [`GenericEntry`] to be inserted into the [`GenericOrderWal`](crate::swmr::generic::GenericOrderWal). -pub struct EntryWithBuilders { - pub(crate) kb: KeyBuilder, - pub(crate) vb: ValueBuilder, - pub(crate) pointer: Option

, - pub(crate) meta: BatchEncodedEntryMeta, -} - -impl EntryWithBuilders { - /// Creates a new entry. - #[inline] - pub const fn new(kb: KeyBuilder, vb: ValueBuilder) -> Self { - Self { - kb, - vb, - pointer: None, - meta: BatchEncodedEntryMeta::zero(), - } - } - - /// Returns the value builder. - #[inline] - pub const fn value_builder(&self) -> &ValueBuilder { - &self.vb - } - - /// Returns the key builder. - #[inline] - pub const fn key_builder(&self) -> &KeyBuilder { - &self.kb - } - - /// Returns the length of the key. - #[inline] - pub const fn key_len(&self) -> usize { - self.kb.size() as usize - } - - /// Returns the length of the value. - #[inline] - pub const fn value_len(&self) -> usize { - self.vb.size() as usize - } - - /// Consumes the entry and returns the key and value. - #[inline] - pub fn into_components(self) -> (KeyBuilder, ValueBuilder) { - (self.kb, self.vb) - } -} - -/// The reference to an entry in the [`GenericOrderWal`](crate::swmr::GenericOrderWal). -pub struct GenericEntryRef<'a, K, V> -where - K: ?Sized + Type, - V: ?Sized + Type, -{ - ent: SetEntry<'a, GenericPointer>, - key: K::Ref<'a>, - value: V::Ref<'a>, -} - -impl<'a, K, V> core::fmt::Debug for GenericEntryRef<'a, K, V> -where - K: Type + ?Sized, - K::Ref<'a>: core::fmt::Debug, - V: Type + ?Sized, - V::Ref<'a>: core::fmt::Debug, -{ - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("GenericEntryRef") - .field("key", &self.key()) - .field("value", &self.value()) - .finish() - } -} - -impl<'a, K, V> Clone for GenericEntryRef<'a, K, V> -where - K: ?Sized + Type, - K::Ref<'a>: Clone, - V: ?Sized + Type, - V::Ref<'a>: Clone, -{ - #[inline] - fn clone(&self) -> Self { - Self { - ent: self.ent.clone(), - key: self.key.clone(), - value: self.value.clone(), - } - } -} - -impl<'a, K, V> GenericEntryRef<'a, K, V> -where - K: ?Sized + Type, - V: ?Sized + Type, -{ - #[inline] - pub(super) fn new(ent: SetEntry<'a, GenericPointer>) -> Self { - Self { - key: unsafe { TypeRef::from_slice(ent.value().as_key_slice()) }, - value: unsafe { TypeRef::from_slice(ent.value().as_value_slice()) }, - ent, - } - } -} - -impl GenericEntryRef<'_, K, V> -where - K: Type + Ord + ?Sized, - for<'b> K::Ref<'b>: KeyRef<'b, K>, - V: ?Sized + Type, -{ - /// Returns the next entry in the [`GenericOrderWal`](crate::swmr::GenericOrderWal). - /// - /// This does not move the cursor. - #[inline] - #[allow(clippy::should_implement_trait)] - pub fn next(&self) -> Option { - self.ent.next().map(Self::new) - } - - /// Returns the previous entry in the [`GenericOrderWal`](crate::swmr::GenericOrderWal). - /// - /// This does not move the cursor. - #[inline] - pub fn prev(&self) -> Option { - self.ent.prev().map(Self::new) - } -} - -impl<'a, K, V> GenericEntryRef<'a, K, V> -where - K: ?Sized + Type, - V: Type + ?Sized, -{ - /// Returns the value of the entry. - #[inline] - pub const fn value(&self) -> &V::Ref<'a> { - &self.value - } -} - -impl<'a, K, V> GenericEntryRef<'a, K, V> -where - K: Type + ?Sized, - V: ?Sized + Type, -{ - /// Returns the key of the entry. - #[inline] - pub const fn key(&self) -> &K::Ref<'a> { - &self.key - } -} diff --git a/src/error.rs b/src/error.rs index 3169d91..0e7393b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,8 +1,16 @@ +use among::Among; +use dbutils::error::InsufficientBuffer; +use derive_where::derive_where; + +use crate::memtable::BaseTable; + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +use crate::types::Kind; + /// The batch error type. 
-#[derive(Debug, thiserror::Error)] +#[derive(Debug)] pub enum BatchError { /// Returned when the expected batch encoding size does not match the actual size. - #[error("the expected batch encoding size ({expected}) does not match the actual size {actual}")] EncodedSizeMismatch { /// The expected size. expected: u32, @@ -10,23 +18,40 @@ pub enum BatchError { actual: u32, }, /// Larger encoding size than the expected batch encoding size. - #[error("larger encoding size than the expected batch encoding size {0}")] LargerEncodedSize(u32), } +impl core::fmt::Display for BatchError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::EncodedSizeMismatch { expected, actual } => { + write!( + f, + "the expected batch encoding size ({}) does not match the actual size {}", + expected, actual + ) + } + Self::LargerEncodedSize(size) => { + write!( + f, + "larger encoding size than the expected batch encoding size {}", + size + ) + } + } + } +} + +impl core::error::Error for BatchError {} + /// The error type. -#[derive(Debug, thiserror::Error)] -pub enum Error { +#[derive_where(Debug; T::Error)] +pub enum Error { /// Insufficient space in the WAL - #[error("insufficient space in the WAL (requested: {requested}, available: {available})")] - InsufficientSpace { - /// The requested size - requested: u64, - /// The remaining size - available: u32, - }, + InsufficientSpace(InsufficientBuffer), + /// Memtable does not have enough space. + Memtable(T::Error), /// The key is too large. - #[error("the key size is {size} larger than the maximum key size {maximum_key_size}")] KeyTooLarge { /// The size of the key. size: u64, @@ -34,7 +59,6 @@ pub enum Error { maximum_key_size: u32, }, /// The value is too large. - #[error("the value size is {size} larger than the maximum value size {maximum_value_size}")] ValueTooLarge { /// The size of the value. size: u64, @@ -42,34 +66,187 @@ pub enum Error { maximum_value_size: u32, }, /// The entry is too large. - #[error("the entry size is {size} larger than the maximum entry size {maximum_entry_size}")] EntryTooLarge { /// The size of the entry. size: u64, /// The maximum entry size. maximum_entry_size: u64, }, + /// Returned when the expected batch encoding size does not match the actual size. - #[error(transparent)] - Batch(#[from] BatchError), - /// I/O error. - #[error("{0}")] - IO(#[from] std::io::Error), + Batch(BatchError), + /// The WAL is read-only. - #[error("The WAL is read-only")] ReadOnly, + + /// Unknown WAL kind. + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + UnknownKind(UnknownKind), + + /// WAL kind mismatch. + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + KindMismatch { + /// The WAL was created with this kind. + create: Kind, + /// Trying to open the WAL with this kind. + open: Kind, + }, + + /// I/O error. 
+ #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + IO(std::io::Error), } -impl Error { +impl From for Error { + #[inline] + fn from(e: BatchError) -> Self { + Self::Batch(e) + } +} + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl From for Error { + #[inline] + fn from(e: UnknownKind) -> Self { + Self::UnknownKind(e) + } +} + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl From for Error { + #[inline] + fn from(e: std::io::Error) -> Self { + Self::IO(e) + } +} + +impl core::fmt::Display for Error +where + T: BaseTable, + T::Error: core::fmt::Display, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::InsufficientSpace(e) => write!(f, "insufficient space in the WAL: {e}"), + Self::Memtable(e) => write!(f, "{e}"), + Self::KeyTooLarge { + size, + maximum_key_size, + } => write!( + f, + "the key size is {} larger than the maximum key size {}", + size, maximum_key_size + ), + Self::ValueTooLarge { + size, + maximum_value_size, + } => write!( + f, + "the value size is {} larger than the maximum value size {}", + size, maximum_value_size + ), + Self::EntryTooLarge { + size, + maximum_entry_size, + } => write!( + f, + "the entry size is {} larger than the maximum entry size {}", + size, maximum_entry_size + ), + Self::Batch(e) => write!(f, "{e}"), + Self::ReadOnly => write!(f, "The WAL is read-only"), + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + Self::UnknownKind(e) => write!(f, "{e}"), + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + Self::KindMismatch { create, open } => write!( + f, + "the wal was {}, cannot be {}", + create.display_created_err_msg(), + open.display_open_err_msg() + ), + #[cfg(feature = "std")] + Self::IO(e) => write!(f, "{e}"), + } + } +} + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl Kind { + #[inline] + const fn display_created_err_msg(&self) -> &'static str { + match self { + Self::Plain => "created without multiple versions support", + Self::MultipleVersion => "created with multiple versions support", + } + } + + #[inline] + const fn display_open_err_msg(&self) -> &'static str { + match self { + Self::Plain => "opened without multiple versions support", + Self::MultipleVersion => "opened with multiple versions support", + } + } +} + +impl core::error::Error for Error +where + T: BaseTable, + T::Error: core::error::Error + 'static, +{ + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { + match self { + Self::InsufficientSpace(e) => Some(e), + Self::Memtable(e) => Some(e), + Self::KeyTooLarge { .. } => None, + Self::ValueTooLarge { .. } => None, + Self::EntryTooLarge { .. } => None, + Self::Batch(e) => Some(e), + Self::ReadOnly => None, + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + Self::UnknownKind(e) => Some(e), + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + Self::KindMismatch { .. } => None, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + Self::IO(e) => Some(e), + } + } +} + +impl From>> for Error { + #[inline] + fn from(value: Among>) -> Self { + match value { + Among::Left(a) => Self::InsufficientSpace(a), + Among::Middle(b) => Self::InsufficientSpace(b), + Among::Right(c) => c, + } + } +} + +impl Error { /// Create a new `Error::InsufficientSpace` instance. 
+ #[inline] pub(crate) const fn insufficient_space(requested: u64, available: u32) -> Self { - Self::InsufficientSpace { + Self::InsufficientSpace(InsufficientBuffer::with_information( requested, - available, - } + available as u64, + )) + } + + /// Create a new `Error::MemtableInsufficientSpace` instance. + #[inline] + pub(crate) const fn memtable(e: T::Error) -> Self { + Self::Memtable(e) } /// Create a new `Error::KeyTooLarge` instance. + #[inline] pub(crate) const fn key_too_large(size: u64, maximum_key_size: u32) -> Self { Self::KeyTooLarge { size, @@ -78,6 +255,7 @@ impl Error { } /// Create a new `Error::ValueTooLarge` instance. + #[inline] pub(crate) const fn value_too_large(size: u64, maximum_value_size: u32) -> Self { Self::ValueTooLarge { size, @@ -86,6 +264,7 @@ impl Error { } /// Create a new `Error::EntryTooLarge` instance. + #[inline] pub(crate) const fn entry_too_large(size: u64, maximum_entry_size: u64) -> Self { Self::EntryTooLarge { size, @@ -104,9 +283,16 @@ impl Error { } } + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[inline] + pub(crate) const fn wal_kind_mismatch(create: Kind, open: Kind) -> Self { + Self::KindMismatch { create, open } + } + /// Create a new corrupted error. + #[cfg(feature = "std")] #[inline] - pub(crate) fn corrupted(e: E) -> Error + pub(crate) fn corrupted(e: E) -> Self where E: Into>, { @@ -140,21 +326,42 @@ impl Error { } /// Create a read-only error. + #[inline] pub(crate) const fn read_only() -> Self { Self::ReadOnly } - pub(crate) fn magic_text_mismatch() -> Error { + #[cfg(feature = "std")] + #[inline] + pub(crate) fn magic_text_mismatch() -> Self { Self::IO(std::io::Error::new( std::io::ErrorKind::InvalidData, "magic text of orderwal does not match", )) } - pub(crate) fn magic_version_mismatch() -> Error { + #[cfg(feature = "std")] + #[inline] + pub(crate) fn magic_version_mismatch() -> Self { Self::IO(std::io::Error::new( std::io::ErrorKind::InvalidData, "magic version of orderwal does not match", )) } } + +/// Unknown WAL kind error. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] +pub struct UnknownKind(pub(super) u8); + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl core::fmt::Display for UnknownKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "unknown WAL kind: {}", self.0) + } +} + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl core::error::Error for UnknownKind {} diff --git a/src/lib.rs b/src/lib.rs index f5360cc..f15f149 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,28 +1,24 @@ //! An ordered Write-Ahead Log implementation for Rust. 
#![doc = include_str!("../README.md")]
-#![cfg_attr(not(any(feature = "std", test)), no_std)]
+#![cfg_attr(not(feature = "std"), no_std)]
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, allow(unused_attributes))]
 #![deny(missing_docs)]
 #![allow(clippy::type_complexity)]
 
-use core::{borrow::Borrow, marker::PhantomData, mem};
+use core::mem;
 
 pub use among;
-use among::Among;
-use crossbeam_skiplist::SkipSet;
-use error::Error;
-use rarena_allocator::{
-  either::{self, Either},
-  Allocator, Buffer, Freelist, Options as ArenaOptions,
-};
 
 #[cfg(feature = "std")]
 extern crate std;
 
+#[cfg(not(feature = "std"))]
+extern crate alloc as std;
+
 pub use dbutils::{
   checksum::{self, Crc32},
-  Ascend, CheapClone, Comparator, Descend,
+  equivalent::{Comparable, ComparableRangeBounds, Equivalent},
 };
 
 #[cfg(feature = "xxhash3")]
@@ -33,67 +29,49 @@ pub use dbutils::checksum::XxHash3;
 #[cfg_attr(docsrs, doc(cfg(feature = "xxhash64")))]
 pub use dbutils::checksum::XxHash64;
 
-const STATUS_SIZE: usize = mem::size_of::<u8>();
+const RECORD_FLAG_SIZE: usize = mem::size_of::<u8>();
 const CHECKSUM_SIZE: usize = mem::size_of::<u64>();
 const CURRENT_VERSION: u16 = 0;
-const MAGIC_TEXT: [u8; 6] = *b"ordwal";
+const MAGIC_TEXT: [u8; 5] = *b"order";
 const MAGIC_TEXT_SIZE: usize = MAGIC_TEXT.len();
+const WAL_KIND_SIZE: usize = mem::size_of::<u8>();
 const MAGIC_VERSION_SIZE: usize = mem::size_of::<u16>();
-const HEADER_SIZE: usize = MAGIC_TEXT_SIZE + MAGIC_VERSION_SIZE;
-
-#[cfg(all(
-  test,
-  any(
-    all_tests,
-    test_unsync_constructor,
-    test_unsync_insert,
-    test_unsync_get,
-    test_unsync_iters,
-    test_swmr_constructor,
-    test_swmr_insert,
-    test_swmr_get,
-    test_swmr_iters,
-    test_swmr_generic_constructor,
-    test_swmr_generic_insert,
-    test_swmr_generic_get,
-    test_swmr_generic_iters,
-  )
-))]
-#[macro_use]
-mod tests;
+const HEADER_SIZE: usize = MAGIC_TEXT_SIZE + WAL_KIND_SIZE + MAGIC_VERSION_SIZE;
+/// The MVCC version size.
+const VERSION_SIZE: usize = mem::size_of::<u64>();
 
 /// Error types.
 pub mod error;
 
-mod buffer;
-pub use buffer::*;
-
 mod builder;
 pub use builder::Builder;
 
-mod entry;
-pub use entry::*;
-
-/// Utilities.
-pub mod utils;
-use utils::*;
-
-mod wal;
-pub use wal::{ImmutableWal, Wal};
+/// Types
+pub mod types;
 
 mod options;
 pub use options::Options;
+pub use skl::KeySize;
+
+/// Batch insertions related traits and structs.
+pub mod batch;
 
 /// A single writer multiple readers ordered write-ahead Log implementation.
-pub mod swmr;
+mod swmr;
+mod wal;
+pub use swmr::*;
 
-/// An ordered write-ahead Log implementation.
-pub mod unsync;
+/// The memory table implementation.
+pub mod memtable;
 
-mod pointer;
+mod sealed;
+pub use sealed::Immutable;
+
+/// The utility functions.
+pub mod utils;
 
 bitflags::bitflags! {
-  /// The flags of the entry.
+  /// The flags for each atomic write.
   struct Flags: u8 {
     /// First bit: 1 indicates committed, 0 indicates uncommitted
     const COMMITTED = 0b00000001;
diff --git a/src/memtable.rs b/src/memtable.rs
new file mode 100644
index 0000000..8807275
--- /dev/null
+++ b/src/memtable.rs
@@ -0,0 +1,280 @@
+use core::ops::{Bound, RangeBounds};
+use dbutils::equivalent::Comparable;
+
+use crate::{
+  sealed::{WithVersion, WithoutVersion},
+  types::Kind,
+  wal::{KeyPointer, ValuePointer},
+};
+
+/// Memtable implementation based on the linked [`SkipMap`][`crossbeam_skiplist`].
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub mod linked;
+
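For orientation, the linked flavor is built on `crossbeam_skiplist::SkipMap`, whose entries already support the cursor-style traversal (`next`/`prev` on an entry, no explicit iterator) that the `BaseEntry` trait below abstracts over:

```rust
use crossbeam_skiplist::SkipMap;

let map = SkipMap::new();
map.insert("a", 1);
map.insert("b", 2);
map.insert("c", 3);

// Start at the smallest key and hop entry-to-entry.
let mut cursor = map.front();
while let Some(entry) = cursor {
    println!("{} => {}", entry.key(), entry.value());
    cursor = entry.next(); // `None` once we walk off the back
}
```

+/// Memtable implementation based on the ARENA-based [`SkipMap`](skl).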
+pub mod arena; + +/// Sum type for different memtable implementations. +pub mod alternative; + +/// An entry which is stored in the memory table. +pub trait BaseEntry<'a>: Sized { + /// The key type. + type Key: ?Sized; + /// The value type. + type Value: ?Sized; + + /// Returns the key in the entry. + fn key(&self) -> KeyPointer; + + /// Returns the next entry in the memory table. + fn next(&mut self) -> Option; + + /// Returns the previous entry in the memory table. + fn prev(&mut self) -> Option; +} + +/// An entry which is stored in the memory table. +pub trait MemtableEntry<'a>: BaseEntry<'a> + WithoutVersion { + /// Returns the value in the entry. + fn value(&self) -> ValuePointer; +} + +/// An entry which is stored in the multiple versioned memory table. +pub trait VersionedMemtableEntry<'a>: BaseEntry<'a> + WithVersion { + /// Returns the value in the entry. + fn value(&self) -> Option>; + + /// Returns the version of the entry if it is versioned. + fn version(&self) -> u64; +} + +/// A memory table which is used to store pointers to the underlying entries. +pub trait BaseTable { + /// The key type. + type Key: ?Sized; + + /// The value type. + type Value: ?Sized; + + /// The configuration options for the memtable. + type Options; + + /// The error type may be returned when constructing the memtable. + type Error; + + /// The item returned by the iterator or query methods. + type Item<'a>: BaseEntry<'a, Key = Self::Key, Value = Self::Value> + Clone + where + Self: 'a; + + /// The iterator type. + type Iterator<'a>: DoubleEndedIterator> + where + Self: 'a; + + /// The range iterator type. + type Range<'a, Q, R>: DoubleEndedIterator> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + /// Creates a new memtable with the specified options. + fn new(opts: Self::Options) -> Result + where + Self: Sized; + + /// Inserts a pointer into the memtable. + fn insert( + &self, + version: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static; + + /// Removes the pointer associated with the key. + fn remove(&self, version: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static; + + /// Returns the kind of the memtable. + fn kind() -> Kind; +} + +/// A memory table which is used to store pointers to the underlying entries. +pub trait Memtable: BaseTable +where + for<'a> Self::Item<'a>: MemtableEntry<'a>, +{ + /// Returns the number of entries in the memtable. + fn len(&self) -> usize; + + /// Returns `true` if the memtable is empty. + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the upper bound of the memtable. + fn upper_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>; + + /// Returns the lower bound of the memtable. + fn lower_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>; + + /// Returns the first pointer in the memtable. + fn first(&self) -> Option> + where + KeyPointer: Ord; + + /// Returns the last pointer in the memtable. + fn last(&self) -> Option> + where + KeyPointer: Ord; + + /// Returns the pointer associated with the key. + fn get(&self, key: &Q) -> Option> + where + Q: ?Sized + Comparable>; + + /// Returns `true` if the memtable contains the specified pointer. + fn contains(&self, key: &Q) -> bool + where + Q: ?Sized + Comparable>; + + /// Returns an iterator over the memtable. + fn iter(&self) -> Self::Iterator<'_>; + + /// Returns an iterator over a subset of the memtable. 
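Before the `range` method below completes the trait: the `upper_bound`/`lower_bound`/`range` surface above mirrors what any sorted map offers, so the intended semantics can be read off a `std::collections::BTreeMap` for comparison:

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

let mut m = BTreeMap::new();
m.extend([(1, "a"), (3, "b"), (5, "c")]);

// lower_bound(Excluded(3)): the first key strictly greater than 3.
let lower = m.range((Bound::Excluded(3), Bound::Unbounded)).next();
assert_eq!(lower, Some((&5, &"c")));

// upper_bound(Included(3)): the last key less than or equal to 3.
let upper = m.range((Bound::Unbounded, Bound::Included(3))).next_back();
assert_eq!(upper, Some((&3, &"b")));
```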
+  fn range<'a, Q, R>(&'a self, range: R) -> Self::Range<'a, Q, R>
+  where
+    R: RangeBounds + 'a,
+    Q: ?Sized + Comparable>;
+}
+
+/// A memory table which is used to store pointers to the underlying entries.
+pub trait MultipleVersionMemtable: BaseTable
+where
+  for<'a> Self::Item<'a>: VersionedMemtableEntry<'a>,
+{
+  /// The item returned by the iterator or query methods.
+  type VersionedItem<'a>: VersionedMemtableEntry<'a, Key = Self::Key, Value = Self::Value> + Clone
+  where
+    KeyPointer: 'a,
+    Self: 'a;
+
+  /// The iterator type which can yield all the entries in the memtable.
+  type IterAll<'a>: DoubleEndedIterator>
+  where
+    KeyPointer: 'a,
+    Self: 'a;
+
+  /// The range iterator type which can yield all the entries in the memtable.
+  type RangeAll<'a, Q, R>: DoubleEndedIterator>
+  where
+    KeyPointer: 'a,
+    Self: 'a,
+    R: RangeBounds + 'a,
+    Q: ?Sized + Comparable>;
+
+  /// Returns the maximum version of the memtable.
+  fn maximum_version(&self) -> u64;
+
+  /// Returns the minimum version of the memtable.
+  fn minimum_version(&self) -> u64;
+
+  /// Returns `true` if the memtable may contain an entry whose version is less than or equal to the specified version.
+  fn may_contain_version(&self, version: u64) -> bool;
+
+  /// Returns the upper bound of the memtable.
+  fn upper_bound(&self, version: u64, bound: Bound<&Q>) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns the upper bound of the memtable.
+  fn upper_bound_versioned(
+    &self,
+    version: u64,
+    bound: Bound<&Q>,
+  ) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns the lower bound of the memtable.
+  fn lower_bound(&self, version: u64, bound: Bound<&Q>) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns the lower bound of the memtable.
+  fn lower_bound_versioned(
+    &self,
+    version: u64,
+    bound: Bound<&Q>,
+  ) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns the first pointer in the memtable.
+  fn first(&self, version: u64) -> Option>
+  where
+    KeyPointer: Ord;
+
+  /// Returns the first pointer in the memtable.
+  fn first_versioned(&self, version: u64) -> Option>
+  where
+    KeyPointer: Ord;
+
+  /// Returns the last pointer in the memtable.
+  fn last(&self, version: u64) -> Option>
+  where
+    KeyPointer: Ord;
+
+  /// Returns the last pointer in the memtable.
+  fn last_versioned(&self, version: u64) -> Option>
+  where
+    KeyPointer: Ord;
+
+  /// Returns the pointer associated with the key.
+  fn get(&self, version: u64, key: &Q) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns the pointer associated with the key.
+  fn get_versioned(&self, version: u64, key: &Q) -> Option>
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns `true` if the memtable contains the specified pointer.
+  fn contains(&self, version: u64, key: &Q) -> bool
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns `true` if the memtable contains the specified pointer.
+  fn contains_versioned(&self, version: u64, key: &Q) -> bool
+  where
+    Q: ?Sized + Comparable>;
+
+  /// Returns an iterator over the memtable.
+  fn iter(&self, version: u64) -> Self::Iterator<'_>;
+
+  /// Returns an iterator over all the entries in the memtable.
+  fn iter_all_versions(&self, version: u64) -> Self::IterAll<'_>;
+
+  /// Returns an iterator over a subset of the memtable.
+  fn range<'a, Q, R>(&'a self, version: u64, range: R) -> Self::Range<'a, Q, R>
+  where
+    R: RangeBounds + 'a,
+    Q: ?Sized + Comparable>;
+
+  /// Returns an iterator over all the entries in a subset of the memtable.
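A toy model of the read rule this trait encodes: a query at a given version sees, per key, the newest entry whose version is less than or equal to it, while the `*_versioned` and `*_all_versions` variants also surface the superseded entries. A self-contained sketch of the visibility rule (the flat history representation is illustrative, not the crate's):

```rust
// Newest value whose version is <= the read version, if any.
fn latest_visible<'a>(history: &'a [(u64, &'a str)], read_version: u64) -> Option<&'a str> {
    history
        .iter()
        .filter(|(version, _)| *version <= read_version)
        .max_by_key(|(version, _)| *version)
        .map(|(_, value)| *value)
}

let history = [(1, "a@1"), (3, "a@3"), (7, "a@7")];
assert_eq!(latest_visible(&history, 0), None); // nothing visible yet
assert_eq!(latest_visible(&history, 3), Some("a@3"));
assert_eq!(latest_visible(&history, 5), Some("a@3")); // version 7 is in the future
```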
+ fn range_all_versions<'a, Q, R>(&'a self, version: u64, range: R) -> Self::RangeAll<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; +} diff --git a/src/memtable/alternative.rs b/src/memtable/alternative.rs new file mode 100644 index 0000000..ccde21a --- /dev/null +++ b/src/memtable/alternative.rs @@ -0,0 +1,289 @@ +pub use multiple_version::MultipleVersionTable; +pub use table::Table; + +macro_rules! match_op { + ($self:ident.$op:ident($($args:ident),*) $(.map($associated_ty:ident))?) => {{ + match $self { + Self::Arena(e) => e.$op($($args,)*) $(.map(Self::$associated_ty::Arena))?, + #[cfg(feature = "std")] + Self::Linked(e) => e.$op($($args,)*) $(.map(Self::$associated_ty::Linked))?, + }} + }; + (Dispatch::$associated_ty:ident($self:ident.$op:ident($($args:ident),*))) => {{ + match $self { + Self::Arena(e) => Self::$associated_ty::Arena(e.$op($($args,)*)), + #[cfg(feature = "std")] + Self::Linked(e) => Self::$associated_ty::Linked(e.$op($($args,)*)), + }} + }; + (new($opts:ident)) => {{ + match $opts { + Self::Options::Arena(opts) => ArenaTable::new(opts).map(Self::Arena).map_err(Self::Error::Arena), + #[cfg(feature = "std")] + Self::Options::Linked => LinkedTable::new(()) + .map(Self::Linked) + .map_err(|_| Self::Error::Linked), + } + }}; + (update($self:ident.$op:ident($($args:ident),*))) => {{ + match $self { + Self::Arena(t) => t.$op($($args,)*).map_err(Self::Error::Arena), + #[cfg(feature = "std")] + Self::Linked(t) => t.$op($($args,)*).map_err(|_| Self::Error::Linked), + } + }}; +} + +macro_rules! iter { + (enum $name:ident { + Arena($arena:ident), + Linked($linked:ident), + } -> $ent:ident) => { + /// A sum type of iter for different memtable implementations. + #[non_exhaustive] + pub enum $name<'a, K, V> + where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, + { + /// Arena iter + Arena($arena<'a, KeyPointer, ValuePointer>), + /// Linked iter + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked($linked<'a, KeyPointer, ValuePointer>), + } + + impl<'a, K, V> Iterator for $name<'a, K, V> + where + K: ?Sized + Type + Ord + 'static, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, + { + type Item = $ent<'a, K, V>; + + #[inline] + fn next(&mut self) -> Option { + match_op!(self.next().map(Item)) + } + } + + impl<'a, K, V> DoubleEndedIterator for $name<'a, K, V> + where + K: ?Sized + Type + Ord + 'static, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, + { + #[inline] + fn next_back(&mut self) -> Option { + match_op!(self.next_back().map(Item)) + } + } + }; +} + +macro_rules! range { + (enum $name:ident { + Arena($arena:ident), + Linked($linked:ident), + } -> $ent:ident) => { + /// A sum type of range for different memtable implementations. 
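An aside on semantics before the remaining plumbing: the contract every versioned getter and iterator above serves is that a read at version `v` sees, for each key, the newest entry whose version is at most `v`, and a removal is just a tombstone entry that hides older values. A self-contained toy model of that rule (illustrative `ToyMvcc` type, not the crate's data structure):

```rust
use std::collections::BTreeMap;

/// Toy multi-version map: (key, version) -> Option<value>, where `None` is a tombstone.
struct ToyMvcc {
  entries: BTreeMap<(String, u64), Option<String>>,
}

impl ToyMvcc {
  fn insert(&mut self, version: u64, key: &str, value: Option<&str>) {
    self.entries.insert((key.into(), version), value.map(Into::into));
  }

  /// A read at `version` sees the newest write whose version is <= `version`.
  fn get(&self, version: u64, key: &str) -> Option<&str> {
    self
      .entries
      .range((key.to_string(), 0)..=(key.to_string(), version))
      .next_back()
      .and_then(|(_, v)| v.as_deref())
  }
}

fn main() {
  let mut m = ToyMvcc { entries: BTreeMap::new() };
  m.insert(1, "a", Some("v1"));
  m.insert(3, "a", Some("v3"));
  m.insert(5, "a", None); // remove "a" at version 5

  assert_eq!(m.get(2, "a"), Some("v1"));
  assert_eq!(m.get(4, "a"), Some("v3"));
  assert_eq!(m.get(6, "a"), None); // the tombstone hides older values
}
```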
+ #[non_exhaustive] + pub enum $name<'a, K, V, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable>, + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, + { + /// Arena range + Arena($arena<'a, KeyPointer, ValuePointer, Q, R>), + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + /// Linked range + Linked($linked<'a, Q, R, KeyPointer, ValuePointer>), + } + + impl<'a, K, V, Q, R> Iterator for $name<'a, K, V, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable>, + K: ?Sized + Type + Ord + 'a, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'a, + { + type Item = $ent<'a, K, V>; + + #[inline] + fn next(&mut self) -> Option { + match_op!(self.next().map(Item)) + } + } + + impl<'a, K, V, Q, R> DoubleEndedIterator for $name<'a, K, V, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable>, + K: ?Sized + Type + Ord + 'a, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'a, + { + fn next_back(&mut self) -> Option { + match_op!(self.next_back().map(Item)) + } + } + }; +} + +macro_rules! base_entry { + (enum $name:ident { + Arena($arena:ident), + Linked($linked:ident), + }) => { + /// A sum type of entry for different memtable implementations. + #[derive(Debug)] + #[non_exhaustive] + pub enum $name<'a, K, V> + where + K: ?Sized, + V: ?Sized, + { + /// Arena entry + Arena($arena<'a, KeyPointer, ValuePointer>), + /// Linked entry + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked($linked<'a, KeyPointer, ValuePointer>), + } + + impl Clone for $name<'_, K, V> { + #[inline] + fn clone(&self) -> Self { + match self { + Self::Arena(e) => Self::Arena(e.clone()), + #[cfg(feature = "std")] + Self::Linked(e) => Self::Linked(e.clone()), + } + } + } + + impl<'a, K, V> BaseEntry<'a> for $name<'a, K, V> + where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, + { + type Key = K; + + type Value = V; + + #[inline] + fn key(&self) -> KeyPointer { + *match_op!(self.key()) + } + + fn next(&mut self) -> Option { + match self { + Self::Arena(e) => e.next().map(Self::Arena), + #[cfg(feature = "std")] + Self::Linked(e) => e.next().map(Self::Linked), + } + } + + fn prev(&mut self) -> Option { + match self { + Self::Arena(e) => e.prev().map(Self::Arena), + #[cfg(feature = "std")] + Self::Linked(e) => e.prev().map(Self::Linked), + } + } + } + }; +} + +/// The sum type for different memtable implementations options. +#[derive(Debug)] +#[non_exhaustive] +pub enum TableOptions { + /// The options for the arena memtable. + Arena(super::arena::TableOptions), + /// The options for the linked memtable. + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked, +} + +#[cfg(feature = "std")] +impl Default for TableOptions { + #[inline] + fn default() -> Self { + Self::linked() + } +} + +#[cfg(not(feature = "std"))] +impl Default for TableOptions { + #[inline] + fn default() -> Self { + Self::arena() + } +} + +impl From for TableOptions { + #[inline] + fn from(opts: super::arena::TableOptions) -> Self { + Self::Arena(opts) + } +} + +impl TableOptions { + /// Create a new arena memtable options with the default values. + #[inline] + pub const fn arena() -> Self { + Self::Arena(super::arena::TableOptions::new()) + } + + /// Create a new linked memtable options with the default values. 
+ #[inline] + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + pub const fn linked() -> Self { + Self::Linked + } +} + +/// The sum type of error for different memtable implementations. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// The error for the arena memtable. + Arena(skl::error::Error), + /// The error for the linked memtable. + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked, +} + +impl From for Error { + #[inline] + fn from(e: skl::error::Error) -> Self { + Self::Arena(e) + } +} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::Arena(e) => write!(f, "{e}"), + #[cfg(feature = "std")] + Self::Linked => Ok(()), + } + } +} + +impl core::error::Error for Error {} + +mod multiple_version; +mod table; diff --git a/src/memtable/alternative/multiple_version.rs b/src/memtable/alternative/multiple_version.rs new file mode 100644 index 0000000..863b096 --- /dev/null +++ b/src/memtable/alternative/multiple_version.rs @@ -0,0 +1,362 @@ +use core::ops::{Bound, RangeBounds}; + +use crate::{ + memtable::{ + arena::{ + multiple_version::{ + Entry as ArenaEntry, Iter as ArenaIter, IterAll as ArenaIterAll, Range as ArenaRange, + RangeAll as ArenaRangeAll, VersionedEntry as ArenaVersionedEntry, + }, + MultipleVersionTable as ArenaTable, + }, + BaseEntry, BaseTable, MultipleVersionMemtable, VersionedMemtableEntry, + }, + sealed::WithVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +#[cfg(feature = "std")] +use crate::memtable::linked::{ + multiple_version::{ + Entry as LinkedEntry, Iter as LinkedIter, IterAll as LinkedIterAll, Range as LinkedRange, + RangeAll as LinkedRangeAll, VersionedEntry as LinkedVersionedEntry, + }, + MultipleVersionTable as LinkedTable, +}; + +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; + +use super::TableOptions; + +base_entry!( + enum Entry { + Arena(ArenaEntry), + Linked(LinkedEntry), + } +); + +impl<'a, K, V> VersionedMemtableEntry<'a> for Entry<'a, K, V> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> Option> { + Some(*match_op!(self.value())) + } + + #[inline] + fn version(&self) -> u64 { + match_op!(self.version()) + } +} + +impl WithVersion for Entry<'_, K, V> {} + +base_entry!( + enum VersionedEntry { + Arena(ArenaVersionedEntry), + Linked(LinkedVersionedEntry), + } +); + +impl<'a, K, V> VersionedMemtableEntry<'a> for VersionedEntry<'a, K, V> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> Option> { + match_op!(self.value()).copied() + } + + #[inline] + fn version(&self) -> u64 { + match_op!(self.version()) + } +} + +impl WithVersion for VersionedEntry<'_, K, V> {} + +iter!( + enum Iter { + Arena(ArenaIter), + Linked(LinkedIter), + } -> Entry +); + +range!( + enum Range { + Arena(ArenaRange), + Linked(LinkedRange), + } -> Entry +); + +iter!( + enum IterAll { + Arena(ArenaIterAll), + Linked(LinkedIterAll), + } -> VersionedEntry +); + +range!( + enum RangeAll { + Arena(ArenaRangeAll), + Linked(LinkedRangeAll), + } -> VersionedEntry +); + +/// A sum type for different memtable implementations. 
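Usage-wise, the `TableOptions` sum type defined earlier in this file picks the backing memtable at construction time. A small sketch, assuming these types stay publicly re-exported under `orderwal::memtable` (the exact re-export path is a guess from the module layout):

```rust
// Hypothetical import path, inferred from src/memtable/alternative.rs.
use orderwal::memtable::{alternative, arena};

// Arena backend with a 64 KiB arena instead of the 8 KiB default.
let arena_opts: alternative::TableOptions =
  arena::TableOptions::new().with_capacity(64 * 1024).into();

// Linked (crossbeam-skiplist) backend; only available with the `std` feature,
// and the default when `std` is enabled.
let linked_opts = alternative::TableOptions::linked();
```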
+#[non_exhaustive] +pub enum MultipleVersionTable { + /// Arena memtable + Arena(ArenaTable), + /// Linked memtable + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked(LinkedTable), +} + +impl BaseTable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type Key = K; + + type Value = V; + + type Options = TableOptions; + + type Error = super::Error; + + type Item<'a> + = Entry<'a, K, V> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, K, V> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, K, V, Q, R> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + #[inline] + fn new(opts: Self::Options) -> Result + where + Self: Sized, + { + match_op!(new(opts)) + } + + #[inline] + fn insert( + &self, + version: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match_op!(update(self.insert(version, kp, vp))) + } + + #[inline] + fn remove(&self, version: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match_op!(update(self.remove(version, key))) + } + + #[inline] + fn kind() -> Kind { + Kind::MultipleVersion + } +} + +impl MultipleVersionMemtable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type VersionedItem<'a> + = VersionedEntry<'a, K, V> + where + KeyPointer: 'a, + Self: 'a; + + type IterAll<'a> + = IterAll<'a, K, V> + where + KeyPointer: 'a, + Self: 'a; + + type RangeAll<'a, Q, R> + = RangeAll<'a, K, V, Q, R> + where + KeyPointer: 'a, + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + #[inline] + fn maximum_version(&self) -> u64 { + match_op!(self.maximum_version()) + } + + #[inline] + fn minimum_version(&self) -> u64 { + match_op!(self.minimum_version()) + } + + #[inline] + fn may_contain_version(&self, version: u64) -> bool { + match_op!(self.may_contain_version(version)) + } + + #[inline] + fn upper_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.upper_bound(version, bound).map(Item)) + } + + fn upper_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self + .upper_bound_versioned(version, bound) + .map(VersionedItem)) + } + + #[inline] + fn lower_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.lower_bound(version, bound).map(Item)) + } + + fn lower_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self + .lower_bound_versioned(version, bound) + .map(VersionedItem)) + } + + #[inline] + fn first(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + match_op!(self.first(version).map(Item)) + } + + fn first_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + match_op!(self.first_versioned(version).map(VersionedItem)) + } + + #[inline] + fn last(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + match_op!(self.last(version).map(Item)) + } + + fn last_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + 
match_op!(self.last_versioned(version).map(VersionedItem)) + } + + #[inline] + fn get(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.get(version, key).map(Item)) + } + + fn get_versioned(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.get_versioned(version, key).map(VersionedItem)) + } + + #[inline] + fn contains(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + match_op!(self.contains(version, key)) + } + + fn contains_versioned(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + match_op!(self.contains_versioned(version, key)) + } + + #[inline] + fn iter(&self, version: u64) -> Self::Iterator<'_> { + match_op!(Dispatch::Iterator(self.iter(version))) + } + + fn iter_all_versions(&self, version: u64) -> Self::IterAll<'_> { + match_op!(Dispatch::IterAll(self.iter_all_versions(version))) + } + + #[inline] + fn range<'a, Q, R>(&'a self, version: u64, range: R) -> Self::Range<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + match_op!(Dispatch::Range(self.range(version, range))) + } + + fn range_all_versions<'a, Q, R>(&'a self, version: u64, range: R) -> Self::RangeAll<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + match_op!(Dispatch::RangeAll(self.range_all_versions(version, range))) + } +} diff --git a/src/memtable/alternative/table.rs b/src/memtable/alternative/table.rs new file mode 100644 index 0000000..ef75af6 --- /dev/null +++ b/src/memtable/alternative/table.rs @@ -0,0 +1,215 @@ +use core::ops::{Bound, RangeBounds}; + +use crate::{ + memtable::{ + arena::{ + table::{Entry as ArenaEntry, Iter as ArenaIter, Range as ArenaRange}, + Table as ArenaTable, + }, + BaseEntry, BaseTable, Memtable, MemtableEntry, + }, + sealed::WithoutVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +#[cfg(feature = "std")] +use crate::memtable::linked::{ + table::{Entry as LinkedEntry, Iter as LinkedIter, Range as LinkedRange}, + Table as LinkedTable, +}; + +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; + +use super::TableOptions; + +base_entry!( + enum Entry { + Arena(ArenaEntry), + Linked(LinkedEntry), + } +); + +impl<'a, K, V> MemtableEntry<'a> for Entry<'a, K, V> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> ValuePointer { + *match_op!(self.value()) + } +} + +impl WithoutVersion for Entry<'_, K, V> {} + +iter!( + enum Iter { + Arena(ArenaIter), + Linked(LinkedIter), + } -> Entry +); + +range!( + enum Range { + Arena(ArenaRange), + Linked(LinkedRange), + } -> Entry +); + +/// A sum type for different memtable implementations. 
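The `Table` enum that follows is where the macros above pay off: every trait method becomes a single `match` that forwards to the active backend, so dispatch is a branch on the variant tag rather than a virtual call. In spirit, `match_op!` expands to forwarding of this shape (a self-contained toy; the real macro also guards the `Linked` arm with `#[cfg(feature = "std")]`):

```rust
/// Toy sum type over two backends, mirroring how `Table` forwards calls.
enum Backend<A, L> {
  Arena(A),
  Linked(L),
}

impl<A, L, T> Iterator for Backend<A, L>
where
  A: Iterator<Item = T>,
  L: Iterator<Item = T>,
{
  type Item = T;

  fn next(&mut self) -> Option<T> {
    // This per-variant forwarding is what `match_op!` generates.
    match self {
      Self::Arena(it) => it.next(),
      Self::Linked(it) => it.next(),
    }
  }
}

fn main() {
  let it: Backend<_, std::iter::Empty<u64>> = Backend::Arena(1u64..=3);
  assert_eq!(it.sum::<u64>(), 6);
}
```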
+#[non_exhaustive] +pub enum Table { + /// Arena memtable + Arena(ArenaTable), + /// Linked memtable + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + Linked(LinkedTable), +} + +impl BaseTable for Table +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type Key = K; + + type Value = V; + + type Options = TableOptions; + + type Error = super::Error; + + type Item<'a> + = Entry<'a, K, V> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, K, V> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, K, V, Q, R> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + #[inline] + fn new(opts: Self::Options) -> Result + where + Self: Sized, + { + match_op!(new(opts)) + } + + #[inline] + fn insert( + &self, + version: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match_op!(update(self.insert(version, kp, vp))) + } + + #[inline] + fn remove(&self, version: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match_op!(update(self.remove(version, key))) + } + + #[inline] + fn kind() -> Kind { + Kind::Plain + } +} + +impl Memtable for Table +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + #[inline] + fn len(&self) -> usize { + match_op!(self.len()) + } + + #[inline] + fn upper_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.upper_bound(bound).map(Item)) + } + + #[inline] + fn lower_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.lower_bound(bound).map(Item)) + } + + #[inline] + fn first(&self) -> Option> + where + KeyPointer: Ord, + { + match_op!(self.first().map(Item)) + } + + #[inline] + fn last(&self) -> Option> + where + KeyPointer: Ord, + { + match_op!(self.last().map(Item)) + } + + #[inline] + fn get(&self, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + match_op!(self.get(key).map(Item)) + } + + #[inline] + fn contains(&self, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + match_op!(self.contains(key)) + } + + #[inline] + fn iter(&self) -> Self::Iterator<'_> { + match_op!(Dispatch::Iterator(self.iter())) + } + + #[inline] + fn range<'a, Q, R>(&'a self, range: R) -> Self::Range<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + match_op!(Dispatch::Range(self.range(range))) + } +} diff --git a/src/memtable/arena.rs b/src/memtable/arena.rs new file mode 100644 index 0000000..d05f414 --- /dev/null +++ b/src/memtable/arena.rs @@ -0,0 +1,102 @@ +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +macro_rules! memmap_or_not { + ($opts:ident($arena_opts:ident)) => {{ + if $opts.map_anon() { + $arena_opts + .map_anon::, ValuePointer, _>() + .map_err(skl::error::Error::IO) + } else { + $arena_opts.alloc::, ValuePointer, _>() + } + .map(|map| Self { map }) + }}; +} + +#[cfg(not(all(feature = "memmap", not(target_family = "wasm"))))] +macro_rules! memmap_or_not { + ($opts:ident($arena_opts:ident)) => {{ + $arena_opts + .alloc::, ValuePointer, _>() + .map(|map| Self { map }) + }}; +} + +pub use skl::Height; + +/// Options to configure the [`Table`] or [`MultipleVersionTable`]. 
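For the options struct defined just below: the defaults (8 KiB capacity, heap allocation, maximum height 20) suit tests more than real workloads, so the builder methods matter in practice. A usage sketch, assuming the type is re-exported at `orderwal::memtable::arena::TableOptions`:

```rust
// Hypothetical import path, inferred from the module layout.
use orderwal::memtable::arena::TableOptions;

// A 1 MiB arena backed by an anonymous mmap; `map_anon` only takes effect
// when the `memmap` feature is enabled on a non-wasm target.
let opts = TableOptions::new()
  .with_capacity(1024 * 1024)
  .with_map_anon(true);

assert_eq!(opts.capacity(), 1024 * 1024);
assert!(opts.map_anon());
```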
+#[derive(Debug, Copy, Clone)] +pub struct TableOptions { + capacity: u32, + map_anon: bool, + max_height: Height, +} + +impl Default for TableOptions { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl TableOptions { + /// Creates a new instance of `TableOptions` with the default options. + #[inline] + pub const fn new() -> Self { + Self { + capacity: 8192, + map_anon: false, + max_height: Height::new(), + } + } + + /// Sets the capacity of the table. + /// + /// Default is `8KB`. + #[inline] + pub const fn with_capacity(mut self, capacity: u32) -> Self { + self.capacity = capacity; + self + } + + /// Sets the table to use anonymous memory. + #[inline] + pub const fn with_map_anon(mut self, map_anon: bool) -> Self { + self.map_anon = map_anon; + self + } + + /// Sets the maximum height of the table. + /// + /// Default is `20`. + #[inline] + pub const fn with_max_height(mut self, max_height: Height) -> Self { + self.max_height = max_height; + self + } + + /// Returns the capacity of the table. + #[inline] + pub const fn capacity(&self) -> u32 { + self.capacity + } + + /// Returns `true` if the table is using anonymous memory. + #[inline] + pub const fn map_anon(&self) -> bool { + self.map_anon + } + + /// Returns the maximum height of the table. + #[inline] + pub const fn max_height(&self) -> Height { + self.max_height + } +} + +/// The multiple version memtable implementation. +pub mod multiple_version; +/// The memtable implementation. +pub mod table; + +pub use multiple_version::MultipleVersionTable; +pub use table::Table; diff --git a/src/memtable/arena/multiple_version.rs b/src/memtable/arena/multiple_version.rs new file mode 100644 index 0000000..238f461 --- /dev/null +++ b/src/memtable/arena/multiple_version.rs @@ -0,0 +1,358 @@ +use core::ops::{Bound, RangeBounds}; + +use among::Among; +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; +use skl::{ + either::Either, + multiple_version::{sync::SkipMap, Map as _}, + Options, +}; + +pub use skl::multiple_version::sync::{Entry, Iter, IterAll, Range, RangeAll, VersionedEntry}; + +use crate::{ + memtable::{BaseEntry, BaseTable, MultipleVersionMemtable, VersionedMemtableEntry}, + sealed::WithVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +use super::TableOptions; + +impl<'a, K, V> BaseEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + Entry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + Entry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *Entry::key(self) + } +} + +impl<'a, K, V> VersionedMemtableEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> Option> { + Some(*Entry::value(self)) + } + + #[inline] + fn version(&self) -> u64 { + Entry::version(self) + } +} + +impl WithVersion for Entry<'_, KeyPointer, ValuePointer> +where + K: ?Sized, + V: ?Sized, +{ +} + +impl<'a, K, V> BaseEntry<'a> for VersionedEntry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + VersionedEntry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + 
VersionedEntry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *VersionedEntry::key(self) + } +} + +impl<'a, K, V> VersionedMemtableEntry<'a> for VersionedEntry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn version(&self) -> u64 { + self.version() + } + + #[inline] + fn value(&self) -> Option> { + VersionedEntry::value(self).copied() + } +} + +impl WithVersion for VersionedEntry<'_, KeyPointer, ValuePointer> +where + K: ?Sized, + V: ?Sized, +{ +} + +/// A memory table implementation based on ARENA [`SkipMap`](skl). +pub struct MultipleVersionTable { + map: SkipMap, ValuePointer>, +} + +impl BaseTable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type Key = K; + type Value = V; + + type Item<'a> + = Entry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, KeyPointer, ValuePointer, Q, R> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + type Options = TableOptions; + + type Error = skl::error::Error; + + #[inline] + fn new(opts: Self::Options) -> Result { + let arena_opts = Options::new() + .with_capacity(opts.capacity()) + .with_freelist(skl::Freelist::None) + .with_unify(false) + .with_max_height(opts.max_height()); + + memmap_or_not!(opts(arena_opts)) + } + + fn insert( + &self, + version: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self + .map + .insert(version.unwrap_or(0), &kp, &vp) + .map(|_| ()) + .map_err(|e| match e { + Among::Right(e) => e, + _ => unreachable!(), + }) + } + + fn remove(&self, version: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match self.map.get_or_remove(version.unwrap_or(0), &key) { + Err(Either::Right(e)) => Err(e), + Err(Either::Left(_)) => unreachable!(), + _ => Ok(()), + } + } + + #[inline] + fn kind() -> Kind { + Kind::MultipleVersion + } +} + +impl MultipleVersionMemtable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type VersionedItem<'a> + = VersionedEntry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type IterAll<'a> + = IterAll<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type RangeAll<'a, Q, R> + = RangeAll<'a, KeyPointer, ValuePointer, Q, R> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + #[inline] + fn maximum_version(&self) -> u64 { + self.map.maximum_version() + } + + #[inline] + fn minimum_version(&self) -> u64 { + self.map.minimum_version() + } + + #[inline] + fn may_contain_version(&self, version: u64) -> bool { + self.map.may_contain_version(version) + } + + fn upper_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.upper_bound(version, bound) + } + + fn upper_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.upper_bound_versioned(version, bound) + } + + fn lower_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.lower_bound(version, bound) + } + + fn lower_bound_versioned( + 
&self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.lower_bound_versioned(version, bound) + } + + fn first(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.map.first(version) + } + + fn first_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.map.first_versioned(version) + } + + fn last(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.map.last(version) + } + + fn last_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.map.last_versioned(version) + } + + fn get(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.get(version, key) + } + + fn get_versioned(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.get_versioned(version, key) + } + + fn contains(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.map.contains_key(version, key) + } + + fn contains_versioned(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.map.contains_key_versioned(version, key) + } + + fn iter(&self, version: u64) -> Self::Iterator<'_> { + self.map.iter(version) + } + + fn iter_all_versions(&self, version: u64) -> Self::IterAll<'_> { + self.map.iter_all_versions(version) + } + + fn range<'a, Q, R>(&'a self, version: u64, range: R) -> Self::Range<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.map.range(version, range) + } + + fn range_all_versions<'a, Q, R>(&'a self, version: u64, range: R) -> Self::RangeAll<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.map.range_all_versions(version, range) + } +} diff --git a/src/memtable/arena/table.rs b/src/memtable/arena/table.rs new file mode 100644 index 0000000..b85ce71 --- /dev/null +++ b/src/memtable/arena/table.rs @@ -0,0 +1,204 @@ +use core::ops::{Bound, RangeBounds}; + +use among::Among; +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; +use skl::{ + either::Either, + map::{sync::SkipMap, Map as _}, + Arena as _, EntryRef, Options, +}; + +use crate::{ + memtable::{BaseEntry, BaseTable, Memtable, MemtableEntry}, + sealed::WithoutVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +use super::TableOptions; + +pub use skl::map::sync::{Entry, Iter, Range}; + +impl<'a, K, V> BaseEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + Entry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + Entry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *EntryRef::key(self) + } +} + +impl<'a, K, V> MemtableEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> ValuePointer { + *EntryRef::value(self) + } +} + +impl WithoutVersion for Entry<'_, KeyPointer, ValuePointer> {} + +/// A memory table implementation based on ARENA [`SkipMap`](skl). 
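A note on the `insert`/`remove` error handling in these arena tables (above for the multiple-version table, below for the plain one): skl reports insert failures as a three-way `Among` of key-encoding, value-encoding, and allocation errors, but here the keys and values are fixed-size pointers whose encoding cannot fail, so only the `Right` arm is reachable and the others are collapsed with `unreachable!()`. A stand-in sketch (local `Among`, with hypothetical `Infallible` error slots) showing why a panic-free version of the same collapse type-checks:

```rust
use core::convert::Infallible;

/// Local stand-in for `among::Among`, the three-way error skl returns.
enum Among<L, M, R> {
  Left(L),
  Middle(M),
  Right(R),
}

/// When the key/value error slots are `Infallible`, only `Right` can exist,
/// so the other arms can be eliminated without `unreachable!()`.
fn collapse<E>(e: Among<Infallible, Infallible, E>) -> E {
  match e {
    Among::Right(e) => e,
    // `Infallible` has no values; an empty match proves these arms are dead.
    Among::Left(never) | Among::Middle(never) => match never {},
  }
}

fn main() {
  let e: Among<Infallible, Infallible, &str> = Among::Right("arena full");
  assert_eq!(collapse(e), "arena full");
}
```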
+pub struct Table { + map: SkipMap, ValuePointer>, +} + +impl BaseTable for Table +where + K: ?Sized + Type + Ord + 'static, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + type Key = K; + type Value = V; + type Item<'a> + = Entry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, KeyPointer, ValuePointer, Q, R> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + type Options = TableOptions; + type Error = skl::error::Error; + + #[inline] + fn new(opts: Self::Options) -> Result { + let arena_opts = Options::new() + .with_capacity(opts.capacity()) + .with_freelist(skl::Freelist::None) + .with_unify(false) + .with_max_height(opts.max_height()); + + memmap_or_not!(opts(arena_opts)) + } + + fn insert( + &self, + _: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self.map.insert(&kp, &vp).map(|_| ()).map_err(|e| match e { + Among::Right(e) => e, + _ => unreachable!(), + }) + } + + fn remove(&self, _: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + match self.map.get_or_remove(&key) { + Err(Either::Right(e)) => Err(e), + Err(Either::Left(_)) => unreachable!(), + _ => Ok(()), + } + } + + #[inline] + fn kind() -> Kind { + Kind::Plain + } +} + +impl Memtable for Table +where + K: ?Sized + Type + Ord + 'static, + for<'a> KeyPointer: Type = KeyPointer> + KeyRef<'a, KeyPointer>, + V: ?Sized + Type + 'static, +{ + #[inline] + fn len(&self) -> usize { + self.map.len() + } + + fn upper_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.upper_bound(bound) + } + + fn lower_bound(&self, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.lower_bound(bound) + } + + fn first(&self) -> Option> + where + KeyPointer: Ord, + { + self.map.first() + } + + fn last(&self) -> Option> + where + KeyPointer: Ord, + { + self.map.last() + } + + fn get(&self, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.map.get(key) + } + + fn contains(&self, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.map.contains_key(key) + } + + fn iter(&self) -> Self::Iterator<'_> { + self.map.iter() + } + + fn range<'a, Q, R>(&'a self, range: R) -> Self::Range<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.map.range(range) + } +} diff --git a/src/memtable/linked.rs b/src/memtable/linked.rs new file mode 100644 index 0000000..f14906f --- /dev/null +++ b/src/memtable/linked.rs @@ -0,0 +1,7 @@ +/// The multiple version memtable implementation. +pub mod multiple_version; +/// The memtable implementation. 
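Both linked tables declared here (see `table`, continued just below) are thin wrappers: the plain table over `crossbeam_skiplist::SkipMap`, the versioned one over `crossbeam-skiplist-mvcc`. The underlying structure is a lock-free ordered map, which is what makes the traits' `&self` insert/remove signatures possible. A quick refresher on the wrapped structure, using the plain `crossbeam-skiplist` API:

```rust
use crossbeam_skiplist::SkipMap;

fn main() {
  // Concurrent ordered map; all mutation goes through `&self`.
  let map: SkipMap<u64, &str> = SkipMap::new();
  map.insert(2, "two");
  map.insert(1, "one");

  assert_eq!(*map.get(&1).unwrap().value(), "one");
  assert_eq!(*map.front().unwrap().key(), 1);

  // Entries come back in key order, which the WAL relies on.
  let keys: Vec<u64> = map.iter().map(|e| *e.key()).collect();
  assert_eq!(keys, vec![1, 2]);
}
```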
+pub mod table; + +pub use multiple_version::MultipleVersionTable; +pub use table::Table; diff --git a/src/memtable/linked/multiple_version.rs b/src/memtable/linked/multiple_version.rs new file mode 100644 index 0000000..9475232 --- /dev/null +++ b/src/memtable/linked/multiple_version.rs @@ -0,0 +1,350 @@ +use core::{ + convert::Infallible, + ops::{Bound, RangeBounds}, +}; + +use crossbeam_skiplist_mvcc::nested::SkipMap; +pub use crossbeam_skiplist_mvcc::nested::{Entry, Iter, IterAll, Range, RangeAll, VersionedEntry}; + +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; + +use crate::{ + memtable::{self, BaseEntry, VersionedMemtableEntry}, + sealed::WithVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +/// An memory table implementation based on [`crossbeam_skiplist::SkipSet`]. +pub struct MultipleVersionTable(SkipMap, ValuePointer>); + +impl Default for MultipleVersionTable +where + K: ?Sized, + V: ?Sized, +{ + #[inline] + fn default() -> Self { + Self(SkipMap::new()) + } +} + +impl<'a, K, V> BaseEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + Entry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + Entry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *self.key() + } +} + +impl<'a, K, V> memtable::VersionedMemtableEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized, +{ + #[inline] + fn value(&self) -> Option> { + Some(*self.value()) + } + + #[inline] + fn version(&self) -> u64 { + Entry::version(self) + } +} + +impl WithVersion for Entry<'_, KeyPointer, ValuePointer> +where + K: ?Sized, + V: ?Sized, +{ +} + +impl<'a, K, V> BaseEntry<'a> for VersionedEntry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + VersionedEntry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + VersionedEntry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *self.key() + } +} + +impl<'a, K, V> VersionedMemtableEntry<'a> for VersionedEntry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized, +{ + #[inline] + fn version(&self) -> u64 { + VersionedEntry::version(self) + } + + #[inline] + fn value(&self) -> Option> { + self.value().copied() + } +} + +impl WithVersion for VersionedEntry<'_, KeyPointer, ValuePointer> +where + K: ?Sized, + V: ?Sized, +{ +} + +impl memtable::BaseTable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + 'static, +{ + type Key = K; + type Value = V; + type Item<'a> + = Entry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, Q, R, KeyPointer, ValuePointer> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + type Options = (); + type Error = Infallible; + + fn new(_: Self::Options) -> Result + where + Self: Sized, + { + Ok(Self(SkipMap::new())) + } + + #[inline] + fn insert( + &self, + version: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self.0.insert_unchecked(version.unwrap_or(0), kp, vp); + 
Ok(()) + } + + #[inline] + fn remove(&self, version: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self.0.remove_unchecked(version.unwrap_or(0), key); + Ok(()) + } + + #[inline] + fn kind() -> Kind { + Kind::MultipleVersion + } +} + +impl memtable::MultipleVersionMemtable for MultipleVersionTable +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + 'static, +{ + type VersionedItem<'a> + = VersionedEntry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type IterAll<'a> + = IterAll<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type RangeAll<'a, Q, R> + = RangeAll<'a, Q, R, KeyPointer, ValuePointer> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + #[inline] + fn maximum_version(&self) -> u64 { + self.0.maximum_version() + } + + #[inline] + fn minimum_version(&self) -> u64 { + self.0.minimum_version() + } + + #[inline] + fn may_contain_version(&self, version: u64) -> bool { + self.0.may_contain_version(version) + } + + fn upper_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.upper_bound(version, bound) + } + + fn upper_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.upper_bound_versioned(version, bound) + } + + fn lower_bound(&self, version: u64, bound: Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.lower_bound(version, bound) + } + + fn lower_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.lower_bound_versioned(version, bound) + } + + fn first(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.0.front(version) + } + + fn first_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.0.front_versioned(version) + } + + fn last(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.0.back(version) + } + + fn last_versioned(&self, version: u64) -> Option> + where + KeyPointer: Ord, + { + self.0.back_versioned(version) + } + + fn get(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.get(version, key) + } + + fn get_versioned(&self, version: u64, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.get_versioned(version, key) + } + + fn contains(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.0.contains_key(version, key) + } + + fn contains_versioned(&self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.0.contains_key_versioned(version, key) + } + + fn iter(&self, version: u64) -> Self::Iterator<'_> { + self.0.iter(version) + } + + fn iter_all_versions(&self, version: u64) -> Self::IterAll<'_> { + self.0.iter_all_versions(version) + } + + fn range<'a, Q, R>(&'a self, version: u64, range: R) -> Self::Range<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.0.range(version, range) + } + + fn range_all_versions<'a, Q, R>(&'a self, version: u64, range: R) -> Self::RangeAll<'a, Q, R> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.0.range_all_versions(version, range) + } +} diff --git a/src/memtable/linked/table.rs b/src/memtable/linked/table.rs new file mode 100644 index 0000000..51acc36 --- /dev/null +++ b/src/memtable/linked/table.rs @@ -0,0 +1,213 @@ +use core::{convert::Infallible, ops::RangeBounds}; + +use 
crossbeam_skiplist::SkipMap; +use dbutils::{ + equivalent::Comparable, + types::{KeyRef, Type}, +}; + +use crate::{ + memtable, + sealed::WithoutVersion, + types::Kind, + wal::{KeyPointer, ValuePointer}, +}; + +pub use crossbeam_skiplist::map::{Entry, Iter, Range}; + +/// An memory table implementation based on [`crossbeam_skiplist::SkipMap`]. +pub struct Table(SkipMap, ValuePointer>); + +impl core::fmt::Debug for Table +where + K: ?Sized + Type + Ord, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("Table").field(&self.0).finish() + } +} + +impl Default for Table { + #[inline] + fn default() -> Self { + Self(SkipMap::new()) + } +} + +impl<'a, K, V> memtable::BaseEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + Type, +{ + type Key = K; + type Value = V; + + #[inline] + fn next(&mut self) -> Option { + Entry::next(self) + } + + #[inline] + fn prev(&mut self) -> Option { + Entry::prev(self) + } + + #[inline] + fn key(&self) -> KeyPointer { + *self.key() + } +} + +impl<'a, K, V> memtable::MemtableEntry<'a> for Entry<'a, KeyPointer, ValuePointer> +where + K: ?Sized + Type + Ord, + K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + Type, +{ + #[inline] + fn value(&self) -> ValuePointer { + *self.value() + } +} + +impl WithoutVersion for Entry<'_, KeyPointer, ValuePointer> +where + K: ?Sized, + V: ?Sized, +{ +} + +impl memtable::BaseTable for Table +where + K: ?Sized + Type + Ord, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + Type + 'static, +{ + type Key = K; + type Value = V; + type Item<'a> + = Entry<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Iterator<'a> + = Iter<'a, KeyPointer, ValuePointer> + where + Self: 'a; + + type Range<'a, Q, R> + = Range<'a, Q, R, KeyPointer, ValuePointer> + where + Self: 'a, + R: RangeBounds + 'a, + Q: ?Sized + Comparable>; + + type Options = (); + type Error = Infallible; + + fn new(_: Self::Options) -> Result + where + Self: Sized, + { + Ok(Self(SkipMap::new())) + } + + #[inline] + fn insert( + &self, + _: Option, + kp: KeyPointer, + vp: ValuePointer, + ) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self.0.insert(kp, vp); + Ok(()) + } + + #[inline] + fn remove(&self, _: Option, key: KeyPointer) -> Result<(), Self::Error> + where + KeyPointer: Ord + 'static, + { + self.0.remove(&key); + Ok(()) + } + + #[inline] + fn kind() -> Kind { + Kind::Plain + } +} + +impl memtable::Memtable for Table +where + K: ?Sized + Type + Ord + 'static, + for<'a> K::Ref<'a>: KeyRef<'a, K>, + V: ?Sized + Type + 'static, +{ + #[inline] + fn len(&self) -> usize { + self.0.len() + } + + #[inline] + fn upper_bound(&self, bound: core::ops::Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.upper_bound(bound) + } + + #[inline] + fn lower_bound(&self, bound: core::ops::Bound<&Q>) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.lower_bound(bound) + } + + #[inline] + fn first(&self) -> Option> { + self.0.front() + } + + #[inline] + fn last(&self) -> Option> { + self.0.back() + } + + #[inline] + fn get(&self, key: &Q) -> Option> + where + Q: ?Sized + Comparable>, + { + self.0.get(key) + } + + #[inline] + fn contains(&self, key: &Q) -> bool + where + Q: ?Sized + Comparable>, + { + self.0.contains_key(key) + } + + #[inline] + fn iter(&self) -> Self::Iterator<'_> { + self.0.iter() + } + + #[inline] + fn range<'a, Q, R>(&'a self, range: R) -> Self::Range<'a, Q, R> + 
where + R: RangeBounds + 'a, + Q: ?Sized + Comparable>, + { + self.0.range(range) + } +} diff --git a/src/options.rs b/src/options.rs index 5e23d36..d3859de 100644 --- a/src/options.rs +++ b/src/options.rs @@ -1,23 +1,45 @@ +use rarena_allocator::{Freelist, Options as ArenaOptions}; +pub use skl::KeySize; + +use super::{CURRENT_VERSION, HEADER_SIZE}; + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] +mod memmap; + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +pub(crate) use memmap::*; + /// Options for the WAL. #[derive(Debug, Clone)] pub struct Options { - maximum_key_size: u32, + maximum_key_size: KeySize, maximum_value_size: u32, sync: bool, magic_version: u16, cap: Option, reserved: u32, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) lock_meta: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) read: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) write: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) create_new: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) create: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) truncate: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) append: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) stack: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) populate: bool, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] pub(crate) huge: Option, } @@ -44,22 +66,32 @@ impl Options { #[inline] pub const fn new() -> Self { Self { - maximum_key_size: u16::MAX as u32, + maximum_key_size: KeySize::new(), maximum_value_size: u32::MAX, sync: true, magic_version: 0, - huge: None, cap: None, reserved: 0, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] lock_meta: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] read: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] write: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] create_new: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] create: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] truncate: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] append: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] stack: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] populate: false, + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + huge: None, } } @@ -104,50 +136,6 @@ impl Options { self.reserved } - /// Set if lock the meta of the WAL in the memory to prevent OS from swapping out the header of WAL. - /// When using memory map backed WAL, the meta of the WAL - /// is in the header, meta is frequently accessed, - /// lock (`mlock` on the header) the meta can reduce the page fault, - /// but yes, this means that one WAL will have one page are locked in memory, - /// and will not be swapped out. So, this is a trade-off between performance and memory usage. - /// - /// Default is `true`. - /// - /// This configuration has no effect on windows and vec backed WAL. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_lock_meta(false); - /// ``` - #[inline] - pub const fn with_lock_meta(mut self, lock_meta: bool) -> Self { - self.lock_meta = lock_meta; - self - } - - /// Get if lock the meta of the WAL in the memory to prevent OS from swapping out the header of WAL. - /// When using memory map backed WAL, the meta of the WAL - /// is in the header, meta is frequently accessed, - /// lock (`mlock` on the header) the meta can reduce the page fault, - /// but yes, this means that one WAL will have one page are locked in memory, - /// and will not be swapped out. So, this is a trade-off between performance and memory usage. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_lock_meta(false); - /// assert_eq!(opts.lock_meta(), false); - /// ``` - #[inline] - pub const fn lock_meta(&self) -> bool { - self.lock_meta - } - /// Returns the magic version. /// /// The default value is `0`. @@ -192,13 +180,13 @@ impl Options { /// ## Example /// /// ```rust - /// use orderwal::Options; + /// use orderwal::{Options, KeySize}; /// - /// let options = Options::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); + /// let options = Options::new().with_maximum_key_size(KeySize::with(1024)); + /// assert_eq!(options.maximum_key_size(), KeySize::with(1024)); /// ``` #[inline] - pub const fn maximum_key_size(&self) -> u32 { + pub const fn maximum_key_size(&self) -> KeySize { self.maximum_key_size } @@ -261,13 +249,13 @@ impl Options { /// ## Example /// /// ```rust - /// use orderwal::Options; + /// use orderwal::{Options, KeySize}; /// - /// let options = Options::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); + /// let options = Options::new().with_maximum_key_size(KeySize::with(1024)); + /// assert_eq!(options.maximum_key_size(), KeySize::with(1024)); /// ``` #[inline] - pub const fn with_maximum_key_size(mut self, size: u32) -> Self { + pub const fn with_maximum_key_size(mut self, size: KeySize) -> Self { self.maximum_key_size = size; self } @@ -325,392 +313,11 @@ impl Options { } } -impl Options { - /// Sets the option for read access. - /// - /// This option, when true, will indicate that the file should be - /// `read`-able if opened. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_read(true); - /// ``` - #[inline] - pub fn with_read(mut self, read: bool) -> Self { - self.read = read; - self - } - - /// Sets the option for write access. - /// - /// This option, when true, will indicate that the file should be - /// `write`-able if opened. - /// - /// If the file already exists, any write calls on it will overwrite its - /// contents, without truncating it. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_write(true); - /// ``` - #[inline] - pub fn with_write(mut self, write: bool) -> Self { - self.write = write; - self - } - - /// Sets the option for the append mode. - /// - /// This option, when true, means that writes will append to a file instead - /// of overwriting previous contents. - /// Note that setting `.write(true).append(true)` has the same effect as - /// setting only `.append(true)`. 
- /// - /// For most filesystems, the operating system guarantees that all writes are - /// atomic: no writes get mangled because another process writes at the same - /// time. - /// - /// One maybe obvious note when using append-mode: make sure that all data - /// that belongs together is written to the file in one operation. This - /// can be done by concatenating strings before passing them to [`write()`], - /// or using a buffered writer (with a buffer of adequate size), - /// and calling [`flush()`] when the message is complete. - /// - /// If a file is opened with both read and append access, beware that after - /// opening, and after every write, the position for reading may be set at the - /// end of the file. So, before writing, save the current position (using - /// [seek]\([SeekFrom](std::io::SeekFrom)::[Current]\(opts))), and restore it before the next read. - /// - /// ## Note - /// - /// This function doesn't create the file if it doesn't exist. Use the - /// [`Options::with_create`] method to do so. - /// - /// [`write()`]: std::io::Write::write "io::Write::write" - /// [`flush()`]: std::io::Write::flush "io::Write::flush" - /// [seek]: std::io::Seek::seek "io::Seek::seek" - /// [Current]: std::io::SeekFrom::Current "io::SeekFrom::Current" - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_append(true); - /// ``` - #[inline] - pub fn with_append(mut self, append: bool) -> Self { - self.write = true; - self.append = append; - self - } - - /// Sets the option for truncating a previous file. - /// - /// If a file is successfully opened with this option set it will truncate - /// the file to opts length if it already exists. - /// - /// The file must be opened with write access for truncate to work. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_write(true).with_truncate(true); - /// ``` - #[inline] - pub fn with_truncate(mut self, truncate: bool) -> Self { - self.truncate = truncate; - self.write = true; - self - } - - /// Sets the option to create a new file, or open it if it already exists. - /// If the file does not exist, it is created and set the lenght of the file to the given size. - /// - /// In order for the file to be created, [`Options::with_write`] or - /// [`Options::with_append`] access must be used. - /// - /// See also [`std::fs::write()`][std::fs::write] for a simple function to - /// create a file with some given data. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_write(true).with_create(true); - /// ``` - #[inline] - pub fn with_create(mut self, val: bool) -> Self { - self.create = val; - self - } - - /// Sets the option to create a new file and set the file length to the given value, failing if it already exists. - /// - /// No file is allowed to exist at the target location, also no (dangling) symlink. In this - /// way, if the call succeeds, the file returned is guaranteed to be new. - /// - /// This option is useful because it is atomic. Otherwise between checking - /// whether a file exists and creating a new one, the file may have been - /// created by another process (a TOCTOU race condition / attack). - /// - /// If `.with_create_new(true)` is set, [`.with_create()`] and [`.with_truncate()`] are - /// ignored. - /// - /// The file must be opened with write or append access in order to create - /// a new file. 
- /// - /// [`.with_create()`]: Options::with_create - /// [`.with_truncate()`]: Options::with_truncate - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let file = Options::new() - /// .with_write(true) - /// .with_create_new(true); - /// ``` - #[inline] - pub fn with_create_new(mut self, val: bool) -> Self { - self.create_new = val; - self - } - - /// Configures the anonymous memory map to be suitable for a process or thread stack. - /// - /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on file-backed memory maps and vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Options; - /// - /// let stack = Options::new().with_stack(true); - /// ``` - #[inline] - pub fn with_stack(mut self, stack: bool) -> Self { - self.stack = stack; - self - } - - /// Configures the anonymous memory map to be allocated using huge pages. - /// - /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows. - /// - /// The size of the requested page can be specified in page bits. If not provided, the system - /// default is requested. The requested length should be a multiple of this, or the mapping - /// will fail. - /// - /// This option has no effect on file-backed memory maps and vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Options; - /// - /// let stack = Options::new().with_huge(Some(8)); - /// ``` - #[inline] - pub fn with_huge(mut self, page_bits: Option) -> Self { - self.huge = page_bits; - self - } - - /// Populate (prefault) page tables for a mapping. - /// - /// For a file mapping, this causes read-ahead on the file. This will help to reduce blocking on page faults later. - /// - /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on vec backed [`Wal`](crate::Wal). - /// - /// ## Example - /// - /// ``` - /// use orderwal::Options; - /// - /// let opts = Options::new().with_populate(true); - /// ``` - #[inline] - pub fn with_populate(mut self, populate: bool) -> Self { - self.populate = populate; - self - } -} - -impl Options { - /// Returns `true` if the file should be opened with read access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_read(true); - /// assert_eq!(opts.read(), true); - /// ``` - #[inline] - pub const fn read(&self) -> bool { - self.read - } - - /// Returns `true` if the file should be opened with write access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_write(true); - /// assert_eq!(opts.write(), true); - /// ``` - #[inline] - pub const fn write(&self) -> bool { - self.write - } - - /// Returns `true` if the file should be opened with append access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::Options; - /// - /// let opts = Options::new().with_append(true); - /// assert_eq!(opts.append(), true); - /// ``` - #[inline] - pub const fn append(&self) -> bool { - self.append - } - - /// Returns `true` if the file should be opened with truncate access. 
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_truncate(true);
-  /// assert_eq!(opts.truncate(), true);
-  /// ```
-  #[inline]
-  pub const fn truncate(&self) -> bool {
-    self.truncate
-  }
-
-  /// Returns `true` if the file should be created if it does not exist.
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_create(true);
-  /// assert_eq!(opts.create(), true);
-  /// ```
-  #[inline]
-  pub const fn create(&self) -> bool {
-    self.create
-  }
-
-  /// Returns `true` if the file should be created if it does not exist and fail if it does.
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_create_new(true);
-  /// assert_eq!(opts.create_new(), true);
-  /// ```
-  #[inline]
-  pub const fn create_new(&self) -> bool {
-    self.create_new
-  }
-
-  /// Returns `true` if the memory map should be suitable for a process or thread stack.
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_stack(true);
-  /// assert_eq!(opts.stack(), true);
-  /// ```
-  #[inline]
-  pub const fn stack(&self) -> bool {
-    self.stack
-  }
-
-  /// Returns the page bits of the memory map.
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_huge(Some(8));
-  /// assert_eq!(opts.huge(), Some(8));
-  /// ```
-  #[inline]
-  pub const fn huge(&self) -> Option<u8> {
-    self.huge
-  }
-
-  /// Returns `true` if the memory map should populate (prefault) page tables for a mapping.
-  ///
-  /// ## Examples
-  ///
-  /// ```rust
-  /// use orderwal::Options;
-  ///
-  /// let opts = Options::new().with_populate(true);
-  /// assert_eq!(opts.populate(), true);
-  /// ```
-  #[inline]
-  pub const fn populate(&self) -> bool {
-    self.populate
-  }
-}
-
-pub(crate) trait ArenaOptionsExt {
-  fn merge(self, opts: &Options) -> Self;
-}
-
-impl ArenaOptionsExt for super::ArenaOptions {
-  #[inline]
-  fn merge(self, opts: &Options) -> Self {
-    let new = self
-      .with_read(opts.read())
-      .with_write(opts.write())
-      .with_create(opts.create())
-      .with_create_new(opts.create_new())
-      .with_truncate(opts.truncate())
-      .with_append(opts.append())
-      .with_stack(opts.stack())
-      .with_populate(opts.populate())
-      .with_huge(opts.huge())
-      .with_lock_meta(opts.lock_meta());
-
-    if let Some(cap) = opts.cap {
-      new.with_capacity(cap)
-    } else {
-      new
-    }
-  }
+#[inline]
+pub(crate) const fn arena_options(reserved: u32) -> ArenaOptions {
+  ArenaOptions::new()
+    .with_magic_version(CURRENT_VERSION)
+    .with_freelist(Freelist::None)
+    .with_reserved((HEADER_SIZE + reserved as usize) as u32)
+    .with_unify(true)
 }
diff --git a/src/options/memmap.rs b/src/options/memmap.rs
new file mode 100644
index 0000000..23e0c14
--- /dev/null
+++ b/src/options/memmap.rs
@@ -0,0 +1,475 @@
+use super::*;
+
+impl Options {
+  /// Sets whether to lock the meta of the WAL in memory, preventing the OS from swapping out the WAL header.
+  ///
+  /// When using a memory-map backed WAL, the meta lives in the header and is accessed frequently,
+  /// so locking it (`mlock` on the header) can reduce page faults. The cost is that each WAL keeps
+  /// one page locked in memory that will never be swapped out, so this is a trade-off between
+  /// performance and memory usage.
+  ///
+  /// Default is `true`.
+  ///
+  /// This configuration has no effect on Windows and vec backed WAL.
+ /// + /// ## Example + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_lock_meta(false); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn with_lock_meta(mut self, lock_meta: bool) -> Self { + self.lock_meta = lock_meta; + self + } + + /// Sets the option for read access. + /// + /// This option, when true, will indicate that the file should be + /// `read`-able if opened. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_read(true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_read(mut self, read: bool) -> Self { + self.read = read; + self + } + + /// Sets the option for write access. + /// + /// This option, when true, will indicate that the file should be + /// `write`-able if opened. + /// + /// If the file already exists, any write calls on it will overwrite its + /// contents, without truncating it. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_write(true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub fn with_write(mut self, write: bool) -> Self { + self.write = write; + self + } + + /// Sets the option for the append mode. + /// + /// This option, when true, means that writes will append to a file instead + /// of overwriting previous contents. + /// Note that setting `.write(true).append(true)` has the same effect as + /// setting only `.append(true)`. + /// + /// For most filesystems, the operating system guarantees that all writes are + /// atomic: no writes get mangled because another process writes at the same + /// time. + /// + /// One maybe obvious note when using append-mode: make sure that all data + /// that belongs together is written to the file in one operation. This + /// can be done by concatenating strings before passing them to [`write()`], + /// or using a buffered writer (with a buffer of adequate size), + /// and calling [`flush()`] when the message is complete. + /// + /// If a file is opened with both read and append access, beware that after + /// opening, and after every write, the position for reading may be set at the + /// end of the file. So, before writing, save the current position (using + /// [seek]\([SeekFrom](std::io::SeekFrom)::[Current]\(opts))), and restore it before the next read. + /// + /// ## Note + /// + /// This function doesn't create the file if it doesn't exist. Use the + /// [`Options::with_create`] method to do so. 
+  /// [`write()`]: std::io::Write::write "io::Write::write"
+  /// [`flush()`]: std::io::Write::flush "io::Write::flush"
+  /// [seek]: std::io::Seek::seek "io::Seek::seek"
+  /// [Current]: std::io::SeekFrom::Current "io::SeekFrom::Current"
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new().with_append(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_append(mut self, append: bool) -> Self {
+    self.write = true;
+    self.append = append;
+    self
+  }
+
+  /// Sets the option for truncating a previous file.
+  ///
+  /// If a file is successfully opened with this option set, it will be truncated
+  /// to the length configured in the options if it already exists.
+  ///
+  /// The file must be opened with write access for truncate to work.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new().with_write(true).with_truncate(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_truncate(mut self, truncate: bool) -> Self {
+    self.truncate = truncate;
+    self.write = true;
+    self
+  }
+
+  /// Sets the option to create a new file, or open it if it already exists.
+  /// If the file does not exist, it is created and the length of the file is set to the given size.
+  ///
+  /// In order for the file to be created, [`Options::with_write`] or
+  /// [`Options::with_append`] access must be used.
+  ///
+  /// See also [`std::fs::write()`][std::fs::write] for a simple function to
+  /// create a file with some given data.
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new().with_write(true).with_create(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_create(mut self, val: bool) -> Self {
+    self.create = val;
+    self
+  }
+
+  /// Sets the option to create a new file and set the file length to the given value, failing if it already exists.
+  ///
+  /// No file is allowed to exist at the target location, also no (dangling) symlink. In this
+  /// way, if the call succeeds, the file returned is guaranteed to be new.
+  ///
+  /// This option is useful because it is atomic. Otherwise, between checking
+  /// whether a file exists and creating a new one, the file may have been
+  /// created by another process (a TOCTOU race condition / attack).
+  ///
+  /// If `.with_create_new(true)` is set, [`.with_create()`] and [`.with_truncate()`] are
+  /// ignored.
+  ///
+  /// The file must be opened with write or append access in order to create
+  /// a new file.
+  ///
+  /// [`.with_create()`]: Options::with_create
+  /// [`.with_truncate()`]: Options::with_truncate
+  ///
+  /// ## Examples
+  ///
+  /// ```rust
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new()
+  ///   .with_write(true)
+  ///   .with_create_new(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_create_new(mut self, val: bool) -> Self {
+    self.create_new = val;
+    self
+  }
+
+  /// Configures the anonymous memory map to be suitable for a process or thread stack.
+  ///
+  /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows.
+  ///
+  /// This option has no effect on file-backed memory maps and vec backed `Wal`.
+  ///
+  /// ## Example
+  ///
+  /// ```
+  /// use orderwal::Options;
+  ///
+  /// let stack = Options::new().with_stack(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_stack(mut self, stack: bool) -> Self {
+    self.stack = stack;
+    self
+  }
+
+  /// Configures the anonymous memory map to be allocated using huge pages.
+  ///
+  /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows.
+  ///
+  /// The size of the requested page can be specified in page bits. If not provided, the system
+  /// default is requested. The requested length should be a multiple of this, or the mapping
+  /// will fail.
+  ///
+  /// This option has no effect on file-backed memory maps and vec backed `Wal`.
+  ///
+  /// ## Example
+  ///
+  /// ```
+  /// use orderwal::Options;
+  ///
+  /// let huge = Options::new().with_huge(Some(8));
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_huge(mut self, page_bits: Option<u8>) -> Self {
+    self.huge = page_bits;
+    self
+  }
+
+  /// Populate (prefault) page tables for a mapping.
+  ///
+  /// For a file mapping, this causes read-ahead on the file. This will help to reduce blocking on page faults later.
+  ///
+  /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows.
+  ///
+  /// This option has no effect on vec backed `Wal`.
+  ///
+  /// ## Example
+  ///
+  /// ```
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new().with_populate(true);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub fn with_populate(mut self, populate: bool) -> Self {
+    self.populate = populate;
+    self
+  }
+}
+
+impl Options {
+  /// Returns whether the meta of the WAL is locked in memory, preventing the OS from swapping out the WAL header.
+  ///
+  /// When using a memory-map backed WAL, the meta lives in the header and is accessed frequently,
+  /// so locking it (`mlock` on the header) can reduce page faults, at the cost of keeping one page
+  /// per WAL locked in memory that will never be swapped out.
+  ///
+  /// ## Example
+  ///
+  /// ```rust
+  /// use orderwal::Options;
+  ///
+  /// let opts = Options::new().with_lock_meta(false);
+  /// assert_eq!(opts.lock_meta(), false);
+  /// ```
+  #[inline]
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  pub const fn lock_meta(&self) -> bool {
+    self.lock_meta
+  }
+
+  /// Returns `true` if the file should be opened with read access.
+ /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_read(true); + /// assert_eq!(opts.read(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn read(&self) -> bool { + self.read + } + + /// Returns `true` if the file should be opened with write access. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_write(true); + /// assert_eq!(opts.write(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn write(&self) -> bool { + self.write + } + + /// Returns `true` if the file should be opened with append access. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_append(true); + /// assert_eq!(opts.append(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn append(&self) -> bool { + self.append + } + + /// Returns `true` if the file should be opened with truncate access. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_truncate(true); + /// assert_eq!(opts.truncate(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn truncate(&self) -> bool { + self.truncate + } + + /// Returns `true` if the file should be created if it does not exist. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_create(true); + /// assert_eq!(opts.create(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn create(&self) -> bool { + self.create + } + + /// Returns `true` if the file should be created if it does not exist and fail if it does. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_create_new(true); + /// assert_eq!(opts.create_new(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn create_new(&self) -> bool { + self.create_new + } + + /// Returns `true` if the memory map should be suitable for a process or thread stack. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_stack(true); + /// assert_eq!(opts.stack(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn stack(&self) -> bool { + self.stack + } + + /// Returns the page bits of the memory map. 
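+  /// Returns `None` if the system default huge page size is requested.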
+ /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_huge(Some(8)); + /// assert_eq!(opts.huge(), Some(8)); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn huge(&self) -> Option { + self.huge + } + + /// Returns `true` if the memory map should populate (prefault) page tables for a mapping. + /// + /// ## Examples + /// + /// ```rust + /// use orderwal::Options; + /// + /// let opts = Options::new().with_populate(true); + /// assert_eq!(opts.populate(), true); + /// ``` + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + pub const fn populate(&self) -> bool { + self.populate + } +} + +pub(crate) trait ArenaOptionsExt { + fn merge(self, opts: &Options) -> Self; +} + +impl ArenaOptionsExt for ArenaOptions { + #[inline] + fn merge(self, opts: &Options) -> Self { + let new = self + .with_read(opts.read()) + .with_write(opts.write()) + .with_create(opts.create()) + .with_create_new(opts.create_new()) + .with_truncate(opts.truncate()) + .with_append(opts.append()) + .with_stack(opts.stack()) + .with_populate(opts.populate()) + .with_huge(opts.huge()) + .with_lock_meta(opts.lock_meta()); + + if let Some(cap) = opts.cap { + new.with_capacity(cap) + } else { + new + } + } +} diff --git a/src/pointer.rs b/src/pointer.rs deleted file mode 100644 index 1b1ae9a..0000000 --- a/src/pointer.rs +++ /dev/null @@ -1,195 +0,0 @@ -use core::{borrow::Borrow, cmp, marker::PhantomData, slice}; - -use dbutils::{ - traits::{KeyRef, Type}, - Comparator, -}; - -#[doc(hidden)] -pub struct Pointer { - /// The pointer to the start of the entry. - ptr: *const u8, - /// The length of the key. - key_len: usize, - /// The length of the value. - value_len: usize, - cmp: C, -} - -unsafe impl Send for Pointer {} -unsafe impl Sync for Pointer {} - -impl Pointer { - #[inline] - pub(crate) const fn new(key_len: usize, value_len: usize, ptr: *const u8, cmp: C) -> Self { - Self { - ptr, - key_len, - value_len, - cmp, - } - } - - #[inline] - pub const fn as_key_slice<'a>(&self) -> &'a [u8] { - if self.key_len == 0 { - return &[]; - } - - // SAFETY: `ptr` is a valid pointer to `len` bytes. - unsafe { slice::from_raw_parts(self.ptr, self.key_len) } - } - - #[inline] - pub const fn as_value_slice<'a, 'b: 'a>(&'a self) -> &'b [u8] { - if self.value_len == 0 { - return &[]; - } - - // SAFETY: `ptr` is a valid pointer to `len` bytes. 
- unsafe { slice::from_raw_parts(self.ptr.add(self.key_len), self.value_len) } - } -} - -impl PartialEq for Pointer { - fn eq(&self, other: &Self) -> bool { - self - .cmp - .compare(self.as_key_slice(), other.as_key_slice()) - .is_eq() - } -} - -impl Eq for Pointer {} - -impl PartialOrd for Pointer { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Pointer { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.cmp.compare(self.as_key_slice(), other.as_key_slice()) - } -} - -impl Borrow for Pointer -where - [u8]: Borrow, - Q: ?Sized + Ord, -{ - fn borrow(&self) -> &Q { - self.as_key_slice().borrow() - } -} - -impl super::wal::sealed::Pointer for Pointer { - type Comparator = C; - - #[inline] - fn new(klen: usize, vlen: usize, ptr: *const u8, cmp: C) -> Self { - Pointer::::new(klen, vlen, ptr, cmp) - } -} - -#[doc(hidden)] -#[derive(Debug)] -pub struct GenericPointer { - /// The pointer to the start of the entry. - ptr: *const u8, - /// The length of the key. - key_len: usize, - /// The length of the value. - value_len: usize, - _m: PhantomData<(fn() -> K, fn() -> V)>, -} - -impl crate::wal::sealed::Pointer for GenericPointer { - type Comparator = (); - - #[inline] - fn new(klen: usize, vlen: usize, ptr: *const u8, _cmp: Self::Comparator) -> Self { - Self::new(klen, vlen, ptr) - } -} - -impl PartialEq for GenericPointer { - fn eq(&self, other: &Self) -> bool { - self.as_key_slice() == other.as_key_slice() - } -} - -impl Eq for GenericPointer {} - -impl PartialOrd for GenericPointer -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for GenericPointer -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - fn cmp(&self, other: &Self) -> cmp::Ordering { - // SAFETY: WALs guarantee that the self and other must be the same as the result returned by `::encode`. - unsafe { as KeyRef>::compare_binary(self.as_key_slice(), other.as_key_slice()) } - } -} - -unsafe impl Send for GenericPointer -where - K: ?Sized, - V: ?Sized, -{ -} -unsafe impl Sync for GenericPointer -where - K: ?Sized, - V: ?Sized, -{ -} - -impl GenericPointer -where - K: ?Sized, - V: ?Sized, -{ - #[inline] - pub(crate) const fn new(key_len: usize, value_len: usize, ptr: *const u8) -> Self { - Self { - ptr, - key_len, - value_len, - _m: PhantomData, - } - } - - #[inline] - pub const fn as_key_slice<'a>(&self) -> &'a [u8] { - if self.key_len == 0 { - return &[]; - } - - // SAFETY: `ptr` is a valid pointer to `len` bytes. - unsafe { slice::from_raw_parts(self.ptr, self.key_len) } - } - - #[inline] - pub const fn as_value_slice<'a, 'b: 'a>(&'a self) -> &'b [u8] { - if self.value_len == 0 { - return &[]; - } - - // SAFETY: `ptr` is a valid pointer to `len` bytes. 
- unsafe { slice::from_raw_parts(self.ptr.add(self.key_len), self.value_len) } - } -} diff --git a/src/sealed.rs b/src/sealed.rs new file mode 100644 index 0000000..490225e --- /dev/null +++ b/src/sealed.rs @@ -0,0 +1,1393 @@ +use core::{ + ops::{Bound, RangeBounds}, + ptr::NonNull, +}; + +use among::Among; +use dbutils::{ + buffer::VacantBuffer, + equivalent::Comparable, + leb128::encoded_u64_varint_len, + types::{KeyRef, Type}, +}; +use rarena_allocator::{either::Either, Allocator, ArenaPosition, Buffer}; +use skl::KeySize; + +use crate::{ + memtable::{MemtableEntry, VersionedMemtableEntry}, + utils::merge_lengths, + wal::{KeyPointer, ValuePointer}, +}; + +use super::{ + batch::Batch, + checksum::{BuildChecksumer, Checksumer}, + error::Error, + memtable::{BaseTable, Memtable, MultipleVersionMemtable}, + options::Options, + types::{BufWriter, EncodedEntryMeta, EntryFlags}, + Flags, CHECKSUM_SIZE, HEADER_SIZE, MAGIC_TEXT, MAGIC_TEXT_SIZE, RECORD_FLAG_SIZE, VERSION_SIZE, + WAL_KIND_SIZE, +}; + +/// A marker trait which indicates that such pointer has a version. +pub trait WithVersion {} + +/// A marker trait which indicates that such pointer does not have a version. +pub trait WithoutVersion {} + +/// A marker trait which indicates that such WAL is immutable. +pub trait Immutable {} + +pub trait WalReader { + type Allocator: Allocator; + type Memtable; + + fn memtable(&self) -> &Self::Memtable; + + /// Returns the number of entries in the WAL. + fn len(&self) -> usize + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + self.memtable().len() + } + + /// Returns `true` if the WAL is empty. + #[inline] + fn is_empty(&self) -> bool + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + self.memtable().is_empty() + } + + #[inline] + fn iter(&self) -> ::Iterator<'_> + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::iter(self.memtable()) + } + + #[inline] + fn range<'a, Q, R>(&'a self, range: R) -> ::Range<'a, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable::Key>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + { + Memtable::range(self.memtable(), range) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + #[inline] + fn first(&self) -> Option<::Item<'_>> + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::first(self.memtable()) + } + + /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. + #[inline] + fn last(&self) -> Option<::Item<'_>> + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::last(self.memtable()) + } + + /// Returns `true` if the WAL contains the specified key. + fn contains_key(&self, key: &Q) -> bool + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::contains(self.memtable(), key) + } + + /// Returns the value associated with the key. 
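+  ///
+  /// Returns `None` if the key is not present.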
+ #[inline] + fn get(&self, key: &Q) -> Option<::Item<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::get(self.memtable(), key) + } + + #[inline] + fn upper_bound(&self, bound: Bound<&Q>) -> Option<::Item<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::upper_bound(self.memtable(), bound) + } + + #[inline] + fn lower_bound(&self, bound: Bound<&Q>) -> Option<::Item<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Memtable::lower_bound(self.memtable(), bound) + } +} + +pub trait MultipleVersionWalReader { + type Allocator: Allocator; + type Memtable; + + fn memtable(&self) -> &Self::Memtable; + + #[inline] + fn iter(&self, version: u64) -> ::Iterator<'_> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + MultipleVersionMemtable::iter(self.memtable(), version) + } + + #[inline] + fn range(&self, version: u64, range: R) -> ::Range<'_, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + MultipleVersionMemtable::range(self.memtable(), version, range) + } + + #[inline] + fn iter_all_versions( + &self, + version: u64, + ) -> ::IterAll<'_> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + MultipleVersionMemtable::iter_all_versions(self.memtable(), version) + } + + #[inline] + fn range_all_versions( + &self, + version: u64, + range: R, + ) -> ::RangeAll<'_, Q, R> + where + R: RangeBounds, + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().range_all_versions(version, range) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + #[inline] + fn first(&self, version: u64) -> Option<::Item<'_>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().first(version) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + /// + /// Compared to [`first`](MultipleVersionWalReader::first), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + #[inline] + fn first_versioned( + &self, + version: u64, + ) -> Option<::VersionedItem<'_>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().first_versioned(version) + } + + /// Returns the last key-value pair in the map. 
The key in this pair is the maximum key in the wal.
+  fn last(&self, version: u64) -> Option<::Item<'_>>
+  where
+    Self::Memtable: MultipleVersionMemtable,
+    ::Key: Type + Ord,
+    for<'a> <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    for<'a> ::Item<'a>: VersionedMemtableEntry<'a>,
+  {
+    self.memtable().last(version)
+  }
+
+  /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal.
+  ///
+  /// Compared to [`last`](MultipleVersionWalReader::last), this method returns a versioned item, which means that the returned item
+  /// may already be marked as removed.
+  fn last_versioned(
+    &self,
+    version: u64,
+  ) -> Option<::VersionedItem<'_>>
+  where
+    Self::Memtable: MultipleVersionMemtable,
+    ::Key: Type + Ord,
+    for<'a> <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    for<'a> ::Item<'a>: VersionedMemtableEntry<'a>,
+  {
+    self.memtable().last_versioned(version)
+  }
+
+  /// Returns `true` if the WAL contains the specified key.
+  fn contains_key(&self, version: u64, key: &Q) -> bool
+  where
+    Q: ?Sized + Comparable::Key>>,
+    Self::Memtable: MultipleVersionMemtable,
+    ::Key: Type + Ord,
+    for<'a> <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    for<'a> ::Item<'a>: VersionedMemtableEntry<'a>,
+  {
+    self.memtable().contains(version, key)
+  }
+
+  /// Returns `true` if the WAL contains the specified key.
+  ///
+  /// Compared to [`contains_key`](MultipleVersionWalReader::contains_key), this method also considers
+  /// versioned entries, so it may return `true` even if the latest entry for the key is marked as removed.
+  fn contains_key_versioned(&self, version: u64, key: &Q) -> bool
+  where
+    Q: ?Sized + Comparable::Key>>,
+    Self::Memtable: MultipleVersionMemtable,
+    ::Key: Type + Ord,
+    for<'a> <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    for<'a> ::Item<'a>: VersionedMemtableEntry<'a>,
+  {
+    self.memtable().contains_versioned(version, key)
+  }
+
+  /// Returns the entry associated with the key. The returned entry is the latest version of the key.
+  #[inline]
+  fn get(&self, version: u64, key: &Q) -> Option<::Item<'_>>
+  where
+    Q: ?Sized + Comparable::Key>>,
+    Self::Memtable: MultipleVersionMemtable,
+    ::Key: Type + Ord,
+    for<'a> <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    for<'a> ::Item<'a>: VersionedMemtableEntry<'a>,
+  {
+    self.memtable().get(version, key)
+  }
+
+  /// Returns the value associated with the key.
+  ///
+  /// Compared to [`get`](MultipleVersionWalReader::get), this method returns a versioned item, which means that the returned item
+  /// may already be marked as removed.
+ fn get_versioned( + &self, + version: u64, + key: &Q, + ) -> Option<::VersionedItem<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().get_versioned(version, key) + } + + fn upper_bound( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option<::Item<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().upper_bound(version, bound) + } + + fn upper_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option<::VersionedItem<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().upper_bound_versioned(version, bound) + } + + fn lower_bound( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option<::Item<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().lower_bound(version, bound) + } + + fn lower_bound_versioned( + &self, + version: u64, + bound: Bound<&Q>, + ) -> Option<::VersionedItem<'_>> + where + Q: ?Sized + Comparable::Key>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + self.memtable().lower_bound_versioned(version, bound) + } +} + +pub trait Wal { + type Allocator: Allocator; + type Memtable; + + fn construct(arena: Self::Allocator, base: Self::Memtable, opts: Options, checksumer: S) -> Self; + + fn allocator(&self) -> &Self::Allocator; + + fn options(&self) -> &Options; + + fn memtable(&self) -> &Self::Memtable; + + fn memtable_mut(&mut self) -> &mut Self::Memtable; + + fn hasher(&self) -> &S; + + /// Returns `true` if this WAL instance is read-only. + #[inline] + fn read_only(&self) -> bool { + self.allocator().read_only() + } + + /// Returns the path of the WAL if it is backed by a file. + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + #[inline] + fn path<'a>(&'a self) -> Option<&'a ::Path> + where + Self::Allocator: 'a, + { + self.allocator().path() + } + + /// Returns the maximum key size allowed in the WAL. + #[inline] + fn maximum_key_size(&self) -> KeySize { + self.options().maximum_key_size() + } + + /// Returns the maximum value size allowed in the WAL. + #[inline] + fn maximum_value_size(&self) -> u32 { + self.options().maximum_value_size() + } + + /// Returns the remaining capacity of the WAL. + #[inline] + fn remaining(&self) -> u32 { + self.allocator().remaining() as u32 + } + + /// Returns the capacity of the WAL. + #[inline] + fn capacity(&self) -> u32 { + self.options().capacity() + } + + /// Returns the reserved space in the WAL. + /// + /// ## Safety + /// - The writer must ensure that the returned slice is not modified. + /// - This method is not thread-safe, so be careful when using it. 
+  unsafe fn reserved_slice<'a>(&'a self) -> &'a [u8]
+  where
+    Self::Allocator: 'a,
+  {
+    let reserved = self.options().reserved();
+    if reserved == 0 {
+      return &[];
+    }
+
+    let allocator = self.allocator();
+    let reserved_slice = allocator.reserved_slice();
+    &reserved_slice[HEADER_SIZE..]
+  }
+
+  /// Returns the mutable reference to the reserved slice.
+  ///
+  /// ## Safety
+  /// - The caller must ensure that no one else is accessing the reserved slice for either read or write.
+  /// - This method is not thread-safe, so be careful when using it.
+  #[allow(clippy::mut_from_ref)]
+  unsafe fn reserved_slice_mut<'a>(&'a self) -> &'a mut [u8]
+  where
+    Self::Allocator: 'a,
+  {
+    let reserved = self.options().reserved();
+    if reserved == 0 {
+      return &mut [];
+    }
+
+    let allocator = self.allocator();
+    let reserved_slice = allocator.reserved_slice_mut();
+    &mut reserved_slice[HEADER_SIZE..]
+  }
+
+  /// Flushes the WAL to disk.
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  fn flush(&self) -> Result<(), Error>
+  where
+    Self::Memtable: BaseTable,
+  {
+    if !self.read_only() {
+      self.allocator().flush().map_err(Into::into)
+    } else {
+      Err(Error::read_only())
+    }
+  }
+
+  /// Asynchronously flushes the WAL to disk.
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  fn flush_async(&self) -> Result<(), Error>
+  where
+    Self::Memtable: BaseTable,
+  {
+    if !self.read_only() {
+      self.allocator().flush_async().map_err(Into::into)
+    } else {
+      Err(Error::read_only())
+    }
+  }
+
+  #[inline]
+  fn insert_pointer<'a>(
+    &'a self,
+    version: Option<u64>,
+    kp: KeyPointer<::Key>,
+    vp: Option::Value>>,
+  ) -> Result<(), Error>
+  where
+    Self::Memtable: BaseTable,
+    ::Key: Type + Ord + 'static,
+    <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    ::Value: Type + 'static,
+  {
+    let t = self.memtable();
+    if let Some(vp) = vp {
+      t.insert(version, kp, vp).map_err(Error::memtable)
+    } else {
+      t.remove(version, kp).map_err(Error::memtable)
+    }
+  }
+
+  #[inline]
+  fn insert_pointers<'a>(
+    &'a self,
+    mut ptrs: impl Iterator<
+      Item = (
+        Option<u64>,
+        KeyPointer<::Key>,
+        Option::Value>>,
+      ),
+    >,
+  ) -> Result<(), Error>
+  where
+    Self::Memtable: BaseTable,
+    ::Key: Type + Ord + 'static,
+    <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    ::Value: Type + 'static,
+  {
+    ptrs.try_for_each(|(version, kp, vp)| self.insert_pointer(version, kp, vp))
+  }
+
+  fn insert<'a, KE, VE>(
+    &'a self,
+    version: Option<u64>,
+    kb: KE,
+    vb: VE,
+  ) -> Result<(), Among>>
+  where
+    KE: super::types::BufWriterOnce,
+    VE: super::types::BufWriterOnce,
+    S: BuildChecksumer,
+    Self::Memtable: BaseTable,
+    ::Key: Type + Ord + 'static,
+    <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    ::Value: Type + 'static,
+  {
+    self.update(version, kb, Some(vb))
+  }
+
+  fn remove<'a, KE>(
+    &'a self,
+    version: Option<u64>,
+    kb: KE,
+  ) -> Result<(), Either>>
+  where
+    KE: super::types::BufWriterOnce,
+    S: BuildChecksumer,
+    Self::Memtable: BaseTable,
+    ::Key: Type + Ord + 'static,
+    <::Key as Type>::Ref<'a>:
+      KeyRef<'a, ::Key>,
+    ::Value: Type + 'static,
+  {
+    struct Noop;
+
+    impl super::types::BufWriterOnce for Noop {
+      type Error = ();
+
+      #[inline(never)]
+      #[cold]
+      fn encoded_len(&self) -> usize {
+        0
+      }
+
+      #[inline(never)]
+      #[cold]
+      fn write_once(self, _: &mut VacantBuffer<'_>) -> Result<usize, Self::Error> {
+        Ok(0)
+      }
+    }
+
+    self
+      .update::<KE, Noop>(version, kb, None)
.map_err(Among::into_left_right) + } + + fn update<'a, KE, VE>( + &'a self, + version: Option, + kb: KE, + vb: Option, + ) -> Result<(), Among>> + where + KE: super::types::BufWriterOnce, + VE: super::types::BufWriterOnce, + S: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + if self.read_only() { + return Err(Among::Right(Error::read_only())); + } + + let res = { + let klen = kb.encoded_len(); + let (vlen, remove) = vb + .as_ref() + .map(|vb| (vb.encoded_len(), false)) + .unwrap_or((0, true)); + let encoded_entry_meta = check( + klen, + vlen, + version.is_some(), + self.maximum_key_size().to_u32(), + self.maximum_value_size(), + self.read_only(), + ) + .map_err(Either::Right)?; + + let allocator = self.allocator(); + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + let is_ondisk = allocator.is_ondisk(); + + let buf = allocator.alloc_bytes(encoded_entry_meta.entry_size); + let mut cks = self.hasher().build_checksumer(); + + match buf { + Err(e) => Err(Among::Right(Error::from_insufficient_space(e))), + Ok(mut buf) => { + unsafe { + // We allocate the buffer with the exact size, so it's safe to write to the buffer. + let flag = Flags::COMMITTED.bits(); + + cks.update(&[flag]); + + buf.put_u8_unchecked(Flags::empty().bits()); + let written = buf.put_u64_varint_unchecked(encoded_entry_meta.packed_kvlen); + debug_assert_eq!( + written, encoded_entry_meta.packed_kvlen_size, + "the precalculated size should be equal to the written size" + ); + + let mut entry_flag = if !remove { + EntryFlags::empty() + } else { + EntryFlags::REMOVED + }; + + if let Some(version) = version { + entry_flag |= EntryFlags::VERSIONED; + buf.put_u8_unchecked(entry_flag.bits()); + buf.put_u64_le_unchecked(version); + } else { + buf.put_u8_unchecked(entry_flag.bits()); + } + + let ko = encoded_entry_meta.key_offset(); + let ptr = buf.as_mut_ptr().add(ko); + buf.set_len(encoded_entry_meta.entry_size as usize - VERSION_SIZE); + + let mut key_buf = VacantBuffer::new( + encoded_entry_meta.klen as usize, + NonNull::new_unchecked(ptr), + ); + let written = kb.write_once(&mut key_buf).map_err(Among::Left)?; + debug_assert_eq!( + written, encoded_entry_meta.klen as usize, + "the actual bytes written to the key buffer not equal to the expected size, expected {} but got {}.", + encoded_entry_meta.klen, written, + ); + + if let Some(vb) = vb { + let vo = encoded_entry_meta.value_offset(); + let mut value_buf = VacantBuffer::new( + encoded_entry_meta.vlen as usize, + NonNull::new_unchecked(buf.as_mut_ptr().add(vo)), + ); + let written = vb.write_once(&mut value_buf).map_err(Among::Middle)?; + + debug_assert_eq!( + written, encoded_entry_meta.vlen as usize, + "the actual bytes written to the value buffer not equal to the expected size, expected {} but got {}.", + encoded_entry_meta.vlen, written, + ); + } + + let cks = { + cks.update(&buf[1..]); + cks.digest() + }; + buf.put_u64_le_unchecked(cks); + + // commit the entry + buf[0] |= Flags::COMMITTED.bits(); + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + if self.options().sync() && is_ondisk { + allocator + .flush_header_and_range(buf.offset(), encoded_entry_meta.entry_size as usize) + .map_err(|e| Among::Right(e.into()))?; + } + + buf.detach(); + let ptr = buf.as_ptr().add(encoded_entry_meta.key_offset() as usize); + let kp = KeyPointer::new(entry_flag, encoded_entry_meta.klen, ptr); + let vp = (!remove).then(|| { + 
ValuePointer::new(encoded_entry_meta.vlen, ptr.add(encoded_entry_meta.klen)) + }); + Ok((buf.buffer_offset(), kp, vp)) + } + } + } + }; + + res.and_then(|(offset, kp, vp)| { + self.insert_pointer(version, kp, vp).map_err(|e| { + unsafe { + self.allocator().rewind(ArenaPosition::Start(offset as u32)); + }; + Among::Right(e) + }) + }) + } + + fn insert_batch<'a, W, B>( + &'a self, + batch: &mut B, + ) -> Result< + (), + Among<::Error, ::Error, Error>, + > + where + B: Batch, + B::Key: BufWriter, + B::Value: BufWriter, + S: BuildChecksumer, + W: Constructable, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + if self.read_only() { + return Err(Among::Right(Error::read_only())); + } + + let opts = self.options(); + let maximum_key_size = opts.maximum_key_size().to_u32(); + let minimum_value_size = opts.maximum_value_size(); + let start_offset = unsafe { + let (mut cursor, _allocator, mut buf) = batch + .iter_mut() + .try_fold((0u32, 0u64), |(num_entries, size), ent| { + let klen = ent.encoded_key_len(); + let vlen = ent.value_len(); + check_batch_entry(klen, vlen, maximum_key_size, minimum_value_size, ent.internal_version().is_some()).map(|meta| { + let ent_size = meta.entry_size as u64; + ent.set_encoded_meta(meta); + (num_entries + 1, size + ent_size) + }) + }) + .and_then(|(num_entries, batch_encoded_size)| { + // safe to cast batch_encoded_size to u32 here, we already checked it's less than capacity (less than u32::MAX). + let batch_meta = merge_lengths(num_entries, batch_encoded_size as u32); + let batch_meta_size = encoded_u64_varint_len(batch_meta); + let allocator = self.allocator(); + let remaining = allocator.remaining() as u64; + let total_size = RECORD_FLAG_SIZE as u64 + + batch_meta_size as u64 + + batch_encoded_size + + CHECKSUM_SIZE as u64; + if total_size > remaining { + return Err(Error::insufficient_space(total_size, remaining as u32)); + } + + let mut buf = allocator + .alloc_bytes(total_size as u32) + .map_err(Error::from_insufficient_space)?; + + let flag = Flags::BATCHING; + + buf.put_u8_unchecked(flag.bits()); + let size = buf.put_u64_varint_unchecked(batch_meta); + debug_assert_eq!( + size, batch_meta_size, + "the actual encoded u64 varint length ({}) doos not match the length ({}) returned by `dbutils::leb128::encoded_u64_varint_len`, please report bug to https://github.com/al8n/layer0/issues", + size, batch_meta_size, + ); + + Ok((RECORD_FLAG_SIZE + batch_meta_size, allocator, buf)) + }) + .map_err(Among::Right)?; + + for ent in batch.iter_mut() { + let meta = ent.encoded_meta(); + let version_size = if ent.internal_version().is_some() { + VERSION_SIZE + } else { + 0 + }; + + let remaining = buf.remaining(); + if remaining + < meta.packed_kvlen_size + EntryFlags::SIZE + version_size + meta.klen + meta.vlen + { + return Err(Among::Right( + Error::larger_batch_size(buf.capacity() as u32), + )); + } + + let ent_len_size = buf.put_u64_varint_unchecked(meta.packed_kvlen); + debug_assert_eq!( + ent_len_size, meta.packed_kvlen_size, + "the actual encoded u64 varint length ({}) doos not match the length ({}) returned by `dbutils::leb128::encoded_u64_varint_len`, please report bug to https://github.com/al8n/layer0/issues", + ent_len_size, meta.packed_kvlen_size, + ); + + buf.put_u8_unchecked(ent.flag.bits()); + let ptr = buf.as_mut_ptr(); + let (key_ptr, val_ptr) = if let Some(version) = ent.internal_version() { + buf.put_u64_le_unchecked(version); + + ( + ptr.add(cursor + 
meta.key_offset()), + ptr.add(cursor + meta.value_offset()), + ) + } else { + ( + ptr.add(cursor + meta.key_offset()), + ptr.add(cursor + meta.value_offset()), + ) + }; + buf.set_len(cursor + meta.value_offset()); + + let (kb, vb) = (ent.key(), ent.value()); + let mut key_buf = VacantBuffer::new(meta.klen, NonNull::new_unchecked(key_ptr)); + let written = kb.write(&mut key_buf).map_err(Among::Left)?; + debug_assert_eq!( + written, meta.klen, + "the actual bytes written to the key buffer not equal to the expected size, expected {} but got {}.", + meta.klen, written, + ); + + buf.set_len(cursor + meta.checksum_offset()); + if let Some(vb) = vb { + let mut value_buf = VacantBuffer::new(meta.vlen, NonNull::new_unchecked(val_ptr)); + let written = vb.write(&mut value_buf).map_err(Among::Middle)?; + + debug_assert_eq!( + written, meta.vlen, + "the actual bytes written to the value buffer not equal to the expected size, expected {} but got {}.", + meta.vlen, written, + ); + } + + let entry_size = meta.entry_size as usize; + let kp = KeyPointer::new(ent.flag, meta.klen, key_ptr); + let vp = vb.is_some().then(|| ValuePointer::new(meta.vlen, val_ptr)); + ent.set_pointer(kp, vp); + cursor += entry_size; + } + + let total_size = buf.capacity(); + if cursor + CHECKSUM_SIZE != total_size { + return Err(Among::Right(Error::batch_size_mismatch( + total_size as u32 - CHECKSUM_SIZE as u32, + cursor as u32, + ))); + } + + let mut cks = self.hasher().build_checksumer(); + let committed_flag = Flags::BATCHING | Flags::COMMITTED; + cks.update(&[committed_flag.bits()]); + cks.update(&buf[1..]); + let checksum = cks.digest(); + buf.put_u64_le_unchecked(checksum); + + // commit the entry + buf[0] = committed_flag.bits(); + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + if self.options().sync() && _allocator.is_ondisk() { + _allocator + .flush_header_and_range(Buffer::offset(&buf), buf.capacity()) + .map_err(|e| Among::Right(e.into()))?; + } + buf.detach(); + Buffer::buffer_offset(&buf) + }; + + self + .insert_pointers(batch.iter_mut().map(|e| { + let (kp, vp) = e.take_pointer().unwrap(); + (e.internal_version(), kp, vp) + })) + .map_err(|e| { + // Safety: the writer is single threaded, the memory chunk in buf cannot be accessed by other threads, + // so it's safe to rewind the arena. 
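+      // Rewinding the arena to `start_offset` discards the partially written batch,
+      // so a failed memtable insert leaves the WAL unchanged.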
+ unsafe { + self + .allocator() + .rewind(ArenaPosition::Start(start_offset as u32)); + } + Among::Right(e) + }) + } +} + +impl WalReader for T +where + T: Wal, + T::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, +{ + type Allocator = T::Allocator; + + type Memtable = T::Memtable; + + #[inline] + fn memtable(&self) -> &Self::Memtable { + T::memtable(self) + } +} + +impl MultipleVersionWalReader for T +where + T: Wal, + T::Memtable: MultipleVersionMemtable, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + for<'a> ::VersionedItem<'a>: VersionedMemtableEntry<'a>, +{ + type Allocator = T::Allocator; + + type Memtable = T::Memtable; + + #[inline] + fn memtable(&self) -> &Self::Memtable { + T::memtable(self) + } +} + +pub trait Constructable: Sized { + type Allocator: Allocator + 'static; + type Wal: Wal + 'static; + type Memtable: BaseTable; + type Checksumer; + type Reader; + + #[inline] + fn allocator<'a>(&'a self) -> &'a Self::Allocator + where + Self::Allocator: 'a, + Self::Wal: 'a, + { + self.as_wal().allocator() + } + + fn as_wal(&self) -> &Self::Wal; + + fn new_in( + arena: Self::Allocator, + opts: Options, + memtable_opts: ::Options, + cks: Self::Checksumer, + ) -> Result> { + unsafe { + let slice = arena.reserved_slice_mut(); + let mut cursor = 0; + slice[0..MAGIC_TEXT_SIZE].copy_from_slice(&MAGIC_TEXT); + cursor += MAGIC_TEXT_SIZE; + slice[MAGIC_TEXT_SIZE] = ::kind() as u8; + cursor += WAL_KIND_SIZE; + slice[cursor..HEADER_SIZE].copy_from_slice(&opts.magic_version().to_le_bytes()); + } + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + let this = arena + .flush_range(0, HEADER_SIZE) + .map_err(Into::into) + .and_then(|_| { + Self::Memtable::new(memtable_opts) + .map(|memtable| { + >::construct(arena, memtable, opts, cks) + }) + .map_err(Error::memtable) + }); + + #[cfg(not(all(feature = "memmap", not(target_family = "wasm"))))] + let this = Self::Memtable::new(memtable_opts) + .map(|memtable| >::construct(arena, memtable, opts, cks)) + .map_err(Error::memtable); + + this + } + + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + fn replay<'a>( + arena: Self::Allocator, + opts: Options, + memtable_opts: ::Options, + ro: bool, + checksumer: Self::Checksumer, + ) -> Result> + where + Self::Checksumer: BuildChecksumer, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + { + use super::{types::Kind, utils::split_lengths}; + use dbutils::leb128::decode_u64_varint; + + let slice = arena.reserved_slice(); + let mut cursor = 0; + let magic_text = &slice[0..MAGIC_TEXT_SIZE]; + if magic_text != MAGIC_TEXT { + return Err(Error::magic_text_mismatch()); + } + cursor += MAGIC_TEXT_SIZE; + let kind = Kind::try_from(slice[cursor])?; + let created_kind = ::kind(); + if kind != created_kind { + return Err(Error::wal_kind_mismatch(kind, created_kind)); + } + cursor += WAL_KIND_SIZE; + + let magic_version = u16::from_le_bytes(slice[cursor..HEADER_SIZE].try_into().unwrap()); + if magic_version != opts.magic_version() { + return Err(Error::magic_version_mismatch()); + } + + let set = >::Memtable::new(memtable_opts) + .map_err(Error::memtable)?; + + let mut cursor = arena.data_offset(); + let allocated = arena.allocated(); + let mut minimum_version = u64::MAX; + let mut maximum_version = 0; + + loop { + unsafe { + // we reached the end of the arena, if we have any remaining, then if means two possibilities: + // 1. 
the remaining is a partial entry, but it does not be persisted to the disk, so following the write-ahead log principle, we should discard it. + // 2. our file may be corrupted, so we discard the remaining. + if cursor + RECORD_FLAG_SIZE > allocated { + if !ro && cursor < allocated { + arena.rewind(ArenaPosition::Start(cursor as u32)); + arena.flush()?; + } + break; + } + + let header = arena.get_u8(cursor).unwrap(); + let flag = Flags::from_bits_retain(header); + + if !flag.contains(Flags::BATCHING) { + let (readed, encoded_len) = + arena + .get_u64_varint(cursor + RECORD_FLAG_SIZE) + .map_err(|e| { + #[cfg(feature = "tracing")] + tracing::error!(err=%e); + + Error::corrupted(e) + })?; + let (key_len, value_len) = split_lengths(encoded_len); + let key_len = key_len as usize; + let value_len = value_len as usize; + let entry_flag = arena + .get_u8(cursor + RECORD_FLAG_SIZE + readed) + .map_err(|e| { + #[cfg(feature = "tracing")] + tracing::error!(err=%e); + + Error::corrupted(e) + })?; + + let entry_flag = EntryFlags::from_bits_retain(entry_flag); + let version_size = if entry_flag.contains(EntryFlags::VERSIONED) { + VERSION_SIZE + } else { + 0 + }; + // Same as above, if we reached the end of the arena, we should discard the remaining. + let cks_offset = + RECORD_FLAG_SIZE + readed + EntryFlags::SIZE + version_size + key_len + value_len; + if cks_offset + CHECKSUM_SIZE > allocated { + // If the entry is committed, then it means our file is truncated, so we should report corrupted. + if flag.contains(Flags::COMMITTED) { + return Err(Error::corrupted("file is truncated")); + } + + if !ro { + arena.rewind(ArenaPosition::Start(cursor as u32)); + arena.flush()?; + } + + break; + } + + let cks = arena.get_u64_le(cursor + cks_offset).unwrap(); + if cks != checksumer.checksum_one(arena.get_bytes(cursor, cks_offset)) { + return Err(Error::corrupted("checksum mismatch")); + } + + // If the entry is not committed, we should not rewind + if !flag.contains(Flags::COMMITTED) { + if !ro { + arena.rewind(ArenaPosition::Start(cursor as u32)); + arena.flush()?; + } + + break; + } + + let ptr = arena.get_pointer(cursor + RECORD_FLAG_SIZE + readed); + let flag = EntryFlags::from_bits_retain(*ptr); + + let (version, ptr) = if flag.contains(EntryFlags::VERSIONED) { + let version_ptr = ptr.add(EntryFlags::SIZE); + let version = u64::from_le_bytes( + core::slice::from_raw_parts(version_ptr, VERSION_SIZE) + .try_into() + .unwrap(), + ); + minimum_version = minimum_version.min(version); + maximum_version = maximum_version.max(version); + (Some(version), version_ptr.add(VERSION_SIZE)) + } else { + (None, ptr.add(EntryFlags::SIZE)) + }; + + let kp = KeyPointer::new(flag, key_len, ptr); + if flag.contains(EntryFlags::REMOVED) { + set.remove(version, kp).map_err(Error::memtable)?; + } else { + let vp = ValuePointer::new(value_len, ptr.add(key_len)); + set.insert(version, kp, vp).map_err(Error::memtable)?; + } + + cursor += cks_offset + CHECKSUM_SIZE; + } else { + let (readed, encoded_len) = + arena + .get_u64_varint(cursor + RECORD_FLAG_SIZE) + .map_err(|e| { + #[cfg(feature = "tracing")] + tracing::error!(err=%e); + + Error::corrupted(e) + })?; + + let (num_entries, encoded_data_len) = split_lengths(encoded_len); + // Same as above, if we reached the end of the arena, we should discard the remaining. 
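+          // Batch record layout: flag (1 byte) | varint(num_entries, data_len) | entries | checksum (8 bytes).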
+ let cks_offset = RECORD_FLAG_SIZE + readed + encoded_data_len as usize; + let total_size = cks_offset + CHECKSUM_SIZE; + + if total_size > allocated { + // If the entry is committed, then it means our file is truncated, so we should report corrupted. + if flag.contains(Flags::COMMITTED) { + return Err(Error::corrupted("file is truncated")); + } + + if !ro { + arena.rewind(ArenaPosition::Start(cursor as u32)); + arena.flush()?; + } + + break; + } + let cks = arena.get_u64_le(cursor + cks_offset).unwrap(); + let mut batch_data_buf = arena.get_bytes(cursor, cks_offset); + if cks != checksumer.checksum_one(batch_data_buf) { + return Err(Error::corrupted("checksum mismatch")); + } + + let mut sub_cursor = 0; + batch_data_buf = &batch_data_buf[RECORD_FLAG_SIZE + readed..]; + for _ in 0..num_entries { + let (kvlen, ent_len) = decode_u64_varint(batch_data_buf).map_err(|e| { + #[cfg(feature = "tracing")] + tracing::error!(err=%e); + + Error::corrupted(e) + })?; + + let (klen, vlen) = split_lengths(ent_len); + let klen = klen as usize; + let vlen = vlen as usize; + + let ptr = arena.get_pointer(cursor + RECORD_FLAG_SIZE + readed + sub_cursor + kvlen); + let flag = EntryFlags::from_bits_retain(*ptr); + + let (version, ptr, ent_len) = if flag.contains(EntryFlags::VERSIONED) { + let version_ptr = ptr.add(EntryFlags::SIZE); + let version = u64::from_le_bytes( + core::slice::from_raw_parts(version_ptr, VERSION_SIZE) + .try_into() + .unwrap(), + ); + minimum_version = minimum_version.min(version); + maximum_version = maximum_version.max(version); + let ent_len = kvlen + EntryFlags::SIZE + VERSION_SIZE + klen + vlen; + (Some(version), version_ptr.add(VERSION_SIZE), ent_len) + } else { + let ent_len = kvlen + EntryFlags::SIZE + klen + vlen; + (None, ptr.add(EntryFlags::SIZE), ent_len) + }; + + let kp = KeyPointer::new(flag, klen, ptr); + if flag.contains(EntryFlags::REMOVED) { + set.remove(version, kp).map_err(Error::memtable)?; + } else { + let vp = ValuePointer::new(vlen, ptr.add(klen)); + set.insert(version, kp, vp).map_err(Error::memtable)?; + } + + sub_cursor += ent_len; + batch_data_buf = &batch_data_buf[ent_len..]; + } + + debug_assert_eq!( + encoded_data_len as usize, sub_cursor, + "expected encoded batch data size ({}) is not equal to the actual size ({})", + encoded_data_len, sub_cursor, + ); + + cursor += total_size; + } + } + } + + Ok(>::construct( + arena, set, opts, checksumer, + )) + } + + fn from_core(core: Self::Wal) -> Self; +} + +#[inline] +const fn min_u64(a: u64, b: u64) -> u64 { + if a < b { + a + } else { + b + } +} + +#[inline] +const fn check( + klen: usize, + vlen: usize, + versioned: bool, + max_key_size: u32, + max_value_size: u32, + ro: bool, +) -> Result> { + if ro { + return Err(Error::read_only()); + } + + let max_ksize = min_u64(max_key_size as u64, u32::MAX as u64); + let max_vsize = min_u64(max_value_size as u64, u32::MAX as u64); + + if max_ksize < klen as u64 { + return Err(Error::key_too_large(klen as u64, max_key_size)); + } + + if max_vsize < vlen as u64 { + return Err(Error::value_too_large(vlen as u64, max_value_size)); + } + + let encoded_entry_meta = EncodedEntryMeta::new(klen, vlen, versioned); + if encoded_entry_meta.entry_size == u32::MAX { + let version_size = if versioned { VERSION_SIZE } else { 0 }; + return Err(Error::entry_too_large( + encoded_entry_meta.entry_size as u64, + min_u64( + RECORD_FLAG_SIZE as u64 + + 10 + + EntryFlags::SIZE as u64 + + version_size as u64 + + max_key_size as u64 + + max_value_size as u64, + u32::MAX as u64, + ), + )); + } + + 
Ok(encoded_entry_meta)
+}
+
+#[inline]
+fn check_batch_entry(
+ klen: usize,
+ vlen: usize,
+ max_key_size: u32,
+ max_value_size: u32,
+ versioned: bool,
+) -> Result> {
+ let max_ksize = min_u64(max_key_size as u64, u32::MAX as u64);
+ let max_vsize = min_u64(max_value_size as u64, u32::MAX as u64);
+
+ if max_ksize < klen as u64 {
+ return Err(Error::key_too_large(klen as u64, max_key_size));
+ }
+
+ if max_vsize < vlen as u64 {
+ return Err(Error::value_too_large(vlen as u64, max_value_size));
+ }
+
+ let encoded_entry_meta = EncodedEntryMeta::batch(klen, vlen, versioned);
+ if encoded_entry_meta.entry_size == u32::MAX {
+ let version_size = if versioned { VERSION_SIZE } else { 0 };
+ return Err(Error::entry_too_large(
+ encoded_entry_meta.entry_size as u64,
+ min_u64(
+ 10 + EntryFlags::SIZE as u64
+ + version_size as u64
+ + max_key_size as u64
+ + max_value_size as u64,
+ u32::MAX as u64,
+ ),
+ ));
+ }
+
+ Ok(encoded_entry_meta)
+}
diff --git a/src/swmr.rs b/src/swmr.rs
index c3758f8..2993533 100644
--- a/src/swmr.rs
+++ b/src/swmr.rs
@@ -1,7 +1,136 @@
-/// The ordered write-ahead log only supports bytes.
-pub mod wal;
-pub use wal::{Builder, OrderWal};
+mod reader;
+mod wal;
+mod writer;
 
-/// The generic implementation of the ordered write-ahead log.
-pub mod generic;
-pub use generic::{GenericBuilder, GenericOrderWal};
+#[cfg(all(
+ test,
+ any(
+ all_orderwal_tests,
+ test_swmr_constructor,
+ test_swmr_insert,
+ test_swmr_get,
+ test_swmr_iters,
+ )
+))]
+mod tests;
+
+/// The ordered write-ahead log without multi-version support.
+pub mod base {
+ use dbutils::checksum::Crc32;
+
+ use super::{reader, writer};
+ #[cfg(feature = "std")]
+ use crate::memtable::linked::Table as BaseLinkedTable;
+ use crate::memtable::{
+ alternative::Table as BaseAlternativeTable, arena::Table as BaseArenaTable,
+ };
+
+ pub use crate::{
+ memtable::arena::TableOptions as ArenaTableOptions,
+ types::base::{Entry, Key, Value},
+ wal::base::{Iter, Keys, RangeKeys, RangeValues, Reader, Writer},
+ };
+
+ /// A memory table for [`OrderWal`] or [`OrderWalReader`] based on [`linked::Table`](BaseLinkedTable).
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ pub type LinkedTable = BaseLinkedTable;
+
+ /// A memory table for [`OrderWal`] or [`OrderWalReader`] based on [`arena::Table`](BaseArenaTable).
+ pub type ArenaTable = BaseArenaTable;
+
+ /// A memory table for [`OrderWal`] or [`OrderWalReader`] based on [`alternative::Table`](BaseAlternativeTable).
+ pub type AlternativeTable = BaseAlternativeTable;
+
+ /// The default memory table used by [`OrderWal`] or [`OrderWalReader`].
+ #[cfg(feature = "std")]
+ pub type DefaultTable = LinkedTable;
+
+ /// The default memory table used by [`OrderWal`] or [`OrderWalReader`].
+ #[cfg(not(feature = "std"))]
+ pub type DefaultTable = ArenaTable;
+
+ /// A generic ordered write-ahead log implementation for multi-threaded environments.
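+ ///
+ /// ## Example
+ ///
+ /// A minimal, illustrative sketch of the intended workflow. The exact
+ /// constructor and accessor signatures below are assumptions (hence the
+ /// ignored doctest); see the crate's `examples/` directory for working code.
+ ///
+ /// ```rust,ignore
+ /// use orderwal::{swmr::base::OrderWal, Builder};
+ ///
+ /// // Hypothetical call chain: an in-memory WAL with the default memtable.
+ /// let mut wal = Builder::new()
+ ///   .with_capacity(1024)
+ ///   .alloc::<OrderWal<String, String>>()
+ ///   .unwrap();
+ ///
+ /// // Writes are appended to the log and indexed in the memtable.
+ /// wal.insert(&"foo".to_string(), &"bar".to_string()).unwrap();
+ /// assert!(wal.contains_key(&"foo".to_string()));
+ ///
+ /// // Cheap read-only handles can be handed to other threads.
+ /// let reader = wal.reader();
+ /// assert!(reader.contains_key(&"foo".to_string()));
+ /// ```
+ ///
+ /// The on-disk layout of the log is shown below: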
+ ///
+ /// ```text
+ /// +----------------------+-------------------------+--------------------+
+ /// | magic text (6 bytes) | magic version (2 bytes) | header (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// | ... | ... | ... | ... | ... | ... |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// | ... | ... | ... | ... | ... | ... |
+ /// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+
+ /// ```
+ pub type OrderWal, S = Crc32> = writer::OrderWal;
+
+ /// Immutable reader for the generic ordered write-ahead log [`OrderWal`].
+ pub type OrderWalReader, S = Crc32> =
+ reader::OrderWalReader;
+}
+
+/// A multi-version ordered write-ahead log implementation for multi-threaded environments.
+pub mod multiple_version {
+ use dbutils::checksum::Crc32;
+
+ use super::{reader, writer};
+ #[cfg(feature = "std")]
+ use crate::memtable::linked::MultipleVersionTable as BaseLinkedTable;
+ use crate::memtable::{
+ alternative::MultipleVersionTable as BaseAlternativeTable,
+ arena::MultipleVersionTable as BaseArenaTable,
+ };
+
+ pub use crate::{
+ memtable::arena::TableOptions as ArenaTableOptions,
+ types::multiple_version::{Entry, Key, MultipleVersionEntry, Value},
+ wal::multiple_version::{
+ Iter, Keys, MultipleVersionIter, MultipleVersionRange, RangeKeys, RangeValues, Reader, Writer,
+ },
+ };
+
+ /// A memory table for the multi-version [`OrderWal`] or [`OrderWalReader`] based on [`linked::MultipleVersionTable`](BaseLinkedTable).
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ pub type LinkedTable = BaseLinkedTable;
+
+ /// A memory table for the multi-version [`OrderWal`] or [`OrderWalReader`] based on [`arena::MultipleVersionTable`](BaseArenaTable).
+ pub type ArenaTable = BaseArenaTable;
+
+ /// A memory table for the multi-version [`OrderWal`] or [`OrderWalReader`] based on [`alternative::MultipleVersionTable`](BaseAlternativeTable).
+ pub type AlternativeTable = BaseAlternativeTable;
+
+ /// The default memory table used by [`OrderWal`] or [`OrderWalReader`].
+ #[cfg(feature = "std")]
+ pub type DefaultTable = LinkedTable;
+
+ /// The default memory table used by [`OrderWal`] or [`OrderWalReader`].
+ #[cfg(not(feature = "std"))]
+ pub type DefaultTable = ArenaTable;
+
+ /// A multi-versioned generic ordered write-ahead log implementation for multi-threaded environments.
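+ ///
+ /// ## Example
+ ///
+ /// An illustrative sketch of version-aware writes and reads. The method
+ /// names and signatures below are assumptions (hence the ignored doctest);
+ /// see `examples/multiple_version.rs` for working code.
+ ///
+ /// ```rust,ignore
+ /// use orderwal::{swmr::multiple_version::OrderWal, Builder};
+ ///
+ /// // Hypothetical call chain: an in-memory multi-version WAL.
+ /// let mut wal = Builder::new()
+ ///   .with_capacity(1024)
+ ///   .alloc::<OrderWal<String, String>>()
+ ///   .unwrap();
+ ///
+ /// // Each mutation carries a u64 version; queries read as-of a version.
+ /// wal.insert(1, &"foo".to_string(), &"v1".to_string()).unwrap();
+ /// wal.insert(2, &"foo".to_string(), &"v2".to_string()).unwrap();
+ ///
+ /// assert_eq!(wal.get(1, &"foo".to_string()).unwrap().value(), "v1");
+ /// assert_eq!(wal.get(2, &"foo".to_string()).unwrap().value(), "v2");
+ /// ```
+ ///
+ /// The on-disk layout of the log is shown below: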
+ ///
+ /// ```text
+ /// +----------------------+-------------------------+--------------------+
+ /// | magic text (6 bytes) | magic version (2 bytes) | header (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | version (8 bytes) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | version (8 bytes) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+---------------------+-----------------+--------------------+
+ /// | flag (1 byte) | version (8 bytes) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) |
+ /// +----------------------+-------------------------+--------------------+---------------------+---------------------+-----------------+--------------------+
+ /// | ... | ... | ... | ... | ... | ... | ... |
+ /// +----------------------+-------------------------+--------------------+---------------------+---------------------+-----------------+--------------------+
+ /// ```
+ pub type OrderWal, S = Crc32> = writer::OrderWal;
+
+ /// Immutable reader for the multi-versioned generic ordered write-ahead log [`OrderWal`].
+ pub type OrderWalReader, S = Crc32> =
+ reader::OrderWalReader;
+}
diff --git a/src/swmr/generic.rs b/src/swmr/generic.rs
deleted file mode 100644
index 834b2f4..0000000
--- a/src/swmr/generic.rs
+++ /dev/null
@@ -1,1155 +0,0 @@
-use core::{
- cmp,
- marker::PhantomData,
- ops::Bound,
- ptr::NonNull,
- slice,
- sync::atomic::{AtomicPtr, Ordering},
-};
-use std::{
- path::{Path, PathBuf},
- sync::Arc,
-};
-
-use among::Among;
-use crossbeam_skiplist::SkipSet;
-use dbutils::{
- buffer::VacantBuffer,
- checksum::{BuildChecksumer, Checksumer, Crc32},
- leb128::encoded_u64_varint_len,
-};
-use rarena_allocator::{either::Either, sync::Arena, Allocator, Buffer, BytesRefMut};
-use ref_cast::RefCast;
-
-use crate::{
- arena_options, check, entry_size,
- error::{self, Error},
- merge_lengths,
- pointer::GenericPointer,
- wal::sealed::Constructor,
- BatchEncodedEntryMeta, EntryWithBuilders, EntryWithKeyBuilder, EntryWithValueBuilder, Flags,
- KeyBuilder, Options, ValueBuilder, CHECKSUM_SIZE, HEADER_SIZE, STATUS_SIZE,
-};
-
-pub use crate::{
- entry::{Generic, GenericEntry, GenericEntryRef},
- wal::{BatchWithBuilders, BatchWithKeyBuilder, BatchWithValueBuilder, GenericBatch},
-};
-
-pub use dbutils::{
- equivalent::{Comparable, Equivalent},
- traits::{KeyRef, Type, TypeRef},
-};
-
-mod reader;
-pub use reader::*;
-
-mod iter;
-pub use iter::*;
-
-mod builder;
-pub use builder::*;
-
-#[cfg(all(
- test,
- any(
- all_tests,
- test_swmr_generic_constructor,
- test_swmr_generic_insert,
- test_swmr_generic_get,
- test_swmr_generic_iters,
- )
-))]
-mod tests;
-
-#[derive(ref_cast::RefCast)]
-#[repr(transparent)]
-struct Slice {
- _k: PhantomData,
- data: [u8],
-}
-
-impl PartialEq for Slice {
- fn eq(&self, other: &Self) -> bool {
- self.data == other.data
- }
-}
-
-impl Eq for Slice {}
-
-impl PartialOrd for Slice
-where
- K: Type + Ord + ?Sized,
- for<'a> K::Ref<'a>: KeyRef<'a, K>,
-{
- fn partial_cmp(&self, other: &Self) 
-> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Slice -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, -{ - fn cmp(&self, other: &Self) -> cmp::Ordering { - unsafe { as KeyRef>::compare_binary(&self.data, &other.data) } - } -} - -impl Equivalent> for Slice -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - fn equivalent(&self, key: &GenericPointer) -> bool { - self.compare(key).is_eq() - } -} - -impl Comparable> for Slice -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - fn compare(&self, p: &GenericPointer) -> cmp::Ordering { - unsafe { - let kr: K::Ref<'_> = TypeRef::from_slice(p.as_key_slice()); - let or: K::Ref<'_> = TypeRef::from_slice(&self.data); - KeyRef::compare(&kr, &or).reverse() - } - } -} - -#[derive(PartialEq, Eq, PartialOrd, Ord)] -#[repr(transparent)] -struct Query<'a, K, Q> -where - K: ?Sized, - Q: ?Sized, -{ - key: &'a Q, - _k: PhantomData, -} - -impl<'a, K, Q> Query<'a, K, Q> -where - K: ?Sized, - Q: ?Sized, -{ - #[inline] - const fn new(key: &'a Q) -> Self { - Self { - key, - _k: PhantomData, - } - } -} - -impl<'a, 'b: 'a, K, Q, V> Equivalent> for Query<'a, K, Q> -where - K: Type + Ord + ?Sized, - V: ?Sized, - Q: ?Sized + Ord + Equivalent>, -{ - #[inline] - fn equivalent(&self, p: &GenericPointer) -> bool { - let kr = unsafe { as TypeRef<'b>>::from_slice(p.as_key_slice()) }; - Equivalent::equivalent(self.key, &kr) - } -} - -impl<'a, 'b: 'a, K, Q, V> Comparable> for Query<'a, K, Q> -where - K: Type + Ord + ?Sized, - V: ?Sized, - Q: ?Sized + Ord + Comparable>, -{ - #[inline] - fn compare(&self, p: &GenericPointer) -> cmp::Ordering { - let kr = unsafe { as TypeRef<'b>>::from_slice(p.as_key_slice()) }; - Comparable::compare(self.key, &kr) - } -} -#[doc(hidden)] -pub struct GenericOrderWalCore { - arena: Arena, - map: SkipSet>, - opts: Options, - cks: S, -} - -impl crate::wal::sealed::WalCore<(), S> for GenericOrderWalCore -where - K: ?Sized, - V: ?Sized, -{ - type Allocator = Arena; - - type Base = SkipSet>; - - type Pointer = GenericPointer; - - #[inline] - fn construct(arena: Self::Allocator, base: Self::Base, opts: Options, _cmp: (), cks: S) -> Self { - Self { - arena, - map: base, - opts, - cks, - } - } -} - -impl GenericOrderWalCore -where - K: ?Sized, - V: ?Sized, -{ - #[inline] - fn len(&self) -> usize { - self.map.len() - } - - #[inline] - fn is_empty(&self) -> bool { - self.map.is_empty() - } - - #[inline] - fn first(&self) -> Option> - where - K: Type + Ord, - for<'b> K::Ref<'b>: KeyRef<'b, K>, - V: Type, - { - self.map.front().map(GenericEntryRef::new) - } - - #[inline] - fn last(&self) -> Option> - where - K: Type + Ord, - for<'b> K::Ref<'b>: KeyRef<'b, K>, - V: Type, - { - self.map.back().map(GenericEntryRef::new) - } - - #[inline] - fn iter(&self) -> Iter<'_, K, V> - where - K: Type + Ord, - for<'b> K::Ref<'b>: KeyRef<'b, K>, - { - Iter::new(self.map.iter()) - } - - #[inline] - fn range<'a, Q>( - &'a self, - start_bound: Bound<&'a Q>, - end_bound: Bound<&'a Q>, - ) -> Range<'a, Q, K, V> - where - K: Type + Ord, - for<'b> K::Ref<'b>: KeyRef<'b, K>, - Q: Ord + ?Sized + for<'b> Comparable>, - { - Range::new( - self - .map - .range((start_bound.map(Query::new), end_bound.map(Query::new))), - ) - } -} - -impl Constructor<(), S> for GenericOrderWal -where - K: ?Sized, - V: ?Sized, -{ - type Allocator = Arena; - - type Core = GenericOrderWalCore; - - type Pointer = GenericPointer; - - fn allocator(&self) -> &Self::Allocator { - &self.core.arena - } - - fn 
from_core(core: Self::Core) -> Self { - Self { - core: Arc::new(core), - ro: false, - } - } -} - -impl GenericOrderWalCore -where - K: Type + Ord + ?Sized, - for<'a> ::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - #[inline] - fn contains_key<'a, Q>(&'a self, key: &Q) -> bool - where - Q: ?Sized + Ord + Comparable>, - { - self.map.contains::>(&Query::new(key)) - } - - #[inline] - unsafe fn contains_key_by_bytes(&self, key: &[u8]) -> bool { - self.map.contains(Slice::ref_cast(key)) - } - - #[inline] - fn get<'a, Q>(&'a self, key: &Q) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self - .map - .get::>(&Query::new(key)) - .map(GenericEntryRef::new) - } - - #[inline] - unsafe fn get_by_bytes(&self, key: &[u8]) -> Option> - where - V: Type, - { - self.map.get(Slice::ref_cast(key)).map(GenericEntryRef::new) - } - - #[inline] - fn upper_bound<'a, Q>(&'a self, key: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self - .map - .upper_bound(key.map(Query::new).as_ref()) - .map(GenericEntryRef::new) - } - - #[inline] - unsafe fn upper_bound_by_bytes(&self, key: Bound<&[u8]>) -> Option> - where - V: Type, - { - self - .map - .upper_bound(key.map(Slice::ref_cast)) - .map(GenericEntryRef::new) - } - - #[inline] - fn lower_bound<'a, Q>(&'a self, key: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self - .map - .lower_bound(key.map(Query::new).as_ref()) - .map(GenericEntryRef::new) - } - - #[inline] - unsafe fn lower_bound_by_bytes(&self, key: Bound<&[u8]>) -> Option> - where - V: Type, - { - self - .map - .lower_bound(key.map(Slice::ref_cast)) - .map(GenericEntryRef::new) - } -} - -/// Generic ordered write-ahead log implementation, which supports structured keys and values. -/// -/// Both read and write operations of this WAL are zero-cost (no allocation will happen for both read and write). -/// -/// Users can create multiple readers from the WAL by [`GenericOrderWal::reader`], but only one writer is allowed. -pub struct GenericOrderWal { - core: Arc>, - ro: bool, -} - -impl GenericOrderWal -where - K: Type + Ord + ?Sized + 'static, - for<'a> ::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. - #[inline] - pub fn first(&self) -> Option> - where - V: Type, - { - self.core.first() - } - - /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. - #[inline] - pub fn last(&self) -> Option> - where - V: Type, - { - self.core.last() - } - - /// Returns an iterator over the entries in the WAL. - #[inline] - pub fn iter(&self) -> Iter<'_, K, V> { - self.core.iter() - } - /// Returns an iterator over a subset of the entries in the WAL. - #[inline] - pub fn range<'a, Q>( - &'a self, - start_bound: Bound<&'a Q>, - end_bound: Bound<&'a Q>, - ) -> Range<'a, Q, K, V> - where - Q: Ord + ?Sized + for<'b> Comparable>, - { - self.core.range(start_bound, end_bound) - } -} - -impl GenericOrderWal -where - K: ?Sized, - V: ?Sized, -{ - /// Returns a read-only WAL instance. - #[inline] - pub fn reader(&self) -> GenericOrderWalReader { - GenericOrderWalReader::new(self.core.clone()) - } - - /// Returns the path of the WAL if it is backed by a file. - #[inline] - pub fn path(&self) -> Option<&std::sync::Arc> { - self.core.arena.path() - } - - /// Returns the reserved space in the WAL. - /// - /// ## Safety - /// - The writer must ensure that the returned slice is not modified. 
- /// - This method is not thread-safe, so be careful when using it. - #[inline] - pub unsafe fn reserved_slice(&self) -> &[u8] { - if self.core.opts.reserved() == 0 { - return &[]; - } - - &self.core.arena.reserved_slice()[HEADER_SIZE..] - } - - /// Returns the mutable reference to the reserved slice. - /// - /// ## Safety - /// - The caller must ensure that the there is no others accessing reserved slice for either read or write. - /// - This method is not thread-safe, so be careful when using it. - #[inline] - pub unsafe fn reserved_slice_mut(&mut self) -> &mut [u8] { - if self.core.opts.reserved() == 0 { - return &mut []; - } - - &mut self.core.arena.reserved_slice_mut()[HEADER_SIZE..] - } - - /// Returns number of entries in the WAL. - #[inline] - pub fn len(&self) -> usize { - self.core.len() - } - - /// Returns `true` if the WAL is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.core.is_empty() - } -} - -impl GenericOrderWal -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - /// Returns `true` if the key exists in the WAL. - #[inline] - pub fn contains_key<'a, Q>(&'a self, key: &Q) -> bool - where - Q: ?Sized + Ord + Comparable>, - { - self.core.contains_key(key) - } - - /// Returns `true` if the key exists in the WAL. - /// - /// ## Safety - /// - The given `key` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn contains_key_by_bytes(&self, key: &[u8]) -> bool { - self.core.contains_key_by_bytes(key) - } - - /// Gets the value associated with the key. - #[inline] - pub fn get<'a, Q>(&'a self, key: &Q) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.core.get(key) - } - - /// Gets the value associated with the key. - /// - /// ## Safety - /// - The given `key` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn get_by_bytes(&self, key: &[u8]) -> Option> - where - V: Type, - { - self.core.get_by_bytes(key) - } - - /// Returns a value associated to the highest element whose key is below the given bound. - /// If no such element is found then `None` is returned. - #[inline] - pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.core.upper_bound(bound) - } - - /// Returns a value associated to the highest element whose key is below the given bound. - /// If no such element is found then `None` is returned. - /// - /// ## Safety - /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn upper_bound_by_bytes( - &self, - bound: Bound<&[u8]>, - ) -> Option> - where - V: Type, - { - self.core.upper_bound_by_bytes(bound) - } - - /// Returns a value associated to the lowest element whose key is above the given bound. - /// If no such element is found then `None` is returned. - #[inline] - pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.core.lower_bound(bound) - } - - /// Returns a value associated to the lowest element whose key is above the given bound. - /// If no such element is found then `None` is returned. - /// - /// ## Safety - /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. 
- #[inline] - pub unsafe fn lower_bound_by_bytes( - &self, - bound: Bound<&[u8]>, - ) -> Option> - where - V: Type, - { - self.core.lower_bound_by_bytes(bound) - } -} - -impl GenericOrderWal -where - K: Type + Ord + for<'a> Comparable> + ?Sized + 'static, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: Type + ?Sized + 'static, - S: BuildChecksumer, -{ - /// Gets or insert the key value pair. - #[inline] - pub fn get_or_insert<'a>( - &mut self, - key: impl Into>, - val: impl Into>, - ) -> Either, Result<(), Among>> { - let key: Generic<'a, K> = key.into(); - let map = &self.core.map; - let ent = match key.data() { - Either::Left(k) => map.get(&Query::new(k)), - Either::Right(key) => map.get(Slice::ref_cast(key)), - }; - - match ent.map(|e| Either::Left(GenericEntryRef::new(e))) { - Some(e) => e, - None => { - let klen = key.encoded_len() as u32; - let kb: KeyBuilder<_> = KeyBuilder::once(klen, |buf| key.encode_to_buffer(buf).map(|_| ())); - - let val: Generic<'_, V> = val.into(); - let vlen = val.encoded_len() as u32; - let vb: ValueBuilder<_> = - ValueBuilder::once(vlen, |buf| val.encode_to_buffer(buf).map(|_| ())); - - Either::Right(self.insert_in(kb, vb)) - } - } - } - - /// Gets or insert the key with a value builder. - #[inline] - pub fn get_or_insert_with<'a>( - &mut self, - key: impl Into>, - value: impl FnOnce() -> V, - ) -> Either, Result<(), Among>> - where - V: Sized, - { - let key: Generic<'a, K> = key.into(); - let map = &self.core.map; - let ent = match key.data() { - Either::Left(k) => map.get(&Query::new(k)), - Either::Right(key) => map.get(Slice::ref_cast(key)), - }; - - match ent.map(|e| Either::Left(GenericEntryRef::new(e))) { - Some(e) => e, - None => { - let klen = key.encoded_len() as u32; - let kb: KeyBuilder<_> = KeyBuilder::once(klen, |buf| key.encode_to_buffer(buf).map(|_| ())); - let val = value(); - let vlen = val.encoded_len() as u32; - let vb: ValueBuilder<_> = - ValueBuilder::once(vlen, |buf| val.encode_to_buffer(buf).map(|_| ())); - - Either::Right(self.insert_in(kb, vb)) - } - } - } -} - -trait GenericEntryWithKeyBuilderLength { - fn value_len(&self) -> usize; -} - -impl GenericEntryWithKeyBuilderLength for EntryWithKeyBuilder, P> -where - V: Type + ?Sized, -{ - #[inline] - fn value_len(&self) -> usize { - self.value.encoded_len() - } -} - -trait GenericEntryWithValueBuilderLength { - fn key_len(&self) -> usize; -} - -impl GenericEntryWithValueBuilderLength for EntryWithValueBuilder, VB, P> -where - K: Type + ?Sized, -{ - #[inline] - fn key_len(&self) -> usize { - self.key.encoded_len() - } -} - -macro_rules! process_batch { - ($this:ident($batch:ident, $key:expr, $value:expr)) => {{ - let batch_ptr = AtomicPtr::new($batch); - let batch = batch_ptr.load(Ordering::Acquire); - (*batch) - .iter_mut() - .try_fold((0u32, 0u64), |(num_entries, size), ent| { - let klen = ent.key_len(); - let vlen = ent.value_len(); - crate::utils::check_batch_entry( - klen, - vlen, - $this.core.opts.maximum_key_size(), - $this.core.opts.maximum_value_size(), - ).map(|_| { - let merged_len = merge_lengths(klen as u32, vlen as u32); - let merged_len_size = encoded_u64_varint_len(merged_len); - let ent_size = klen as u64 + vlen as u64 + merged_len_size as u64; - ent.meta = BatchEncodedEntryMeta::new(klen, vlen, merged_len, merged_len_size); - (num_entries + 1, size + ent_size) - }) - .map_err(Among::Right) - }) - .and_then(|(num_entries, batch_encoded_size)| { - // safe to cast batch_encoded_size to u32 here, we already checked it's less than capacity (less than u32::MAX). 
- let batch_meta = merge_lengths(num_entries, batch_encoded_size as u32); - let batch_meta_size = encoded_u64_varint_len(batch_meta); - let allocator = &$this.core.arena; - let remaining = allocator.remaining() as u64; - let total_size = - STATUS_SIZE as u64 + batch_meta_size as u64 + batch_encoded_size + CHECKSUM_SIZE as u64; - if total_size > remaining { - return Err(Among::Right(Error::insufficient_space(total_size, remaining as u32))); - } - - let mut buf = allocator - .alloc_bytes(total_size as u32) - .map_err(|e| Among::Right(Error::from_insufficient_space(e)))?; - - let flag = Flags::BATCHING; - - buf.put_u8_unchecked(flag.bits()); - buf.put_u64_varint_unchecked(batch_meta); - - let mut cursor = 1 + batch_meta_size; - - for ent in (*batch).iter_mut() { - let remaining = buf.remaining(); - if remaining < ent.meta.kvlen_size + ent.meta.klen + ent.meta.vlen { - return Err(Among::Right( - Error::larger_batch_size(buf.capacity() as u32), - )); - } - - let ent_len_size = buf.put_u64_varint_unchecked(ent.meta.kvlen); - let ko = cursor + ent_len_size; - buf.set_len(ko + ent.meta.klen + ent.meta.vlen); - let ptr = buf.as_mut_ptr().add(ko); - - $key(ptr, &ent)?; - $value(ptr.add(ent.meta.klen), &ent)?; - - cursor += ent_len_size + ent.meta.klen + ent.meta.vlen; - ent.pointer = Some(GenericPointer::new(ent.meta.klen, ent.meta.vlen, ptr)); - } - - $this - .insert_batch_helper(&$this.core.arena, buf, cursor as usize, || { - (*batch).iter_mut().map(|ent| ent.pointer.take().unwrap()) - }) - .map_err(Among::Right) - }) - }}; -} - -impl GenericOrderWal -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: Type + ?Sized, - S: BuildChecksumer, -{ - /// Inserts a key-value pair into the write-ahead log. - /// - /// See also [`insert_with_key_builder`](GenericOrderWal::insert_with_key_builder), [`insert_with_value_builder`](GenericOrderWal::insert_with_value_builder), and [`insert_with_builders`](GenericOrderWal::insert_with_builders). - /// - /// ## Example - /// - /// Here are two examples of how flexible the `insert` method is: - /// - /// The `Person` struct implementation can be found [here](https://github.com/al8n/orderwal/blob/main/src/swmr/generic/tests.rs#L24). - /// - /// 1. **Inserting a key-value pair, key and value are references** - /// - /// ```rust,ignore - /// use orderwal::swmr::{*, generic::*}; - /// - /// let mut wal = GenericBuilder::new().with_capacity(1024).alloc::().unwrap(); - /// - /// let person = Person { - /// id: 1, - /// name: "Alice".to_string(), - /// }; - /// - /// wal.insert(&person, &"value".to_string()); - /// ``` - /// - /// 2. 
**Inserting a key-value pair, both of them are in encoded format** - /// - /// ```rust,ignore - /// use orderwal::swmr::{*, generic::*}; - /// - /// let mut wal = GenericBuilder::new().with_capacity(1024).alloc::().unwrap(); - /// - /// let person = Person { - /// id: 1, - /// name: "Alice".to_string(), - /// }.encode_into_vec(); - /// - /// - /// unsafe { - /// let key = Generic::from_slice(person.as_ref()); - /// let value = Generic::from_slice("Hello, Alice!".as_bytes()); - /// wal.insert(key, value).unwrap(); - /// } - /// ``` - #[inline] - pub fn insert<'a>( - &mut self, - key: impl Into>, - val: impl Into>, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - let key: Generic<'_, K> = key.into(); - let klen = key.encoded_len() as u32; - let kb: KeyBuilder<_> = KeyBuilder::once(klen, |buf| key.encode_to_buffer(buf).map(|_| ())); - - let val: Generic<'_, V> = val.into(); - let vlen = val.encoded_len() as u32; - let vb: ValueBuilder<_> = ValueBuilder::once(vlen, |buf| val.encode_to_buffer(buf).map(|_| ())); - self.insert_in(kb, vb) - } - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the key in place. - /// - /// This method is useful when playing with `?Sized` key types. See details in the example. - /// - /// ## Safety - /// - The bytes written to the buffer must be valid to decode by [`K::from_slice`](TypeRef::from_slice). - /// - /// ## Example - /// - /// See [`examples/generic_not_sized.rs`](https://github.com/al8n/orderwal/tree/main/examples/generic_not_sized.rs). - #[inline] - pub unsafe fn insert_with_key_builder<'a, E>( - &mut self, - kb: KeyBuilder) -> Result<(), E> + 'a>, - val: impl Into>, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - let val: Generic<'_, V> = val.into(); - let vlen = val.encoded_len() as u32; - let vb = ValueBuilder::once(vlen, |buf| val.encode_to_buffer(buf).map(|_| ())); - - self.insert_in(kb, vb) - } - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the value in place. - /// - /// This method is useful when playing with `?Sized` value types. See details in the example. - /// - /// ## Safety - /// - The bytes written to the buffer must be valid to decode by [`V::from_slice`](TypeRef::from_slice). - /// - /// ## Example - /// - /// See [`examples/generic_not_sized.rs`](https://github.com/al8n/orderwal/tree/main/examples/generic_not_sized.rs). - #[inline] - pub unsafe fn insert_with_value_builder<'a, E>( - &mut self, - key: impl Into>, - vb: ValueBuilder) -> Result<(), E> + 'a>, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - let key: Generic<'_, K> = key.into(); - let klen = key.encoded_len() as u32; - let kb: KeyBuilder<_> = KeyBuilder::once(klen, |buf| key.encode_to_buffer(buf).map(|_| ())); - - self.insert_in::(kb, vb) - } - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the key and value in place. - /// - /// This method is useful when playing with `?Sized` key and value types. See details in the example. - /// - /// ## Safety - /// - The bytes written to the buffer must be valid to decode by [`K::from_slice`](TypeRef::from_slice) - /// and [`V::from_slice`](TypeRef::from_slice) respectively. - /// - /// ## Example - /// - /// See [`examples/generic_not_sized.rs`](https://github.com/al8n/orderwal/tree/main/examples/generic_not_sized.rs). 
- #[inline] - pub unsafe fn insert_with_builders<'a, KE, VE>( - &mut self, - kb: KeyBuilder) -> Result<(), KE> + 'a>, - vb: ValueBuilder) -> Result<(), VE> + 'a>, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - self.insert_in(kb, vb) - } - - /// Inserts a batch of entries into the write-ahead log. - pub fn insert_batch_with_key_builder<'a, B>( - &'a mut self, - batch: &'a mut B, - ) -> Result<(), Among> - where - B: BatchWithKeyBuilder, Value = Generic<'a, V>>, - GenericPointer: 'static, - { - unsafe { - process_batch!(self( - batch, - |ptr, ent: &EntryWithKeyBuilder, _>| { - let f = ent.kb.builder(); - f(&mut VacantBuffer::new( - ent.meta.klen, - NonNull::new_unchecked(ptr), - )) - .map_err(Among::Left) - }, - |ptr, ent: &EntryWithKeyBuilder, _>| { - let value_buf = slice::from_raw_parts_mut(ptr, ent.meta.vlen); - ent.value.encode(value_buf).map_err(Among::Middle) - } - )) - } - } - - /// Inserts a batch of entries into the write-ahead log. - pub fn insert_batch_with_value_builder<'a, B>( - &'a mut self, - batch: &'a mut B, - ) -> Result<(), Among> - where - B: BatchWithValueBuilder, Key = Generic<'a, K>>, - GenericPointer: 'static, - { - unsafe { - process_batch!(self( - batch, - |ptr, ent: &EntryWithValueBuilder, B::ValueBuilder, _>| { - let key_buf = slice::from_raw_parts_mut(ptr, ent.meta.klen); - ent.key.encode(key_buf).map_err(Among::Left) - }, - |ptr, ent: &EntryWithValueBuilder, B::ValueBuilder, _>| { - let f = ent.vb.builder(); - f(&mut VacantBuffer::new( - ent.meta.vlen, - NonNull::new_unchecked(ptr), - )) - .map_err(Among::Middle) - } - )) - } - } - - /// Inserts a batch of entries into the write-ahead log. - pub fn insert_batch_with_builders<'a, B>( - &'a mut self, - batch: &'a mut B, - ) -> Result<(), Among> - where - B: BatchWithBuilders>, - GenericPointer: 'static, - { - unsafe { - process_batch!(self( - batch, - |ptr, ent: &EntryWithBuilders| { - let f = ent.kb.builder(); - f(&mut VacantBuffer::new( - ent.meta.klen, - NonNull::new_unchecked(ptr), - )) - .map_err(Among::Left) - }, - |ptr, ent: &EntryWithBuilders| { - let f = ent.vb.builder(); - f(&mut VacantBuffer::new( - ent.meta.vlen, - NonNull::new_unchecked(ptr), - )) - .map_err(Among::Middle) - } - )) - } - } - - /// Inserts a batch of entries into the write-ahead log. 
- pub fn insert_batch<'a, 'b: 'a, B: GenericBatch<'b, Key = K, Value = V>>( - &'a mut self, - batch: &'b mut B, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - unsafe { - process_batch!(self( - batch, - |ptr, ent: &GenericEntry<'_, K, V>| { - let key_buf = slice::from_raw_parts_mut(ptr, ent.meta.klen); - ent.key.encode(key_buf).map_err(Among::Left) - }, - |ptr, ent: &GenericEntry<'_, K, V>| { - let value_buf = slice::from_raw_parts_mut(ptr, ent.meta.vlen); - ent.value.encode(value_buf).map_err(Among::Middle) - } - )) - } - } - - unsafe fn insert_batch_helper<'a, I>( - &'a self, - allocator: &'a Arena, - mut buf: BytesRefMut<'a, Arena>, - cursor: usize, - on_success: impl FnOnce() -> I, - ) -> Result<(), Error> - where - GenericPointer: 'static, - I: Iterator>, - S: BuildChecksumer, - { - let total_size = buf.capacity(); - if cursor + CHECKSUM_SIZE != total_size { - return Err(Error::batch_size_mismatch( - total_size as u32 - CHECKSUM_SIZE as u32, - cursor as u32, - )); - } - - let mut cks = self.core.cks.build_checksumer(); - let committed_flag = Flags::BATCHING | Flags::COMMITTED; - cks.update(&[committed_flag.bits()]); - cks.update(&buf[1..]); - let checksum = cks.digest(); - buf.put_u64_le_unchecked(checksum); - - // commit the entry - buf[0] = committed_flag.bits(); - let buf_cap = buf.capacity(); - - if self.core.opts.sync() && allocator.is_ondisk() { - allocator.flush_header_and_range(buf.offset(), buf_cap)?; - } - buf.detach(); - - on_success().for_each(|p| { - self.core.map.insert(p); - }); - - Ok(()) - } - - fn insert_in( - &self, - kb: KeyBuilder) -> Result<(), KE>>, - vb: ValueBuilder) -> Result<(), VE>>, - ) -> Result<(), Among> - where - GenericPointer: 'static, - { - let (klen, kb) = kb.into_components(); - let (vlen, vb) = vb.into_components(); - - let klen = klen as usize; - let vlen = vlen as usize; - - self.check(klen, vlen).map_err(Among::Right)?; - - let (len_size, kvlen, elen) = entry_size(klen as u32, vlen as u32); - - let buf = self.core.arena.alloc_bytes(elen); - - match buf { - Err(e) => Err(Among::Right(Error::from_insufficient_space(e))), - Ok(mut buf) => { - unsafe { - // We allocate the buffer with the exact size, so it's safe to write to the buffer. 
- let flag = Flags::COMMITTED.bits(); - - let mut cks = self.core.cks.build_checksumer(); - cks.update(&[flag]); - - buf.put_u8_unchecked(Flags::empty().bits()); - let written = buf.put_u64_varint_unchecked(kvlen); - debug_assert_eq!( - written, len_size, - "the precalculated size should be equal to the written size" - ); - - let ko = STATUS_SIZE + written; - buf.set_len(ko + klen + vlen); - - let kptr = NonNull::new_unchecked(buf.as_mut_ptr().add(ko)); - let mut key_buf = VacantBuffer::new(klen, kptr); - kb(&mut key_buf).map_err(Among::Left)?; - - let vo = STATUS_SIZE + written + klen; - let vptr = NonNull::new_unchecked(buf.as_mut_ptr().add(vo)); - let mut value_buf = VacantBuffer::new(vlen, vptr); - vb(&mut value_buf).map_err(Among::Middle)?; - - let cks = { - cks.update(&buf[1..]); - cks.digest() - }; - buf.put_u64_le_unchecked(cks); - - // commit the entry - buf[0] |= Flags::COMMITTED.bits(); - - if self.core.opts.sync() && self.core.arena.is_ondisk() { - self - .core - .arena - .flush_header_and_range(buf.offset(), elen as usize) - .map_err(|e| Among::Right(e.into()))?; - } - buf.detach(); - - let p = GenericPointer::new(klen, vlen, buf.as_ptr().add(ko)); - self.core.map.insert(p); - Ok(()) - } - } - } - } - - #[inline] - fn check(&self, klen: usize, vlen: usize) -> Result<(), error::Error> { - check( - klen, - vlen, - self.core.opts.maximum_key_size(), - self.core.opts.maximum_value_size(), - self.ro, - ) - } -} - -#[inline] -fn dummy_path_builder(p: impl AsRef) -> Result { - Ok(p.as_ref().to_path_buf()) -} diff --git a/src/swmr/generic/builder.rs b/src/swmr/generic/builder.rs deleted file mode 100644 index b44ea44..0000000 --- a/src/swmr/generic/builder.rs +++ /dev/null @@ -1,906 +0,0 @@ -use std::path::{Path, PathBuf}; - -use crate::options::ArenaOptionsExt; - -use super::*; - -/// A write-ahead log builder. -pub struct GenericBuilder { - pub(super) opts: Options, - pub(super) cks: S, -} - -impl Default for GenericBuilder { - #[inline] - fn default() -> Self { - Self::new() - } -} - -impl GenericBuilder { - /// Returns a new write-ahead log builder with the given options. - #[inline] - pub fn new() -> Self { - Self { - opts: Options::default(), - cks: Crc32::default(), - } - } -} - -impl GenericBuilder { - /// Returns a new write-ahead log builder with the new checksumer - /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::GenericBuilder, Crc32}; - /// - /// let opts = GenericBuilder::new().with_checksumer(Crc32::new()); - /// ``` - #[inline] - pub fn with_checksumer(self, cks: NS) -> GenericBuilder { - GenericBuilder { - opts: self.opts, - cks, - } - } - - /// Returns a new write-ahead log builder with the new options - /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::GenericBuilder, Options}; - /// - /// let opts = GenericBuilder::new().with_options(Options::default()); - /// ``` - #[inline] - pub fn with_options(self, opts: Options) -> Self { - Self { - opts, - cks: self.cks, - } - } - - /// Set the reserved bytes of the WAL. - /// - /// The `reserved` is used to configure the start position of the WAL. This is useful - /// when you want to add some bytes as your own WAL's header. - /// - /// The default reserved is `0`. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_reserved(8); - /// ``` - #[inline] - pub const fn with_reserved(mut self, reserved: u32) -> Self { - self.opts = self.opts.with_reserved(reserved); - self - } - - /// Get the reserved of the WAL. - /// - /// The `reserved` is used to configure the start position of the WAL. This is useful - /// when you want to add some bytes as your own WAL's header. - /// - /// The default reserved is `0`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_reserved(8); - /// - /// assert_eq!(opts.reserved(), 8); - /// ``` - #[inline] - pub const fn reserved(&self) -> u32 { - self.opts.reserved() - } - - /// Returns the magic version. - /// - /// The default value is `0`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_magic_version(1); - /// assert_eq!(options.magic_version(), 1); - /// ``` - #[inline] - pub const fn magic_version(&self) -> u16 { - self.opts.magic_version() - } - - /// Returns the capacity of the WAL. - /// - /// The default value is `0`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_capacity(1000); - /// assert_eq!(options.capacity(), 1000); - /// ``` - #[inline] - pub const fn capacity(&self) -> u32 { - self.opts.capacity() - } - - /// Returns the maximum key length. - /// - /// The default value is `u16::MAX`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); - /// ``` - #[inline] - pub const fn maximum_key_size(&self) -> u32 { - self.opts.maximum_key_size() - } - - /// Returns the maximum value length. - /// - /// The default value is `u32::MAX`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_maximum_value_size(1024); - /// assert_eq!(options.maximum_value_size(), 1024); - /// ``` - #[inline] - pub const fn maximum_value_size(&self) -> u32 { - self.opts.maximum_value_size() - } - - /// Returns `true` if the WAL syncs on write. - /// - /// The default value is `true`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new(); - /// assert_eq!(options.sync(), true); - /// ``` - #[inline] - pub const fn sync(&self) -> bool { - self.opts.sync() - } - - /// Sets the capacity of the WAL. - /// - /// This configuration will be ignored when using file-backed memory maps. - /// - /// The default value is `0`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_capacity(100); - /// assert_eq!(options.capacity(), 100); - /// ``` - #[inline] - pub const fn with_capacity(mut self, cap: u32) -> Self { - self.opts = self.opts.with_capacity(cap); - self - } - - /// Sets the maximum key length. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_maximum_key_size(1024); - /// assert_eq!(options.maximum_key_size(), 1024); - /// ``` - #[inline] - pub const fn with_maximum_key_size(mut self, size: u32) -> Self { - self.opts = self.opts.with_maximum_key_size(size); - self - } - - /// Sets the maximum value length. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_maximum_value_size(1024); - /// assert_eq!(options.maximum_value_size(), 1024); - /// ``` - #[inline] - pub const fn with_maximum_value_size(mut self, size: u32) -> Self { - self.opts = self.opts.with_maximum_value_size(size); - self - } - - /// Sets the WAL to sync on write. - /// - /// The default value is `true`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_sync(false); - /// assert_eq!(options.sync(), false); - /// ``` - #[inline] - pub const fn with_sync(mut self, sync: bool) -> Self { - self.opts = self.opts.with_sync(sync); - self - } - - /// Sets the magic version. - /// - /// The default value is `0`. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let options = GenericBuilder::new().with_magic_version(1); - /// assert_eq!(options.magic_version(), 1); - /// ``` - #[inline] - pub const fn with_magic_version(mut self, version: u16) -> Self { - self.opts = self.opts.with_magic_version(version); - self - } -} - -impl GenericBuilder { - /// Sets the option for read access. - /// - /// This option, when true, will indicate that the file should be - /// `read`-able if opened. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_read(true); - /// ``` - #[inline] - pub fn with_read(mut self, read: bool) -> Self { - self.opts.read = read; - self - } - - /// Sets the option for write access. - /// - /// This option, when true, will indicate that the file should be - /// `write`-able if opened. - /// - /// If the file already exists, any write calls on it will overwrite its - /// contents, without truncating it. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_write(true); - /// ``` - #[inline] - pub fn with_write(mut self, write: bool) -> Self { - self.opts.write = write; - self - } - - /// Sets the option for the append mode. - /// - /// This option, when true, means that writes will append to a file instead - /// of overwriting previous contents. - /// Note that setting `.write(true).append(true)` has the same effect as - /// setting only `.append(true)`. - /// - /// For most filesystems, the operating system guarantees that all writes are - /// atomic: no writes get mangled because another process writes at the same - /// time. - /// - /// One maybe obvious note when using append-mode: make sure that all data - /// that belongs together is written to the file in one operation. This - /// can be done by concatenating strings before passing them to [`write()`], - /// or using a buffered writer (with a buffer of adequate size), - /// and calling [`flush()`] when the message is complete. 
- /// - /// If a file is opened with both read and append access, beware that after - /// opening, and after every write, the position for reading may be set at the - /// end of the file. So, before writing, save the current position (using - /// [seek]\([SeekFrom](std::io::SeekFrom)::[Current]\(opts))), and restore it before the next read. - /// - /// ## Note - /// - /// This function doesn't create the file if it doesn't exist. Use the - /// [`Options::with_create`] method to do so. - /// - /// [`write()`]: std::io::Write::write "io::Write::write" - /// [`flush()`]: std::io::Write::flush "io::Write::flush" - /// [seek]: std::io::Seek::seek "io::Seek::seek" - /// [Current]: std::io::SeekFrom::Current "io::SeekFrom::Current" - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_append(true); - /// ``` - #[inline] - pub fn with_append(mut self, append: bool) -> Self { - self.opts.write = true; - self.opts.append = append; - self - } - - /// Sets the option for truncating a previous file. - /// - /// If a file is successfully opened with this option set it will truncate - /// the file to opts length if it already exists. - /// - /// The file must be opened with write access for truncate to work. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_write(true).with_truncate(true); - /// ``` - #[inline] - pub fn with_truncate(mut self, truncate: bool) -> Self { - self.opts.truncate = truncate; - self.opts.write = true; - self - } - - /// Sets the option to create a new file, or open it if it already exists. - /// If the file does not exist, it is created and set the lenght of the file to the given size. - /// - /// In order for the file to be created, [`Options::with_write`] or - /// [`Options::with_append`] access must be used. - /// - /// See also [`std::fs::write()`][std::fs::write] for a simple function to - /// create a file with some given data. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_write(true).with_create(true); - /// ``` - #[inline] - pub fn with_create(mut self, val: bool) -> Self { - self.opts.create = val; - self - } - - /// Sets the option to create a new file and set the file length to the given value, failing if it already exists. - /// - /// No file is allowed to exist at the target location, also no (dangling) symlink. In this - /// way, if the call succeeds, the file returned is guaranteed to be new. - /// - /// This option is useful because it is atomic. Otherwise between checking - /// whether a file exists and creating a new one, the file may have been - /// created by another process (a TOCTOU race condition / attack). - /// - /// If `.with_create_new(true)` is set, [`.with_create()`] and [`.with_truncate()`] are - /// ignored. - /// - /// The file must be opened with write or append access in order to create - /// a new file. 
- /// - /// [`.with_create()`]: GenericBuilder::with_create - /// [`.with_truncate()`]: GenericBuilder::with_truncate - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new() - /// .with_write(true) - /// .with_create_new(true); - /// ``` - #[inline] - pub fn with_create_new(mut self, val: bool) -> Self { - self.opts.create_new = val; - self - } - - /// Configures the anonymous memory map to be suitable for a process or thread stack. - /// - /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on file-backed memory maps and vec backed [`GenericOrderWal`]. - /// - /// ## Example - /// - /// ``` - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_stack(true); - /// ``` - #[inline] - pub fn with_stack(mut self, stack: bool) -> Self { - self.opts.stack = stack; - self - } - - /// Configures the anonymous memory map to be allocated using huge pages. - /// - /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows. - /// - /// The size of the requested page can be specified in page bits. If not provided, the system - /// default is requested. The requested length should be a multiple of this, or the mapping - /// will fail. - /// - /// This option has no effect on file-backed memory maps and vec backed [`GenericOrderWal`]. - /// - /// ## Example - /// - /// ``` - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_huge(Some(8)); - /// ``` - #[inline] - pub fn with_huge(mut self, page_bits: Option) -> Self { - self.opts.huge = page_bits; - self - } - - /// Populate (prefault) page tables for a mapping. - /// - /// For a file mapping, this causes read-ahead on the file. This will help to reduce blocking on page faults later. - /// - /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows. - /// - /// This option has no effect on vec backed [`GenericOrderWal`]. - /// - /// ## Example - /// - /// ``` - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_populate(true); - /// ``` - #[inline] - pub fn with_populate(mut self, populate: bool) -> Self { - self.opts.populate = populate; - self - } -} - -impl GenericBuilder { - /// Returns `true` if the file should be opened with read access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_read(true); - /// assert_eq!(opts.read(), true); - /// ``` - #[inline] - pub const fn read(&self) -> bool { - self.opts.read - } - - /// Returns `true` if the file should be opened with write access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_write(true); - /// assert_eq!(opts.write(), true); - /// ``` - #[inline] - pub const fn write(&self) -> bool { - self.opts.write - } - - /// Returns `true` if the file should be opened with append access. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_append(true); - /// assert_eq!(opts.append(), true); - /// ``` - #[inline] - pub const fn append(&self) -> bool { - self.opts.append - } - - /// Returns `true` if the file should be opened with truncate access. 
- /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_truncate(true); - /// assert_eq!(opts.truncate(), true); - /// ``` - #[inline] - pub const fn truncate(&self) -> bool { - self.opts.truncate - } - - /// Returns `true` if the file should be created if it does not exist. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_create(true); - /// assert_eq!(opts.create(), true); - /// ``` - #[inline] - pub const fn create(&self) -> bool { - self.opts.create - } - - /// Returns `true` if the file should be created if it does not exist and fail if it does. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_create_new(true); - /// assert_eq!(opts.create_new(), true); - /// ``` - #[inline] - pub const fn create_new(&self) -> bool { - self.opts.create_new - } - - /// Returns `true` if the memory map should be suitable for a process or thread stack. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_stack(true); - /// assert_eq!(opts.stack(), true); - /// ``` - #[inline] - pub const fn stack(&self) -> bool { - self.opts.stack - } - - /// Returns the page bits of the memory map. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_huge(Some(8)); - /// assert_eq!(opts.huge(), Some(8)); - /// ``` - #[inline] - pub const fn huge(&self) -> Option { - self.opts.huge - } - - /// Returns `true` if the memory map should populate (prefault) page tables for a mapping. - /// - /// ## Examples - /// - /// ```rust - /// use orderwal::swmr::GenericBuilder; - /// - /// let opts = GenericBuilder::new().with_populate(true); - /// assert_eq!(opts.populate(), true); - /// ``` - #[inline] - pub const fn populate(&self) -> bool { - self.opts.populate - } -} - -impl GenericBuilder { - /// Creates a new in-memory write-ahead log backed by an aligned vec with the given capacity and options. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder}; - /// - /// let wal = GenericBuilder::new().with_capacity(1024).alloc::().unwrap(); - /// ``` - #[inline] - pub fn alloc(self) -> Result, Error> - where - K: ?Sized, - V: ?Sized, - { - let Self { opts, cks } = self; - - arena_options(opts.reserved()) - .with_capacity(opts.capacity()) - .alloc() - .map_err(Error::from_insufficient_space) - .and_then(|arena| { - GenericOrderWal::new_in(arena, opts, (), cks).map(GenericOrderWal::from_core) - }) - } - - /// Creates a new in-memory write-ahead log backed by an anonymous memory map with the given options. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder}; - /// - /// let wal = GenericBuilder::new().with_capacity(1024).map_anon::().unwrap(); - /// ``` - #[inline] - pub fn map_anon(self) -> Result, Error> - where - K: ?Sized, - V: ?Sized, - { - let Self { opts, cks } = self; - - arena_options(opts.reserved()) - .merge(&opts) - .map_anon() - .map_err(Into::into) - .and_then(|arena| { - GenericOrderWal::new_in(arena, opts, (), cks).map(GenericOrderWal::from_core) - }) - } - - /// Open a write-ahead log backed by a file backed memory map in read only mode. 
- /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder, generic::*}; - /// # let dir = tempfile::tempdir().unwrap(); - /// # let path = dir - /// # .path() - /// # .join("generic_wal_map_mut_with_checksumer"); - /// # - /// # let mut wal = unsafe { - /// # GenericBuilder::new() - /// # .with_capacity(1024) - /// # .with_create_new(true) - /// # .with_read(true) - /// # .with_write(true) - /// # .map_mut::( - /// # &path, - /// # ) - /// # .unwrap() - /// # }; - /// - /// let reader = unsafe { GenericBuilder::new().map::(path).unwrap() }; - /// ``` - #[inline] - pub unsafe fn map>( - self, - path: P, - ) -> Result, Error> - where - K: Type + Ord + ?Sized + 'static, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized + 'static, - { - self - .map_with_path_builder::(|| dummy_path_builder(path)) - .map_err(|e| e.unwrap_right()) - } - - /// Open a write-ahead log backed by a file backed memory map in read only mode with the given [`Checksumer`]. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder, generic::*}; - /// # let dir = tempfile::tempdir().unwrap(); - /// # let path = dir - /// # .path() - /// # .join("generic_wal_map_mut_with_checksumer"); - /// # - /// # let mut wal = unsafe { - /// # GenericBuilder::new() - /// # .with_capacity(1024) - /// # .with_create_new(true) - /// # .with_read(true) - /// # .with_write(true) - /// # .map_mut::( - /// # &path, - /// # ) - /// # .unwrap() - /// # }; - /// - /// let reader = unsafe { GenericBuilder::new().map_with_path_builder::(|| Ok(path)).unwrap() }; - /// ``` - #[inline] - pub unsafe fn map_with_path_builder( - self, - path_builder: PB, - ) -> Result, Either> - where - K: Type + Ord + ?Sized + 'static, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized + 'static, - PB: FnOnce() -> Result, - { - let Self { cks, opts } = self; - - arena_options(opts.reserved()) - .merge(&opts) - .with_read(true) - .map_with_path_builder(path_builder) - .map_err(|e| e.map_right(Into::into)) - .and_then(|arena| { - GenericOrderWal::replay(arena, opts, true, (), cks) - .map(|core| GenericOrderWalReader::new(Arc::new(core))) - .map_err(Either::Right) - }) - } - - /// Creates a new write-ahead log backed by a file backed memory map with the given options. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. 
Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder, generic::*}; - /// - /// # let dir = tempfile::tempdir().unwrap(); - /// # let path = dir - /// # .path() - /// # .join("generic_wal_map_mut"); - /// - /// let mut wal = unsafe { - /// GenericBuilder::new() - /// .with_capacity(1024) - /// .with_create_new(true) - /// .with_read(true) - /// .with_write(true) - /// .map_mut::(&path) - /// .unwrap() - /// }; - /// ``` - #[inline] - pub unsafe fn map_mut>( - self, - path: P, - ) -> Result, Error> - where - K: Type + Ord + ?Sized + 'static, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized + 'static, - { - self - .map_mut_with_path_builder::(|| dummy_path_builder(path)) - .map_err(|e| e.unwrap_right()) - } - - /// Returns a write-ahead log backed by a file backed memory map with the given options and [`Checksumer`]. - /// - /// ## Safety - /// - /// All file-backed memory map constructors are marked `unsafe` because of the potential for - /// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or - /// out of process. Applications must consider the risk and take appropriate precautions when - /// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g. - /// unlinked) files exist but are platform specific and limited. - /// - /// ## Example - /// - /// ```rust - /// use orderwal::swmr::{GenericOrderWal, GenericBuilder, generic::*}; - /// - /// let dir = tempfile::tempdir().unwrap(); - /// - /// let mut wal = unsafe { - /// GenericBuilder::new() - /// .with_create_new(true) - /// .with_write(true) - /// .with_read(true) - /// .with_capacity(1024) - /// .map_mut_with_path_builder::( - /// || { - /// Ok(dir.path().join("generic_wal_map_mut_with_path_builder_and_checksumer")) - /// }, - /// ) - /// .unwrap() - /// }; - /// ``` - pub unsafe fn map_mut_with_path_builder( - self, - path_builder: PB, - ) -> Result, Either> - where - K: Type + Ord + ?Sized + 'static, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized + 'static, - PB: FnOnce() -> Result, - { - let Self { opts, cks } = self; - let path = path_builder().map_err(Either::Left)?; - let exist = path.exists(); - let arena = arena_options(opts.reserved()) - .merge(&opts) - .map_mut_with_path_builder(|| Ok(path)) - .map_err(|e| e.map_right(Into::into))?; - - if !exist { - GenericOrderWal::new_in(arena, opts, (), cks) - } else { - GenericOrderWal::replay(arena, opts, false, (), cks) - } - .map(GenericOrderWal::from_core) - .map_err(Either::Right) - } -} diff --git a/src/swmr/generic/iter.rs b/src/swmr/generic/iter.rs deleted file mode 100644 index f7401bb..0000000 --- a/src/swmr/generic/iter.rs +++ /dev/null @@ -1,111 +0,0 @@ -use core::ops::Bound; - -use crossbeam_skiplist::Comparable; - -use super::{GenericEntryRef, GenericPointer as Pointer, KeyRef, Query, Type}; - -type SetRange<'a, Q, K, V> = crossbeam_skiplist::set::Range< - 'a, - Query<'a, K, Q>, - (Bound>, Bound>), - Pointer, ->; - -/// An iterator over the entries in the WAL. 
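One detail worth noting in the deleted `map_mut_with_path_builder` above: it snapshots `path.exists()` *before* creating the mapping, because opening with `create(true)` materializes the file and would make a post-open existence check always true. A minimal sketch of that create-or-replay decision (`open_wal`, `new_in`, and `replay` here are hypothetical stand-ins, not the crate's API):

```rust
use std::path::Path;

fn open_wal(path: &Path) -> &'static str {
  // Capture existence before the mapping call would create the file.
  let existed = path.exists();

  // ... create or open the file-backed mapping here ...

  if !existed {
    "new_in: initialize a fresh WAL header" // brand-new file
  } else {
    "replay: scan existing entries to rebuild the in-memory index" // reopened WAL
  }
}
```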
-pub struct Iter<'a, K: ?Sized, V: ?Sized> {
-  iter: crossbeam_skiplist::set::Iter<'a, Pointer<K, V>>,
-}
-
-impl<'a, K, V> Iter<'a, K, V>
-where
-  K: ?Sized,
-  V: ?Sized,
-{
-  #[inline]
-  pub(super) fn new(iter: crossbeam_skiplist::set::Iter<'a, Pointer<K, V>>) -> Self {
-    Self { iter }
-  }
-}
-
-impl<'a, K, V> Iterator for Iter<'a, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized + Type,
-{
-  type Item = GenericEntryRef<'a, K, V>;
-
-  #[inline]
-  fn next(&mut self) -> Option<Self::Item> {
-    self.iter.next().map(|ptr| GenericEntryRef::new(ptr))
-  }
-
-  #[inline]
-  fn size_hint(&self) -> (usize, Option<usize>) {
-    self.iter.size_hint()
-  }
-}
-
-impl<K, V> DoubleEndedIterator for Iter<'_, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized + Type,
-{
-  #[inline]
-  fn next_back(&mut self) -> Option<Self::Item> {
-    self.iter.next_back().map(|ptr| GenericEntryRef::new(ptr))
-  }
-}
-
-/// An iterator over a subset of the entries in the WAL.
-pub struct Range<'a, Q, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized,
-  Q: Ord + ?Sized + for<'b> Comparable<K::Ref<'b>>,
-{
-  iter: SetRange<'a, Q, K, V>,
-}
-
-impl<'a, Q, K, V> Range<'a, Q, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized,
-  Q: Ord + ?Sized + for<'b> Comparable<K::Ref<'b>>,
-{
-  #[inline]
-  pub(super) fn new(iter: SetRange<'a, Q, K, V>) -> Self {
-    Self { iter }
-  }
-}
-
-impl<'a, Q, K, V> Iterator for Range<'a, Q, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized + Type,
-  Q: Ord + ?Sized + for<'b> Comparable<K::Ref<'b>>,
-{
-  type Item = GenericEntryRef<'a, K, V>;
-
-  #[inline]
-  fn next(&mut self) -> Option<Self::Item> {
-    self.iter.next().map(|ptr| GenericEntryRef::new(ptr))
-  }
-}
-
-impl<Q, K, V> DoubleEndedIterator for Range<'_, Q, K, V>
-where
-  K: Type + Ord + ?Sized,
-  for<'b> K::Ref<'b>: KeyRef<'b, K>,
-  V: ?Sized + Type,
-  Q: Ord + ?Sized + for<'b> Comparable<K::Ref<'b>>,
-{
-  #[inline]
-  fn next_back(&mut self) -> Option<Self::Item> {
-    self.iter.next_back().map(|ptr| GenericEntryRef::new(ptr))
-  }
-}
diff --git a/src/swmr/generic/reader.rs b/src/swmr/generic/reader.rs
deleted file mode 100644
index d190239..0000000
--- a/src/swmr/generic/reader.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-use core::ops::Bound;
-use std::sync::Arc;
-
-use dbutils::equivalent::Comparable;
-use rarena_allocator::Allocator;
-
-use super::{GenericEntryRef, GenericOrderWalCore, Iter, KeyRef, Range, Type, HEADER_SIZE};
-
-/// A read-only view of a generic single-writer, multi-reader WAL.
-pub struct GenericOrderWalReader<K: ?Sized, V: ?Sized, S>(Arc<GenericOrderWalCore<K, V, S>>);
-
-impl<K, V, S> Clone for GenericOrderWalReader<K, V, S>
-where
-  K: ?Sized,
-  V: ?Sized,
-{
-  fn clone(&self) -> Self {
-    Self(self.0.clone())
-  }
-}
-
-impl<K, V, S> GenericOrderWalReader<K, V, S>
-where
-  K: ?Sized,
-  V: ?Sized,
-{
-  pub(super) fn new(wal: Arc<GenericOrderWalCore<K, V, S>>) -> Self {
-    Self(wal)
-  }
-
-  /// Returns the path of the WAL if it is backed by a file.
-  #[inline]
-  pub fn path(&self) -> Option<&std::sync::Arc<std::path::PathBuf>> {
-    self.0.arena.path()
-  }
-
-  /// Returns the reserved space in the WAL.
-  ///
-  /// ## Safety
-  /// - The writer must ensure that the returned slice is not modified.
-  /// - This method is not thread-safe, so be careful when using it.
-  #[inline]
-  pub unsafe fn reserved_slice(&self) -> &[u8] {
-    if self.0.opts.reserved() == 0 {
-      return &[];
-    }
-
-    &self.0.arena.reserved_slice()[HEADER_SIZE..]
-  }
-
-  /// Returns the number of entries in the WAL.
-  #[inline]
-  pub fn len(&self) -> usize {
-    self.0.len()
-  }
-
-  /// Returns `true` if the WAL is empty.
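Because the deleted `Iter` and `Range` above both implement `DoubleEndedIterator`, the 0.4 reader could be walked from either end. A hedged sketch against the removed API (assumes a populated `reader` over `String` keys and values):

```rust
// `reader` is a 0.4 GenericOrderWalReader over String keys/values (assumed).
let mut entries = reader.iter();
let smallest = entries.next();     // entry with the minimum key
let largest = entries.next_back(); // entry with the maximum key
assert_eq!(smallest.is_none(), reader.is_empty());
let _ = largest;
```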
- #[inline] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl GenericOrderWalReader -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. - #[inline] - pub fn first(&self) -> Option> - where - V: Type, - { - self.0.first() - } - - /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. - #[inline] - pub fn last(&self) -> Option> - where - V: Type, - { - self.0.last() - } - - /// Returns an iterator over the entries in the WAL. - #[inline] - pub fn iter(&self) -> Iter<'_, K, V> { - self.0.iter() - } - - /// Returns an iterator over a subset of the entries in the WAL. - #[inline] - pub fn range<'a, Q>( - &'a self, - start_bound: Bound<&'a Q>, - end_bound: Bound<&'a Q>, - ) -> Range<'a, Q, K, V> - where - Q: Ord + ?Sized + for<'b> Comparable>, - { - self.0.range(start_bound, end_bound) - } -} - -impl GenericOrderWalReader -where - K: Type + Ord + ?Sized, - for<'a> K::Ref<'a>: KeyRef<'a, K>, - V: ?Sized, -{ - /// Returns `true` if the key exists in the WAL. - #[inline] - pub fn contains_key<'a, Q>(&'a self, key: &Q) -> bool - where - Q: ?Sized + Ord + Comparable>, - { - self.0.contains_key(key) - } - - /// Returns `true` if the key exists in the WAL. - /// - /// ## Safety - /// - The given `key` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn contains_key_by_bytes(&self, key: &[u8]) -> bool { - self.0.contains_key_by_bytes(key) - } - - /// Gets the value associated with the key. - #[inline] - pub fn get<'a, Q>(&'a self, key: &Q) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.0.get(key) - } - - /// Gets the value associated with the key. - /// - /// ## Safety - /// - The given `key` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn get_by_bytes(&self, key: &[u8]) -> Option> - where - V: Type, - { - self.0.get_by_bytes(key) - } - - /// Returns a value associated to the highest element whose key is below the given bound. - /// If no such element is found then `None` is returned. - #[inline] - pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.0.upper_bound(bound) - } - - /// Returns a value associated to the highest element whose key is below the given bound. - /// If no such element is found then `None` is returned. - /// - /// ## Safety - /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. - #[inline] - pub unsafe fn upper_bound_by_bytes( - &self, - bound: Bound<&[u8]>, - ) -> Option> - where - V: Type, - { - self.0.upper_bound_by_bytes(bound) - } - - /// Returns a value associated to the lowest element whose key is above the given bound. - /// If no such element is found then `None` is returned. - #[inline] - pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> - where - Q: ?Sized + Ord + Comparable>, - V: Type, - { - self.0.lower_bound(bound) - } - - /// Returns a value associated to the lowest element whose key is above the given bound. - /// If no such element is found then `None` is returned. - /// - /// ## Safety - /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. 
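The bound-query pair above mirrors `BTreeMap`-style navigation: `upper_bound` walks down from a bound, `lower_bound` walks up from one. A hedged sketch of the intended usage of the removed API (assumes `String` keys whose ref type is comparable with `&str`, and a reader holding keys `"a"`, `"b"`, `"d"`):

```rust
use core::ops::Bound;

// Highest entry whose key is strictly below "d":
let below = reader.upper_bound(Bound::Excluded("d")); // -> entry for "b"
// Lowest entry whose key is at or above "b":
let above = reader.lower_bound(Bound::Included("b")); // -> entry for "b"
let _ = (below, above);
```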
- #[inline] - pub unsafe fn lower_bound_by_bytes( - &self, - bound: Bound<&[u8]>, - ) -> Option> - where - V: Type, - { - self.0.lower_bound_by_bytes(bound) - } -} diff --git a/src/swmr/generic/tests.rs b/src/swmr/generic/tests.rs deleted file mode 100644 index 3a0c063..0000000 --- a/src/swmr/generic/tests.rs +++ /dev/null @@ -1,180 +0,0 @@ -use std::{collections::BTreeMap, thread::spawn}; - -use dbutils::leb128::{decode_u64_varint, encode_u64_varint, encoded_u64_varint_len}; -use tempfile::tempdir; - -use super::*; - -const MB: u32 = 1024 * 1024; - -#[cfg(all(test, any(test_swmr_generic_constructor, all_tests)))] -mod constructor; - -#[cfg(all(test, any(test_swmr_generic_insert, all_tests)))] -mod insert; - -#[cfg(all(test, any(test_swmr_generic_iters, all_tests)))] -mod iters; - -#[cfg(all(test, any(test_swmr_generic_get, all_tests)))] -mod get; - -#[doc(hidden)] -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct Person { - #[doc(hidden)] - pub id: u64, - #[doc(hidden)] - pub name: String, -} - -impl Person { - #[doc(hidden)] - #[cfg(test)] - pub fn random() -> Self { - Self { - id: rand::random(), - name: names::Generator::default().next().unwrap(), - } - } - - #[doc(hidden)] - pub fn as_ref(&self) -> PersonRef<'_> { - PersonRef { - id: self.id, - name: &self.name, - } - } - - #[doc(hidden)] - #[cfg(test)] - #[allow(dead_code)] - fn to_vec(&self) -> Vec { - let mut buf = vec![0; self.encoded_len()]; - self.encode(&mut buf).unwrap(); - buf - } -} - -#[doc(hidden)] -#[derive(Debug)] -pub struct PersonRef<'a> { - id: u64, - name: &'a str, -} - -impl PartialEq for PersonRef<'_> { - fn eq(&self, other: &Self) -> bool { - self.id == other.id && self.name == other.name - } -} - -impl Eq for PersonRef<'_> {} - -impl PartialOrd for PersonRef<'_> { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for PersonRef<'_> { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self - .id - .cmp(&other.id) - .then_with(|| self.name.cmp(other.name)) - } -} - -impl Equivalent for PersonRef<'_> { - fn equivalent(&self, key: &Person) -> bool { - self.id == key.id && self.name == key.name - } -} - -impl Comparable for PersonRef<'_> { - fn compare(&self, key: &Person) -> std::cmp::Ordering { - self.id.cmp(&key.id).then_with(|| self.name.cmp(&key.name)) - } -} - -impl Equivalent> for Person { - fn equivalent(&self, key: &PersonRef<'_>) -> bool { - self.id == key.id && self.name == key.name - } -} - -impl Comparable> for Person { - fn compare(&self, key: &PersonRef<'_>) -> std::cmp::Ordering { - self - .id - .cmp(&key.id) - .then_with(|| self.name.as_str().cmp(key.name)) - } -} - -impl KeyRef<'_, Person> for PersonRef<'_> { - fn compare(&self, a: &Q) -> cmp::Ordering - where - Q: ?Sized + Ord + Comparable, - { - Comparable::compare(a, self).reverse() - } - - unsafe fn compare_binary(this: &[u8], other: &[u8]) -> cmp::Ordering { - let (this_id_size, this_id) = decode_u64_varint(this).unwrap(); - let (other_id_size, other_id) = decode_u64_varint(other).unwrap(); - PersonRef { - id: this_id, - name: std::str::from_utf8(&this[this_id_size..]).unwrap(), - } - .cmp(&PersonRef { - id: other_id, - name: std::str::from_utf8(&other[other_id_size..]).unwrap(), - }) - } -} - -impl Type for Person { - type Ref<'a> = PersonRef<'a>; - type Error = dbutils::error::InsufficientBuffer; - - fn encoded_len(&self) -> usize { - encoded_u64_varint_len(self.id) + self.name.len() - } - - fn encode(&self, buf: &mut [u8]) -> Result { - let id_size = encode_u64_varint(self.id, 
buf)?; - buf[id_size..].copy_from_slice(self.name.as_bytes()); - Ok(id_size + self.name.len()) - } - - #[inline] - fn encode_to_buffer( - &self, - buf: &mut dbutils::buffer::VacantBuffer<'_>, - ) -> Result { - let id_size = buf.put_u64_varint(self.id)?; - buf.put_slice_unchecked(self.name.as_bytes()); - Ok(id_size + self.name.len()) - } -} - -impl<'a> TypeRef<'a> for PersonRef<'a> { - unsafe fn from_slice(src: &'a [u8]) -> Self { - let (id_size, id) = decode_u64_varint(src).unwrap(); - let name = std::str::from_utf8(&src[id_size..]).unwrap(); - PersonRef { id, name } - } -} - -impl PersonRef<'_> { - #[cfg(test)] - #[allow(dead_code)] - fn encode_into_vec(&self) -> Result, dbutils::error::InsufficientBuffer> { - let mut buf = vec![0; encoded_u64_varint_len(self.id) + self.name.len()]; - let id_size = encode_u64_varint(self.id, &mut buf)?; - buf[id_size..].copy_from_slice(self.name.as_bytes()); - Ok(buf) - } -} diff --git a/src/swmr/generic/tests/constructor.rs b/src/swmr/generic/tests/constructor.rs deleted file mode 100644 index f51bc1c..0000000 --- a/src/swmr/generic/tests/constructor.rs +++ /dev/null @@ -1,285 +0,0 @@ -use super::*; - -#[test] -#[allow(clippy::needless_borrows_for_generic_args)] -fn query_comparable() { - let p1 = Person { - id: 3127022870678870148, - name: "enthusiastic-magic".into(), - }; - let p2 = Person { - id: 9872687799307360216, - name: "damaged-friend".into(), - }; - - let p1bytes = p1.encode_into_vec().unwrap(); - let p2bytes = p2.encode_into_vec().unwrap(); - - let ptr1 = GenericPointer::::new(p1bytes.len(), 0, p1bytes.as_ptr()); - let ptr2 = GenericPointer::::new(p2bytes.len(), 0, p2bytes.as_ptr()); - - let map = SkipSet::new(); - map.insert(ptr1); - map.insert(ptr2); - - assert!(map.contains(&Query::new(&p1))); - assert!(map.get(&Query::new(&p1)).is_some()); - - assert!(map.contains(&Query::new(&p2))); - assert!(map.get(&Query::new(&p2)).is_some()); - - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - wal.insert(&p1, &"My name is Alice!".to_string()).unwrap(); - wal.insert(&p2, &"My name is Bob!".to_string()).unwrap(); - - assert!(wal.contains_key(&p1)); - assert_eq!(wal.get(&p1).unwrap().value(), "My name is Alice!"); - - assert!(wal.contains_key(&p2)); - assert_eq!(wal.get(&p2).unwrap().value(), "My name is Bob!"); -} - -#[test] -#[allow(clippy::needless_borrows_for_generic_args)] -fn construct_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - - let person = Person { - id: 1, - name: "Alice".to_string(), - }; - - assert!(wal.is_empty()); - - wal - .insert(&person, &"My name is Alice!".to_string()) - .unwrap(); - - let wal = wal.reader(); - - assert_eq!(wal.len(), 1); - assert!(!wal.is_empty()); -} - -#[test] -#[allow(clippy::needless_borrows_for_generic_args)] -fn construct_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - let person = Person { - id: 1, - name: "Alice".to_string(), - }; - - wal - .insert(&person, &"My name is Alice!".to_string()) - .unwrap(); -} - -#[test] -#[cfg_attr(miri, ignore)] -#[allow(clippy::needless_borrows_for_generic_args)] -fn construct_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_construct_map_file"); - - unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap(); - let person = Person { - id: 1, - name: "Alice".to_string(), - }; - - wal - 
.insert(&person, &"My name is Alice!".to_string()) - .unwrap(); - assert_eq!(wal.get(&person).unwrap().value(), "My name is Alice!"); - assert_eq!(*wal.path().unwrap().as_ref(), path); - } - - let pr = PersonRef { - id: 1, - name: "Alice", - }; - - unsafe { - let wal = GenericBuilder::new() - .with_capacity(MB) - .with_create(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap(); - assert_eq!(wal.get(&pr).unwrap().value(), "My name is Alice!"); - } - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - assert_eq!(wal.get(&pr).unwrap().value(), "My name is Alice!"); -} - -#[test] -fn construct_with_small_capacity_inmemory() { - let wal = GenericBuilder::new() - .with_capacity(1) - .alloc::(); - - assert!(wal.is_err()); - match wal { - Err(e) => println!("error: {:?}", e), - _ => panic!("unexpected error"), - } -} - -#[test] -fn construct_with_small_capacity_map_anon() { - let wal = GenericBuilder::new() - .with_capacity(1) - .map_anon::(); - - assert!(wal.is_err()); - match wal { - Err(e) => println!("error: {:?}", e), - _ => panic!("unexpected error"), - } -} - -#[test] -#[cfg_attr(miri, ignore)] -fn construct_with_small_capacity_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_construct_with_small_capacity_map_file"); - - let wal = unsafe { - GenericBuilder::new() - .with_capacity(1) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - }; - - assert!(wal.is_err()); - match wal { - Err(e) => println!("{:?}", e), - _ => panic!("unexpected error"), - } -} - -fn zero_reserved(wal: &mut GenericOrderWal) { - unsafe { - assert_eq!(wal.reserved_slice(), &[]); - assert_eq!(wal.reserved_slice_mut(), &mut []); - - let wal = wal.reader(); - assert_eq!(wal.reserved_slice(), &[]); - } -} - -#[test] -fn zero_reserved_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - zero_reserved(&mut wal); -} - -#[test] -fn zero_reserved_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - zero_reserved(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn zero_reserved_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_zero_reserved_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap() - }; - - zero_reserved(&mut wal); -} - -fn reserved(wal: &mut GenericOrderWal) { - unsafe { - let buf = wal.reserved_slice_mut(); - buf.copy_from_slice(b"al8n"); - assert_eq!(wal.reserved_slice(), b"al8n"); - assert_eq!(wal.reserved_slice_mut(), b"al8n"); - - let wal = wal.reader(); - assert_eq!(wal.reserved_slice(), b"al8n"); - } -} - -#[test] -fn reserved_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_reserved(4) - .alloc() - .unwrap(); - reserved(&mut wal); -} - -#[test] -fn reserved_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_reserved(4) - .map_anon() - .unwrap(); - reserved(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn reserved_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_reserved_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_reserved(4) - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap() - }; - - reserved(&mut wal); -} diff --git 
a/src/swmr/generic/tests/get.rs b/src/swmr/generic/tests/get.rs deleted file mode 100644 index 3ad2ebb..0000000 --- a/src/swmr/generic/tests/get.rs +++ /dev/null @@ -1,562 +0,0 @@ -use super::*; - -fn first(wal: &mut GenericOrderWal) { - let people = (0..10) - .map(|_| { - let p = Person::random(); - let v = format!("My name is {}", p.name); - wal.insert(&p, &v).unwrap(); - - (p, v) - }) - .collect::>(); - - let ent = wal.first().unwrap(); - let (p, v) = people.first_key_value().unwrap(); - assert!(ent.key().equivalent(p)); - assert_eq!(ent.value(), v); - - let wal = wal.reader().clone(); - let ent = wal.first().unwrap(); - let (p, v) = people.first_key_value().unwrap(); - assert!(ent.key().equivalent(p)); - assert_eq!(ent.value(), v); -} - -#[test] -fn first_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - first(&mut wal); -} - -#[test] -fn first_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - first(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn first_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_first_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - first(&mut wal); -} - -fn last(wal: &mut GenericOrderWal) { - let people = (0..10) - .map(|_| { - let p = Person::random(); - let v = format!("My name is {}", p.name); - wal.insert(&p, &v).unwrap(); - - (p, v) - }) - .collect::>(); - - let ent = wal.last().unwrap(); - let (p, v) = people.last_key_value().unwrap(); - assert!(ent.key().equivalent(p)); - assert_eq!(ent.value(), v); - - let wal = wal.reader(); - let ent = wal.last().unwrap(); - assert!(ent.key().equivalent(p)); - assert_eq!(ent.value(), v); -} - -#[test] -fn last_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - last(&mut wal); -} - -#[test] -fn last_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - last(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn last_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_last_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - last(&mut wal); -} - -#[allow(clippy::needless_borrows_for_generic_args)] -fn get_or_insert(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let v = format!("My name is {}", p.name); - wal.get_or_insert(&p, &v).unwrap_right().unwrap(); - (p, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pv) in &people { - assert!(wal.contains_key(p)); - - assert_eq!( - wal - .get_or_insert(p, &format!("Hello! 
{}!", p.name)) - .unwrap_left() - .value(), - pv - ); - } - - for (p, _) in &people { - assert!(wal.contains_key(p)); - } -} - -#[test] -fn get_or_insert_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_or_insert(&mut wal); -} - -#[test] -fn get_or_insert_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_or_insert(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_or_insert_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_get_or_insert_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_or_insert(&mut wal); -} - -fn get_or_insert_with(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let v = format!("My name is {}", p.name); - wal - .get_or_insert_with(&p, || (&v).into()) - .unwrap_right() - .unwrap(); - (p, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pv) in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal - .get_or_insert_with(p, || (&format!("Hello! {}!", p.name)).into()) - .unwrap_left() - .value(), - pv - ); - } - - for (p, _) in &people { - assert!(wal.contains_key(p)); - assert!(wal.contains_key(&p.as_ref())); - } -} - -#[test] -fn get_or_insert_with_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_or_insert_with(&mut wal); -} - -#[test] -fn get_or_insert_with_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_or_insert_with(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_or_insert_with_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_get_or_insert_with_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_or_insert_with(&mut wal); -} - -#[allow(clippy::needless_borrows_for_generic_args)] -fn get_or_insert_key_with_value_bytes(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let pvec = p.to_vec(); - let v = format!("My name is {}", p.name); - unsafe { - wal - .get_or_insert(Generic::from_slice(pvec.as_ref()), &v) - .unwrap_right() - .unwrap(); - } - (p, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pv) in &people { - assert!(wal.contains_key(p)); - - assert_eq!( - wal - .get_or_insert(p, &format!("Hello! 
{}!", p.name)) - .unwrap_left() - .value(), - pv - ); - } - - for (p, _) in &people { - assert!(wal.contains_key(p)); - } -} - -#[test] -fn get_or_insert_key_with_value_bytes_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_or_insert_key_with_value_bytes(&mut wal); -} - -#[test] -fn get_or_insert_key_with_value_bytes_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_or_insert_key_with_value_bytes(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_or_insert_key_with_value_bytes_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_get_or_insert_key_with_value_bytes_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_or_insert_key_with_value_bytes(&mut wal); -} - -fn get_or_insert_value_bytes(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let v = format!("My name is {}", p.name); - unsafe { - wal - .get_or_insert(&p, Generic::from_slice(v.as_bytes())) - .unwrap_right() - .unwrap(); - } - (p, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pv) in &people { - assert!(wal.contains_key(p)); - assert!(wal.contains_key(&p.as_ref())); - unsafe { - assert_eq!( - wal - .get_or_insert(p, Generic::from_slice(pv.as_bytes())) - .unwrap_left() - .value(), - pv - ); - } - } - - for (p, _) in &people { - assert!(wal.contains_key(p)); - } -} - -#[test] -fn get_or_insert_value_bytes_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_or_insert_value_bytes(&mut wal); -} - -#[test] -fn get_or_insert_value_bytes_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_or_insert_value_bytes(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_or_insert_value_bytes_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_get_or_insert_value_bytes_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_or_insert_value_bytes(&mut wal); -} - -fn get_by_bytes_or_insert_with(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let pvec = p.to_vec(); - let v = format!("My name is {}", p.name); - unsafe { - wal - .get_or_insert_with(Generic::from_slice(pvec.as_ref()), || v.clone()) - .unwrap_right() - .unwrap(); - } - (p, pvec, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pvec, pv) in &people { - assert!(wal.contains_key(p)); - unsafe { - assert_eq!( - wal - .get_or_insert_with(Generic::from_slice(pvec), || format!("Hello! 
{}!", p.name)) - .unwrap_left() - .value(), - pv - ); - } - } - - for (p, _, _) in &people { - assert!(wal.contains_key(p)); - } -} - -#[test] -fn get_by_bytes_or_insert_with_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_by_bytes_or_insert_with(&mut wal); -} - -#[test] -fn get_by_bytes_or_insert_with_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_by_bytes_or_insert_with(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_by_bytes_or_insert_with_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_get_by_bytes_or_insert_with_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_by_bytes_or_insert_with(&mut wal); -} - -fn get_by_bytes_or_insert_bytes(wal: &mut GenericOrderWal) { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let pvec = p.to_vec(); - let v = format!("My name is {}", p.name); - unsafe { - wal - .get_or_insert( - Generic::from_slice(pvec.as_ref()), - Generic::from_slice(v.as_bytes()), - ) - .unwrap_right() - .unwrap(); - } - (p, pvec, v) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (p, pvec, pv) in &people { - assert!(wal.contains_key(p)); - unsafe { - assert_eq!( - wal - .get_or_insert( - Generic::from_slice(pvec), - Generic::from_slice(pv.as_bytes()) - ) - .unwrap_left() - .value(), - pv - ); - } - } - - for (p, _, _) in &people { - assert!(wal.contains_key(p)); - } -} - -#[test] -fn get_by_bytes_or_insert_bytes_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - get_by_bytes_or_insert_bytes(&mut wal); -} - -#[test] -fn get_by_bytes_or_insert_bytes_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - get_by_bytes_or_insert_bytes(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn get_by_bytes_or_insert_bytes_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_get_by_bytes_or_insert_bytes_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - get_by_bytes_or_insert_bytes(&mut wal); -} diff --git a/src/swmr/generic/tests/insert.rs b/src/swmr/generic/tests/insert.rs deleted file mode 100644 index d63f543..0000000 --- a/src/swmr/generic/tests/insert.rs +++ /dev/null @@ -1,1158 +0,0 @@ -use super::*; - -fn insert_to_full(wal: &mut GenericOrderWal) { - let mut full = false; - for _ in 0u32.. { - let p = Person::random(); - #[allow(clippy::needless_borrows_for_generic_args)] - match wal.insert(&p, &format!("My name is {}", p.name)) { - Ok(_) => {} - Err(e) => match e { - Among::Right(Error::InsufficientSpace { .. 
}) => { - full = true; - break; - } - _ => panic!("unexpected error"), - }, - } - } - assert!(full); -} - -#[test] -fn insert_to_full_inmemory() { - let mut wal = GenericBuilder::new().with_capacity(100).alloc().unwrap(); - insert_to_full(&mut wal); -} - -#[test] -fn insert_to_full_map_anon() { - let mut wal = GenericBuilder::new().with_capacity(100).map_anon().unwrap(); - insert_to_full(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_to_full_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_insert_to_full_map_file"); - - unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(100) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap(); - insert_to_full(&mut wal); - } -} - -fn insert(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| { - let p = Person::random(); - #[allow(clippy::needless_borrows_for_generic_args)] - wal.insert(&p, &format!("My name is {}", p.name)).unwrap(); - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert(&mut wal); -} - -#[test] -fn insert_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_insert_map_file"); - - let people = unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap(); - insert(&mut wal) - }; - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for p in people { - assert!(wal.contains_key(&p)); - assert_eq!( - wal.get(&p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn insert_with_key_builder(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| unsafe { - let p = Person::random(); - wal - .insert_with_key_builder( - KeyBuilder::new(p.encoded_len() as u32, |buf: &mut VacantBuffer<'_>| { - p.encode_to_buffer(buf).map(|_| ()) - }), - &format!("My name is {}", p.name), - ) - .unwrap(); - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_with_key_builder_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_with_key_builder(&mut wal); -} - -#[test] -fn insert_with_key_builder_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_with_key_builder(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_with_key_builder_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_insert_with_key_builder_map_file"); - - let people = unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap(); - insert_with_key_builder(&mut wal) - }; - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for p in people { - 
assert!(wal.contains_key(&p)); - assert_eq!( - wal.get(&p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn insert_with_value_builder(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| unsafe { - let p = Person::random(); - let v = format!("My name is {}", p.name); - - wal - .insert_with_value_builder( - &p, - ValueBuilder::new(v.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice(v.as_bytes()) - }), - ) - .unwrap(); - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_with_value_builder_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_with_value_builder(&mut wal); -} - -#[test] -fn insert_with_value_builder_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_with_value_builder(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_with_value_builder_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_insert_with_value_builder_map_file"); - - let people = unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap(); - insert_with_value_builder(&mut wal) - }; - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for p in people { - assert!(wal.contains_key(&p)); - assert_eq!( - wal.get(&p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn insert_with_builders(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| unsafe { - let p = Person::random(); - let v = format!("My name is {}", p.name); - wal - .insert_with_builders( - KeyBuilder::new(p.encoded_len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.set_len(p.encoded_len()); - p.encode(buf).map(|_| ()) - }), - ValueBuilder::new(v.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice(v.as_bytes()) - }), - ) - .unwrap(); - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_with_builders_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_with_builders(&mut wal); -} - -#[test] -fn insert_with_builders_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_with_builders(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_with_builders_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_insert_with_builders_map_file"); - - let people = unsafe { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap(); - insert_with_builders(&mut wal) - }; - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for p in people { - assert!(wal.contains_key(&p)); - assert_eq!( - wal.get(&p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn insert_key_bytes_with_value( - wal: &mut GenericOrderWal, -) -> Vec<(Vec, Person)> { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let pbytes = p.to_vec(); - 
unsafe { - wal - .insert( - Generic::from_slice(&pbytes), - &format!("My name is {}", p.name), - ) - .unwrap(); - } - (pbytes, p) - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for (pbytes, p) in &people { - assert!(wal.contains_key(p)); - unsafe { - assert!(wal.contains_key_by_bytes(pbytes)); - } - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - - assert_eq!( - unsafe { wal.get_by_bytes(pbytes).unwrap().value() }, - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_key_bytes_with_value_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_key_bytes_with_value(&mut wal); -} - -#[test] -fn insert_key_bytes_with_value_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_key_bytes_with_value(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_key_bytes_with_value_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_insert_key_bytes_with_value_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - let people = insert_key_bytes_with_value(&mut wal); - - let wal = wal.reader(); - - for (pbytes, p) in &people { - assert!(wal.contains_key(p)); - unsafe { - assert!(wal.contains_key_by_bytes(pbytes)); - } - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - assert_eq!( - unsafe { wal.get_by_bytes(pbytes).unwrap().value() }, - &format!("My name is {}", p.name) - ); - } - - let wal = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for (pbytes, p) in people { - assert!(wal.contains_key(&p)); - unsafe { - assert!(wal.contains_key_by_bytes(&pbytes)); - } - assert_eq!( - wal.get(&p).unwrap().value(), - &format!("My name is {}", p.name) - ); - assert_eq!( - unsafe { wal.get_by_bytes(&pbytes).unwrap().value() }, - &format!("My name is {}", p.name) - ); - } -} - -fn insert_key_with_value_bytes(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| { - let p = Person::random(); - unsafe { - wal - .insert( - &p, - Generic::from_slice(format!("My name is {}", p.name).as_bytes()), - ) - .unwrap(); - } - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_key_with_value_bytes_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_key_with_value_bytes(&mut wal); -} - -#[test] -fn insert_key_with_value_bytes_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_key_with_value_bytes(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_key_with_value_bytes_map_file() { - let dir = tempdir().unwrap(); - let path = dir - .path() - .join("generic_wal_insert_key_with_value_bytes_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let people = insert_key_with_value_bytes(&mut wal); - let wal = wal.reader(); - - for p in &people { - assert!(wal.contains_key(p)); - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn 
insert_bytes(wal: &mut GenericOrderWal) -> Vec { - let people = (0..100) - .map(|_| { - let p = Person::random(); - let pbytes = p.to_vec(); - unsafe { - wal - .insert( - Generic::from_slice(&pbytes), - Generic::from_slice(format!("My name is {}", p.name).as_bytes()), - ) - .unwrap(); - } - p - }) - .collect::>(); - - assert_eq!(wal.len(), 100); - - for p in &people { - assert!(wal.contains_key(p)); - unsafe { - assert!(wal.contains_key_by_bytes(&p.to_vec())); - } - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } - - people -} - -#[test] -fn insert_bytes_inmemory() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .alloc::() - .unwrap(); - insert_bytes(&mut wal); -} - -#[test] -fn insert_bytes_map_anon() { - let mut wal = GenericBuilder::new() - .with_capacity(MB) - .map_anon::() - .unwrap(); - insert_bytes(&mut wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn insert_bytes_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_insert_bytes_map_file"); - - let mut wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let people = insert_bytes(&mut wal); - - let wal = wal.reader(); - - for p in &people { - assert!(wal.contains_key(p)); - unsafe { - assert!(wal.contains_key_by_bytes(&p.to_vec())); - } - assert_eq!( - wal.get(p).unwrap().value(), - &format!("My name is {}", p.name) - ); - } -} - -fn concurrent_basic(mut w: GenericOrderWal) { - let readers = (0..100u32).map(|i| (i, w.reader())).collect::>(); - - let handles = readers.into_iter().map(|(i, reader)| { - spawn(move || loop { - if let Some(p) = reader.get(&i) { - assert_eq!(p.key(), &i); - assert_eq!(p.value(), &i.to_le_bytes()); - break; - } - }) - }); - - spawn(move || { - for i in 0..100u32 { - #[allow(clippy::needless_borrows_for_generic_args)] - w.insert(&i, &i.to_le_bytes()).unwrap(); - } - }); - - for handle in handles { - handle.join().unwrap(); - } -} - -#[test] -fn concurrent_basic_inmemory() { - let wal = GenericBuilder::new().with_capacity(MB).alloc().unwrap(); - concurrent_basic(wal); -} - -#[test] -fn concurrent_basic_map_anon() { - let wal = GenericBuilder::new().with_capacity(MB).map_anon().unwrap(); - concurrent_basic(wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn concurrent_basic_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_concurrent_basic_map_file"); - - let wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - concurrent_basic(wal); - - let wal = unsafe { GenericBuilder::new().map::(path).unwrap() }; - - for i in 0..100u32 { - assert!(wal.contains_key(&i)); - } -} - -fn concurrent_one_key(mut w: GenericOrderWal) { - let readers = (0..100u32).map(|i| (i, w.reader())).collect::>(); - let handles = readers.into_iter().map(|(_, reader)| { - spawn(move || loop { - if let Some(p) = reader.get(&1) { - assert_eq!(p.key(), &1); - assert_eq!(p.value(), &1u32.to_le_bytes()); - break; - } - }) - }); - - w.insert(&1, &1u32.to_le_bytes()).unwrap(); - - for handle in handles { - handle.join().unwrap(); - } -} - -#[test] -fn concurrent_one_key_inmemory() { - let wal = GenericBuilder::new().with_capacity(MB).alloc().unwrap(); - concurrent_one_key(wal); -} - -#[test] -fn concurrent_one_key_map_anon() { - let wal = GenericBuilder::new().with_capacity(MB).map_anon().unwrap(); - 
concurrent_one_key(wal); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn concurrent_one_key_map_file() { - let dir = tempdir().unwrap(); - let path = dir.path().join("generic_wal_concurrent_basic_map_file"); - - let wal = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - concurrent_one_key(wal); - - let wal = unsafe { GenericBuilder::new().map::(path).unwrap() }; - - assert!(wal.contains_key(&1)); -} - -fn insert_batch( - wal: &mut GenericOrderWal, -) -> (Person, Vec<(Person, String)>, Person) { - const N: u32 = 5; - - let mut batch = vec![]; - let output = (0..N) - .map(|i| { - ( - { - let mut p = Person::random(); - p.id = i as u64; - p - }, - format!("My id is {i}"), - ) - .clone() - }) - .collect::>(); - - for (person, val) in output.iter() { - if person.id % 2 == 0 { - batch.push(GenericEntry::new(person, val)); - } else { - unsafe { - batch.push(GenericEntry::new( - person, - Generic::from_slice(val.as_bytes()), - )); - } - } - } - - let rp1 = Person::random(); - wal.insert(&rp1, &"rp1".to_string()).unwrap(); - wal.insert_batch(&mut batch).unwrap(); - let rp2 = Person::random(); - wal.insert(&rp2, &"rp2".to_string()).unwrap(); - - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - let wal = wal.reader(); - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - (rp1, output, rp2) -} - -#[test] -fn test_insert_batch_inmemory() { - insert_batch(&mut GenericBuilder::new().with_capacity(MB).alloc().unwrap()); -} - -#[test] -fn test_insert_batch_map_anon() { - insert_batch(&mut GenericBuilder::new().with_capacity(MB).map_anon().unwrap()); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn test_insert_batch_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_map_file" - )); - let mut map = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let (rp1, data, rp2) = insert_batch(&mut map); - - let map = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for (p, val) in data { - assert_eq!(map.get(&p).unwrap().value(), &val); - } - assert_eq!(map.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(map.get(&rp2).unwrap().value(), "rp2"); -} - -fn insert_batch_with_key_builder( - wal: &mut GenericOrderWal, -) -> (Person, Vec<(Person, String)>, Person) { - const N: u32 = 5; - - let mut batch = vec![]; - let output = (0..N) - .map(|i| { - ( - { - let mut p = Person::random(); - p.id = i as u64; - p - }, - format!("My id is {i}"), - ) - .clone() - }) - .collect::>(); - - for (person, val) in output.iter() { - batch.push(EntryWithKeyBuilder::new( - KeyBuilder::new(person.encoded_len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.set_len(person.encoded_len()); - person.encode(buf).map(|_| ()) - }), - Generic::from(val), - )); - } - - let rp1 = Person::random(); - wal.insert(&rp1, &"rp1".to_string()).unwrap(); - wal.insert_batch_with_key_builder(&mut batch).unwrap(); - let rp2 = Person::random(); - wal.insert(&rp2, &"rp2".to_string()).unwrap(); - - for (p, val) in output.iter() { - 
assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - let wal = wal.reader(); - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - (rp1, output, rp2) -} - -#[test] -fn test_insert_batch_with_key_builder_inmemory() { - insert_batch_with_key_builder(&mut GenericBuilder::new().with_capacity(MB).alloc().unwrap()); -} - -#[test] -fn test_insert_batch_with_key_builder_map_anon() { - insert_batch_with_key_builder(&mut GenericBuilder::new().with_capacity(MB).map_anon().unwrap()); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn test_insert_batch_with_key_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_key_builder_map_file" - )); - let mut map = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let (rp1, data, rp2) = insert_batch_with_key_builder(&mut map); - - let map = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for (p, val) in data { - assert_eq!(map.get(&p).unwrap().value(), &val); - } - assert_eq!(map.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(map.get(&rp2).unwrap().value(), "rp2"); -} - -fn insert_batch_with_value_builder( - wal: &mut GenericOrderWal, -) -> (Person, Vec<(Person, String)>, Person) { - const N: u32 = 5; - - let mut batch = vec![]; - let output = (0..N) - .map(|i| { - ( - { - let mut p = Person::random(); - p.id = i as u64; - p - }, - format!("My id is {i}"), - ) - .clone() - }) - .collect::>(); - - for (person, val) in output.iter() { - batch.push(EntryWithValueBuilder::new( - person.into(), - ValueBuilder::new(val.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice(val.as_bytes()) - }), - )); - } - - let rp1 = Person::random(); - wal.insert(&rp1, &"rp1".to_string()).unwrap(); - wal.insert_batch_with_value_builder(&mut batch).unwrap(); - let rp2 = Person::random(); - wal.insert(&rp2, &"rp2".to_string()).unwrap(); - - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - let wal = wal.reader(); - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - (rp1, output, rp2) -} - -#[test] -fn test_insert_batch_with_value_builder_inmemory() { - insert_batch_with_value_builder(&mut GenericBuilder::new().with_capacity(MB).alloc().unwrap()); -} - -#[test] -fn test_insert_batch_with_value_builder_map_anon() { - insert_batch_with_value_builder(&mut GenericBuilder::new().with_capacity(MB).map_anon().unwrap()); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn test_insert_batch_with_value_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_value_builder_map_file" - )); - let mut map = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let (rp1, data, rp2) = insert_batch_with_value_builder(&mut map); - - let map = 
unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for (p, val) in data { - assert_eq!(map.get(&p).unwrap().value(), &val); - } - assert_eq!(map.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(map.get(&rp2).unwrap().value(), "rp2"); -} - -fn insert_batch_with_builders( - wal: &mut GenericOrderWal, -) -> (Person, Vec<(Person, String)>, Person) { - const N: u32 = 5; - - let mut batch = vec![]; - let output = (0..N) - .map(|i| { - ( - { - let mut p = Person::random(); - p.id = i as u64; - p - }, - format!("My id is {i}"), - ) - .clone() - }) - .collect::>(); - - for (person, val) in output.iter() { - batch.push(EntryWithBuilders::new( - KeyBuilder::new(person.encoded_len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.set_len(person.encoded_len()); - person.encode(buf).map(|_| ()) - }), - ValueBuilder::new(val.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice(val.as_bytes()) - }), - )); - } - - let rp1 = Person::random(); - wal.insert(&rp1, &"rp1".to_string()).unwrap(); - wal.insert_batch_with_builders(&mut batch).unwrap(); - let rp2 = Person::random(); - wal.insert(&rp2, &"rp2".to_string()).unwrap(); - - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - let wal = wal.reader(); - for (p, val) in output.iter() { - assert_eq!(wal.get(p).unwrap().value(), val); - } - - assert_eq!(wal.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(wal.get(&rp2).unwrap().value(), "rp2"); - - (rp1, output, rp2) -} - -#[test] -fn test_insert_batch_with_builders_inmemory() { - insert_batch_with_builders(&mut GenericBuilder::new().with_capacity(MB).alloc().unwrap()); -} - -#[test] -fn test_insert_batch_with_builders_map_anon() { - insert_batch_with_builders(&mut GenericBuilder::new().with_capacity(MB).map_anon().unwrap()); -} - -#[test] -#[cfg_attr(miri, ignore)] -fn test_insert_batch_with_builders_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_builders_map_file" - )); - let mut map = unsafe { - GenericBuilder::new() - .with_capacity(MB) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut(&path) - .unwrap() - }; - - let (rp1, data, rp2) = insert_batch_with_builders(&mut map); - - let map = unsafe { - GenericBuilder::new() - .map::(&path) - .unwrap() - }; - - for (p, val) in data { - assert_eq!(map.get(&p).unwrap().value(), &val); - } - assert_eq!(map.get(&rp1).unwrap().value(), "rp1"); - assert_eq!(map.get(&rp2).unwrap().value(), "rp2"); -} diff --git a/src/swmr/reader.rs b/src/swmr/reader.rs new file mode 100644 index 0000000..92965cf --- /dev/null +++ b/src/swmr/reader.rs @@ -0,0 +1,65 @@ +use std::sync::Arc; + +use rarena_allocator::sync::Arena; + +use crate::{ + memtable::BaseTable, + sealed::{Constructable, Immutable}, + swmr::wal::OrderCore, +}; + +use super::writer::OrderWal; + +/// An [`OrderWal`] reader. +pub struct OrderWalReader(OrderWal); + +impl core::fmt::Debug for OrderWalReader +where + K: ?Sized, + V: ?Sized, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("OrderWalReader").field(&self.0.core).finish() + } +} + +impl Immutable for OrderWalReader {} + +impl OrderWalReader +where + K: ?Sized, + V: ?Sized, +{ + /// Creates a new read-only WAL reader. 
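Compared with the deleted `GenericOrderWalReader`, the new 0.5 reader is a thin newtype over the writer that shares the same `Arc`'d `OrderCore`, with mutation gated by the sealed `Immutable` marker rather than a parallel read-only API surface. A self-contained sketch of that pattern (simplified stand-in types, not the crate's actual definitions):

```rust
use std::sync::Arc;

// Shared core owned by the writer and all readers.
struct Core {
  entries: Vec<(String, String)>,
}

struct Writer {
  core: Arc<Core>,
}

// The reader wraps the writer and reuses its read paths; the absence of
// `&mut self` methods (plus a marker trait in the real crate) keeps it
// read-only.
struct Reader(Writer);

impl Reader {
  fn new(core: Arc<Core>) -> Self {
    Reader(Writer { core })
  }

  fn len(&self) -> usize {
    self.0.core.entries.len()
  }
}

fn main() {
  let core = Arc::new(Core {
    entries: vec![("a".into(), "1".into())],
  });
  let reader = Reader::new(core.clone());
  assert_eq!(reader.len(), 1);
}
```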
+  #[inline]
+  pub(super) fn new(wal: Arc<OrderCore<K, V, M, S>>) -> Self {
+    Self(OrderWal::construct(wal))
+  }
+}
+
+impl<K, V, M, S> Constructable for OrderWalReader<K, V, M, S>
+where
+  K: ?Sized + 'static,
+  V: ?Sized + 'static,
+  S: 'static,
+  M: BaseTable + 'static,
+{
+  type Allocator = Arena;
+  type Wal = OrderCore<K, V, M, S>;
+  type Memtable = M;
+  type Checksumer = S;
+  type Reader = OrderWalReader<K, V, M, S>;
+
+  #[inline]
+  fn as_wal(&self) -> &Self::Wal {
+    self.0.as_wal()
+  }
+
+  #[inline]
+  fn from_core(core: Self::Wal) -> Self {
+    Self(OrderWal {
+      core: Arc::new(core),
+    })
+  }
+}
diff --git a/src/swmr/tests.rs b/src/swmr/tests.rs
new file mode 100644
index 0000000..4f64104
--- /dev/null
+++ b/src/swmr/tests.rs
@@ -0,0 +1,344 @@
+use core::cmp;
+#[cfg(feature = "std")]
+use std::thread::spawn;
+
+use std::{
+  string::{String, ToString},
+  vec,
+  vec::Vec,
+};
+
+use base::{AlternativeTable, OrderWal, OrderWalReader};
+use dbutils::{
+  equivalent::{Comparable, Equivalent},
+  leb128::{decode_u64_varint, encode_u64_varint, encoded_u64_varint_len},
+  types::{KeyRef, Type, TypeRef},
+};
+
+use super::*;
+
+const MB: u32 = 1024 * 1024;
+
+macro_rules! expand_unit_tests {
+  ($prefix:literal: $wal:ty [$memtable_opts:expr]: $table:ty { $($name:ident $({ $($tt:tt)* })?),+$(,)? }) => {
+    $(
+      paste::paste! {
+        #[test]
+        fn [< test_ $prefix _ $name _inmemory >]() {
+          $name(&mut $crate::Builder::<$table>::new()
+            .with_capacity(MB)
+            .with_memtable_options($memtable_opts)
+            .alloc::<$wal>()
+            .unwrap()
+          );
+        }
+
+        #[test]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        fn [< test_ $prefix _ $name _map_anon >]() {
+          $name(&mut $crate::Builder::<$table>::new()
+            .with_capacity(MB)
+            .with_memtable_options($memtable_opts)
+            .map_anon::<$wal>().unwrap()
+          );
+        }
+
+        #[test]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        #[cfg_attr(miri, ignore)]
+        fn [< test_ $prefix _ $name _map_file >]() {
+          let dir = ::tempfile::tempdir().unwrap();
+          $name(
+            &mut unsafe {
+              $crate::Builder::<$table>::new()
+                .with_create_new(true)
+                .with_read(true)
+                .with_write(true)
+                .with_capacity(MB as u32)
+                .with_memtable_options($memtable_opts)
+                .map_mut::<$wal, _>(
+                  dir.path().join(concat!("test_", $prefix, "_", stringify!($name), "_map_file")
+                ),
+              )
+              .unwrap() },
+          );
+        }
+      }
+    )*
+  };
+  (move $prefix:literal: $wal:ty [$memtable_opts:expr]: $table:ty { $($name:ident $($block:expr)?),+$(,)? }) => {
+    $(
+      paste::paste! {
+        #[test]
+        fn [< test_ $prefix _ $name _inmemory >]() {
+          $name($crate::Builder::<$table>::new().with_memtable_options($memtable_opts).with_capacity(MB).alloc::<$wal>().unwrap());
+        }
+
+        #[test]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        fn [< test_ $prefix _ $name _map_anon >]() {
+          $name($crate::Builder::<$table>::new().with_memtable_options($memtable_opts).with_capacity(MB).map_anon::<$wal>().unwrap());
+        }
+
+        #[test]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        #[cfg_attr(miri, ignore)]
+        fn [< test_ $prefix _ $name _map_file >]() {
+          let dir = ::tempfile::tempdir().unwrap();
+          let p = dir.path().join(concat!("test_", $prefix, "_", stringify!($name), "_map_file"));
+          let wal = unsafe {
+            $crate::Builder::<$table>::new()
+              .with_memtable_options($memtable_opts)
+              .with_create_new(true)
+              .with_read(true)
+              .with_write(true)
+              .with_capacity(MB as u32)
+              .map_mut::<$wal, _>(
+                &p,
+              )
+              .unwrap()
+          };
+
+          let res = $name(wal);
+
+          $(
+            {
+              let f = |p, res| { $block(p, res) };
+              f(p, res);
+            }
+          )?
+        }
+      }
+    )*
+  };
+  ($prefix:literal: $wal:ty [$memtable_opts:expr]: $table:ty { $($name:ident($builder:expr) $({ $($tt:tt)* })?),+$(,)? }) => {
+    $(
+      paste::paste! {
+        #[test]
+        fn [< test_ $prefix _ $name _inmemory >]() {
+          $name(&mut $builder.alloc::<$wal>().unwrap());
+        }
+
+        #[test]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        fn [< test_ $prefix _ $name _map_anon >]() {
+          $name(&mut $builder
+            .map_anon::<$wal>().unwrap()
+          );
+        }
+
+        #[test]
+        #[cfg_attr(miri, ignore)]
+        #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+        fn [< test_ $prefix _ $name _map_file >]() {
+          let dir = ::tempfile::tempdir().unwrap();
+          $name(
+            &mut unsafe {
+              $builder
+                .with_create_new(true)
+                .with_read(true)
+                .with_write(true)
+                .with_capacity(MB as u32)
+                .map_mut::<$wal, _>(
+                  dir.path().join(concat!("test_", $prefix, "_", stringify!($name), "_map_file")
+                ),
+              )
+              .unwrap() },
+          );
+        }
+      }
+    )*
+  };
+}
+
+type OrderWalAlternativeTable<K, V> = OrderWal<K, V, AlternativeTable<K, V>>;
+type OrderWalReaderAlternativeTable<K, V> = OrderWalReader<K, V, AlternativeTable<K, V>>;
+
+type MultipleVersionOrderWalAlternativeTable<K, V> =
+  multiple_version::OrderWal<K, V, multiple_version::AlternativeTable<K, V>>;
+type MultipleVersionOrderWalReaderAlternativeTable<K, V> =
+  multiple_version::OrderWalReader<K, V, multiple_version::AlternativeTable<K, V>>;
+
+#[doc(hidden)]
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Person {
+  #[doc(hidden)]
+  pub id: u64,
+  #[doc(hidden)]
+  pub name: String,
+}
+
+impl Person {
+  #[doc(hidden)]
+  #[cfg(test)]
+  pub fn random() -> Self {
+    Self {
+      id: rand::random(),
+      name: names::Generator::default().next().unwrap(),
+    }
+  }
+
+  #[doc(hidden)]
+  pub fn as_ref(&self) -> PersonRef<'_> {
+    PersonRef {
+      id: self.id,
+      name: &self.name,
+    }
+  }
+
+  #[doc(hidden)]
+  #[cfg(test)]
+  #[allow(dead_code)]
+  fn to_vec(&self) -> Vec<u8> {
+    let mut buf = vec![0; self.encoded_len()];
+    self.encode(&mut buf).unwrap();
+    buf
+  }
+}
+
+#[doc(hidden)]
+#[derive(Debug, Clone, Copy)]
+pub struct PersonRef<'a> {
+  id: u64,
+  name: &'a str,
+}
+
+impl PartialEq for PersonRef<'_> {
+  fn eq(&self, other: &Self) -> bool {
+    self.id == other.id && self.name == other.name
+  }
+}
+
+impl Eq for PersonRef<'_> {}
+
+impl PartialOrd for PersonRef<'_> {
+  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+    Some(self.cmp(other))
+  }
+}
+
+impl Ord for PersonRef<'_> {
+  fn cmp(&self, other: &Self) -> cmp::Ordering {
+    self
+      .id
+      .cmp(&other.id)
+      .then_with(|| self.name.cmp(other.name))
+  }
+}
+
+impl Equivalent<Person> for PersonRef<'_> {
+  fn equivalent(&self, key: &Person) -> bool {
+    self.id == key.id && self.name == key.name
+  }
+}
+
+impl Comparable<Person> for PersonRef<'_> {
+  fn compare(&self, key: &Person) -> core::cmp::Ordering {
+    self.id.cmp(&key.id).then_with(|| self.name.cmp(&key.name))
+  }
+}
+
+impl Equivalent<PersonRef<'_>> for Person {
+  fn equivalent(&self, key: &PersonRef<'_>) -> bool {
+    self.id == key.id && self.name == key.name
+  }
+}
+
+impl Comparable<PersonRef<'_>> for Person {
+  fn compare(&self, key: &PersonRef<'_>) -> core::cmp::Ordering {
+    self
+      .id
+      .cmp(&key.id)
+      .then_with(|| self.name.as_str().cmp(key.name))
+  }
+}
+
+impl KeyRef<'_, Person> for PersonRef<'_> {
+  fn compare<Q>(&self, a: &Q) -> cmp::Ordering
+  where
+    Q: ?Sized + Comparable<Self>,
+  {
+    Comparable::compare(a, self).reverse()
+  }
+
+  unsafe fn compare_binary(this: &[u8], other: &[u8]) -> cmp::Ordering {
+    let (this_id_size, this_id) = decode_u64_varint(this).unwrap();
+    let (other_id_size, other_id) = decode_u64_varint(other).unwrap();
+    PersonRef {
+      id: this_id,
+      name: std::str::from_utf8(&this[this_id_size..]).unwrap(),
+    }
+    .cmp(&PersonRef {
+      id: other_id,
+      name: std::str::from_utf8(&other[other_id_size..]).unwrap(),
+    })
+  }
+}
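+
+// `Person` keys are laid out as a LEB128 varint `id` followed by the raw
+// `name` bytes (see `Type::encode` below); that fixed layout is what lets
+// `compare_binary` above order two encoded keys without decoding them into
+// owned values.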
+
+impl Type for Person {
+  type Ref<'a> = PersonRef<'a>;
+  type Error = dbutils::error::InsufficientBuffer;
+
+  fn encoded_len(&self) -> usize {
+    encoded_u64_varint_len(self.id) + self.name.len()
+  }
+
+  fn encode(&self, buf: &mut [u8]) -> Result<usize, Self::Error> {
+    let id_size = encode_u64_varint(self.id, buf)?;
+    buf[id_size..].copy_from_slice(self.name.as_bytes());
+    Ok(id_size + self.name.len())
+  }
+
+  #[inline]
+  fn encode_to_buffer(
+    &self,
+    buf: &mut dbutils::buffer::VacantBuffer<'_>,
+  ) -> Result<usize, Self::Error> {
+    let id_size = buf.put_u64_varint(self.id)?;
+    buf.put_slice_unchecked(self.name.as_bytes());
+    Ok(id_size + self.name.len())
+  }
+}
+
+impl<'a> TypeRef<'a> for PersonRef<'a> {
+  unsafe fn from_slice(src: &'a [u8]) -> Self {
+    let (id_size, id) = decode_u64_varint(src).unwrap();
+    let name = std::str::from_utf8(&src[id_size..]).unwrap();
+    PersonRef { id, name }
+  }
+}
+
+impl PersonRef<'_> {
+  #[cfg(test)]
+  #[allow(dead_code)]
+  fn encode_into_vec(&self) -> Result<Vec<u8>, dbutils::error::InsufficientBuffer> {
+    let mut buf = vec![0; encoded_u64_varint_len(self.id) + self.name.len()];
+    let id_size = encode_u64_varint(self.id, &mut buf)?;
+    buf[id_size..].copy_from_slice(self.name.as_bytes());
+    Ok(buf)
+  }
+}
+
+#[cfg(all(test, any(test_swmr_constructor, all_orderwal_tests)))]
+mod constructor;
+
+#[cfg(all(test, any(test_swmr_insert, all_orderwal_tests)))]
+mod insert;
+
+#[cfg(all(test, any(test_swmr_iters, all_orderwal_tests)))]
+mod iters;
+
+#[cfg(all(test, any(test_swmr_get, all_orderwal_tests)))]
+mod get;
+
+#[cfg(all(test, any(test_swmr_multiple_version_constructor, all_orderwal_tests)))]
+mod multiple_version_constructor;
+
+#[cfg(all(test, any(test_swmr_multiple_version_get, all_orderwal_tests)))]
+mod multiple_version_get;
+
+#[cfg(all(test, any(test_swmr_multiple_version_insert, all_orderwal_tests)))]
+mod multiple_version_insert;
+
+#[cfg(all(test, any(test_swmr_multiple_version_iters, all_orderwal_tests)))]
+mod multiple_version_iters;
diff --git a/src/swmr/tests/constructor.rs b/src/swmr/tests/constructor.rs
new file mode 100644
index 0000000..27c54d8
--- /dev/null
+++ b/src/swmr/tests/constructor.rs
@@ -0,0 +1,113 @@
+use base::{Reader, Writer};
+use skl::KeySize;
+
+use crate::memtable::{
+  alternative::{Table, TableOptions},
+  Memtable, MemtableEntry,
+};
+
+use super::*;
+
+fn zero_reserved<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  unsafe {
+    assert_eq!(wal.reserved_slice(), b"");
+    assert_eq!(wal.reserved_slice_mut(), b"");
+
+    let wal = wal.reader();
+    assert_eq!(wal.reserved_slice(), b"");
+  }
+}
+
+fn reserved<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  unsafe {
+    let buf = wal.reserved_slice_mut();
+    buf.copy_from_slice(b"al8n");
+    assert_eq!(wal.reserved_slice(), b"al8n");
+    assert_eq!(wal.reserved_slice_mut(), b"al8n");
+
+    let wal = wal.reader();
+    assert_eq!(wal.reserved_slice(), b"al8n");
+  }
+}
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  "linked": OrderWalAlternativeTable<Person, String> [TableOptions::Linked]: Table<_, _> {
+    zero_reserved,
+  }
+);
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  "linked": OrderWalAlternativeTable<Person, String> [TableOptions::Linked]: Table<_, _> {
+    reserved({
+      crate::Builder::new()
+        .with_capacity(MB)
+        .with_reserved(4)
+    }),
+  }
+);
+
+expand_unit_tests!(
+  "arena": OrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: Table<_, _> {
+    zero_reserved,
+  }
+);
+
+expand_unit_tests!(
+  "arena": OrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: Table<_, _> {
+    reserved({
+      crate::Builder::new()
+        .with_capacity(MB)
+        .with_reserved(4)
+    }),
+  }
+);
+
+#[test]
+#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+#[cfg_attr(miri, ignore)]
+fn reopen_wrong_kind() {
+  use crate::Builder;
+
+  let dir = tempfile::tempdir().unwrap();
+  let path = dir.path().join("test_reopen_wrong_kind");
+  let wal = unsafe {
+    Builder::new()
+      .with_capacity(MB)
+      .with_maximum_key_size(KeySize::with(10))
+      .with_maximum_value_size(10)
+      .with_create_new(true)
+      .with_read(true)
+      .with_write(true)
+      .map_mut::<OrderWalAlternativeTable<Person, String>, _>(path.as_path())
+      .unwrap()
+  };
+
+  assert!(!wal.read_only());
+  assert_eq!(wal.capacity(), MB);
+  assert!(wal.remaining() < MB);
+  assert_eq!(wal.maximum_key_size(), 10);
+  assert_eq!(wal.maximum_value_size(), 10);
+  assert_eq!(wal.path().unwrap().as_path(), path.as_path());
+  assert_eq!(wal.options().maximum_key_size(), 10);
+
+  let err = unsafe {
+    Builder::new()
+      .with_capacity(MB)
+      .with_read(true)
+      .map_mut::<MultipleVersionOrderWalAlternativeTable<Person, String>, _>(path.as_path())
+      .unwrap_err()
+  };
+  assert!(matches!(err, crate::error::Error::KindMismatch { .. }));
+}
diff --git a/src/swmr/tests/get.rs b/src/swmr/tests/get.rs
new file mode 100644
index 0000000..360f890
--- /dev/null
+++ b/src/swmr/tests/get.rs
@@ -0,0 +1,254 @@
+use base::OrderWal;
+
+use dbutils::{buffer::VacantBuffer, types::MaybeStructured};
+
+use std::collections::BTreeMap;
+
+use crate::{
+  memtable::{alternative::TableOptions, Memtable, MemtableEntry},
+  swmr::base::{Reader, Writer},
+  types::{KeyBuilder, ValueBuilder},
+};
+
+use super::*;
+
+fn first<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..10)
+    .map(|_| {
+      let p = Person::random();
+      let v = std::format!("My name is {}", p.name);
+      wal.insert(&p, &v).unwrap();
+
+      (p, v)
+    })
+    .collect::<BTreeMap<_, _>>();
+
+  let ent = wal.first().unwrap();
+  let (p, v) = people.first_key_value().unwrap();
+  assert!(ent.key().equivalent(p));
+  assert_eq!(ent.value(), v);
+
+  let wal = wal.reader();
+  let ent = wal.first().unwrap();
+  let (p, v) = people.first_key_value().unwrap();
+  assert!(ent.key().equivalent(p));
+  assert_eq!(ent.value(), v);
+}
+
+fn last<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..10)
+    .map(|_| {
+      let p = Person::random();
+      let v = std::format!("My name is {}", p.name);
+      wal.insert(&p, &v).unwrap();
+
+      (p, v)
+    })
+    .collect::<BTreeMap<_, _>>();
+
+  let ent = wal.last().unwrap();
+  let (p, v) = people.last_key_value().unwrap();
+  assert!(ent.key().equivalent(p));
+  assert_eq!(ent.value(), v);
+
+  let wal = wal.reader();
+  let ent = wal.last().unwrap();
+  assert!(ent.key().equivalent(p));
+  assert_eq!(ent.value(), v);
+}
+
+#[allow(clippy::needless_borrows_for_generic_args)]
+fn insert<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..100)
+    .map(|_| {
+      let p = Person::random();
+      let v = std::format!("My name is {}", p.name);
+      wal.insert(&p, &v).unwrap();
+      (p, v)
+    })
+    .collect::<Vec<_>>();
+
+  assert_eq!(wal.len(), 100);
+
+  for (p, pv) in &people {
+    assert!(wal.contains_key(p));
+
+    assert_eq!(wal.get(p).unwrap().value(), pv);
+  }
+
+  for (p, _) in &people {
+    assert!(wal.contains_key(p));
+  }
+}
+
+fn insert_with_value_builder<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..100)
+    .map(|_| {
+      let p = Person::random();
+      let v = std::format!("My name is {}", p.name);
+      wal
+        .insert_with_value_builder(
+          &p,
+          ValueBuilder::new(v.len(), |buf: &mut VacantBuffer<'_>| {
+            buf.put_slice(v.as_bytes()).map(|_| v.len())
+          }),
+        )
+        .unwrap();
+      (p, v)
+    })
+    .collect::<Vec<_>>();
+
+  assert_eq!(wal.len(), 100);
+
+  for (p, _) in &people {
+    assert!(wal.contains_key(p));
+    assert!(wal.contains_key(&p.as_ref()));
+  }
+}
+
+#[allow(clippy::needless_borrows_for_generic_args)]
+fn insert_with_key_builder<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..100)
+    .map(|_| {
+      let p = Person::random();
+      let pvec = p.to_vec();
+      let v = std::format!("My name is {}", p.name);
+      unsafe {
+        wal
+          .insert_with_key_builder(
+            KeyBuilder::once(p.encoded_len(), |buf| p.encode_to_buffer(buf)),
+            &v,
+          )
+          .unwrap();
+      }
+      (p, v)
+    })
+    .collect::<Vec<_>>();
+
+  assert_eq!(wal.len(), 100);
+
+  for (p, pv) in &people {
+    assert!(wal.contains_key(p));
+    assert_eq!(wal.get(p).unwrap().value(), pv);
+  }
+
+  for (p, _) in &people {
+    assert!(wal.contains_key(p));
+  }
+}
+
+fn insert_with_bytes<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..100)
+    .map(|_| {
+      let p = Person::random();
+      let v = std::format!("My name is {}", p.name);
+      unsafe {
+        wal
+          .insert(
+            MaybeStructured::from_slice(p.to_vec().as_slice()),
+            MaybeStructured::from_slice(v.as_bytes()),
+          )
+          .unwrap();
+      }
+      (p, v)
+    })
+    .collect::<Vec<_>>();
+
+  assert_eq!(wal.len(), 100);
+
+  for (p, pv) in &people {
+    assert!(wal.contains_key(p));
+    assert!(wal.contains_key(&p.as_ref()));
+    assert_eq!(wal.get(p).unwrap().value(), pv);
+  }
+}
+
+fn insert_with_builders<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a> + std::fmt::Debug,
+  M::Error: std::fmt::Debug,
+{
+  let people = (0..1)
+    .map(|_| {
+      let p = Person::random();
+      let pvec = p.to_vec();
+      let v = std::format!("My name is {}", p.name);
+      wal
+        .insert_with_builders(
+          KeyBuilder::new(pvec.len(), |buf: &mut VacantBuffer<'_>| {
+            p.encode_to_buffer(buf)
+          }),
+          ValueBuilder::new(v.len(), |buf: &mut VacantBuffer<'_>| {
+            buf.put_slice(v.as_bytes()).map(|_| v.len())
+          }),
+        )
+        .unwrap();
+      (p, pvec, v)
+    })
+    .collect::<Vec<_>>();
+
+  assert_eq!(wal.len(), 1);
+
+  for (p, pvec, pv) in &people {
+    assert!(wal.contains_key(p));
+    unsafe {
+      assert_eq!(wal.get_by_bytes(pvec.as_ref()).unwrap().value(), pv);
+    }
+  }
+
+  for (p, _, _) in &people {
+    assert!(wal.contains_key(p));
+  }
+}
+
+#[cfg(feature = "std")]
+expand_unit_tests!("linked": OrderWalAlternativeTable<Person, String> [TableOptions::Linked]: crate::memtable::alternative::Table<_, _> {
+  first,
+  last,
+  insert,
+  insert_with_value_builder,
+  insert_with_key_builder,
+  insert_with_bytes,
+  insert_with_builders,
+});
+
+expand_unit_tests!("arena": OrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: crate::memtable::alternative::Table<_, _> {
+  first,
+  last,
+  insert,
+  insert_with_value_builder,
+  insert_with_key_builder,
+  insert_with_bytes,
+  insert_with_builders,
+});
diff --git a/src/swmr/tests/insert.rs b/src/swmr/tests/insert.rs
new file mode 100644
index 0000000..6f147b0
--- /dev/null
+++ b/src/swmr/tests/insert.rs
@@ -0,0 +1,451 @@
+use base::{Reader, Writer};
+use dbutils::{buffer::VacantBuffer, types::MaybeStructured};
+
+use crate::{
+  batch::BatchEntry,
+  memtable::{
+    alternative::{Table, TableOptions},
+    Memtable, MemtableEntry,
+  },
+  types::{KeyBuilder, ValueBuilder},
+  Builder,
+};
+
+use super::*;
+
+#[cfg(feature = "std")]
+fn concurrent_basic<M>(mut w: OrderWal<u32, [u8; 4], M>)
+where
+  M: Memtable<Key = u32, Value = [u8; 4]> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let readers = (0..100u32).map(|i| (i, w.reader())).collect::<Vec<_>>();
+
+  let handles = readers.into_iter().map(|(i, reader)| {
+    spawn(move || loop {
+      if let Some(p) = reader.get(&i) {
+        assert_eq!(p.key(), &i);
+        assert_eq!(p.value(), &i.to_le_bytes());
+        break;
+      }
+    })
+  });
+
+  spawn(move || {
+    for i in 0..100u32 {
+      #[allow(clippy::needless_borrows_for_generic_args)]
+      w.insert(&i, &i.to_le_bytes()).unwrap();
+    }
+  });
+
+  for handle in handles {
+    handle.join().unwrap();
+  }
+}
+
+#[cfg(feature = "std")]
+fn concurrent_one_key<M>(mut w: OrderWal<u32, [u8; 4], M>)
+where
+  M: Memtable<Key = u32, Value = [u8; 4]> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  let readers = (0..100u32).map(|i| (i, w.reader())).collect::<Vec<_>>();
+  let handles = readers.into_iter().map(|(_, reader)| {
+    spawn(move || loop {
+      if let Some(p) = reader.get(&1) {
+        assert_eq!(p.key(), &1);
+        assert_eq!(p.value(), &1u32.to_le_bytes());
+        break;
+      }
+    })
+  });
+
+  w.insert(&1, &1u32.to_le_bytes()).unwrap();
+
+  for handle in handles {
+    handle.join().unwrap();
+  }
+}
+
+fn insert_batch<M>(mut wal: OrderWal<Person, String, M>) -> (Person, Vec<(Person, String)>, Person)
+where
+  M: Memtable<Key = Person, Value = String> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  const N: u32 = 5;
+
+  let mut batch = vec![];
+  let output = (0..N)
+    .map(|i| {
+      (
+        {
+          let mut p = Person::random();
+          p.id = i as u64;
+          p
+        },
+        std::format!("My id is {i}"),
+      )
+        .clone()
+    })
+    .collect::<Vec<_>>();
+
+  for (person, val) in output.iter() {
+    batch.push(BatchEntry::new(
+      MaybeStructured::from(person),
+      MaybeStructured::from(val),
+    ));
+  }
+
+  let rp1 = Person::random();
+  wal.insert(&rp1, &"rp1".to_string()).unwrap();
+  wal.insert_batch(&mut batch).unwrap();
+  let rp2 = Person::random();
+  wal.insert(&rp2, &"rp2".to_string()).unwrap();
+
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  let wal = wal.reader();
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  (rp1, output, rp2)
+}
+
+fn insert_batch_with_key_builder<M>(
+  mut wal: OrderWal<Person, String, M>,
+) -> (Person, Vec<(Person, String)>, Person)
+where
+  M: Memtable<Key = Person, Value = String> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  const N: u32 = 5;
+
+  let mut batch = vec![];
+  let output = (0..N)
+    .map(|i| {
+      (
+        {
+          let mut p = Person::random();
+          p.id = i as u64;
+          p
+        },
+        std::format!("My id is {i}"),
+      )
+        .clone()
+    })
+    .collect::<Vec<_>>();
+
+  for (person, val) in output.iter() {
+    batch.push(BatchEntry::new(
+      KeyBuilder::new(person.encoded_len(), |buf: &mut VacantBuffer<'_>| {
+        buf.set_len(person.encoded_len());
+        person.encode(buf)
+      }),
+      MaybeStructured::from(val),
+    ));
+  }
+
+  let rp1 = Person::random();
+  wal.insert(&rp1, &"rp1".to_string()).unwrap();
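+  // Each `KeyBuilder` in `batch` reserves exactly `person.encoded_len()`
+  // bytes inside the WAL and encodes the key in place, so the batch insert
+  // below never materializes intermediate key buffers.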
+  wal.insert_batch_with_key_builder(&mut batch).unwrap();
+  let rp2 = Person::random();
+  wal.insert(&rp2, &"rp2".to_string()).unwrap();
+
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  let wal = wal.reader();
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  (rp1, output, rp2)
+}
+
+fn insert_batch_with_value_builder<M>(
+  mut wal: OrderWal<Person, String, M>,
+) -> (Person, Vec<(Person, String)>, Person)
+where
+  M: Memtable<Key = Person, Value = String> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  const N: u32 = 5;
+
+  let mut batch = vec![];
+  let output = (0..N)
+    .map(|i| {
+      (
+        {
+          let mut p = Person::random();
+          p.id = i as u64;
+          p
+        },
+        std::format!("My id is {i}"),
+      )
+        .clone()
+    })
+    .collect::<Vec<_>>();
+
+  for (person, val) in output.iter() {
+    batch.push(BatchEntry::new(
+      person.into(),
+      ValueBuilder::new(val.len(), |buf: &mut VacantBuffer<'_>| {
+        buf.put_slice(val.as_bytes()).map(|_| val.len())
+      }),
+    ));
+  }
+
+  let rp1 = Person::random();
+  wal.insert(&rp1, &"rp1".to_string()).unwrap();
+  wal.insert_batch_with_value_builder(&mut batch).unwrap();
+  let rp2 = Person::random();
+  wal.insert(&rp2, &"rp2".to_string()).unwrap();
+
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  let wal = wal.reader();
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  (rp1, output, rp2)
+}
+
+fn insert_batch_with_builders<M>(
+  mut wal: OrderWal<Person, String, M>,
+) -> (Person, Vec<(Person, String)>, Person)
+where
+  M: Memtable<Key = Person, Value = String> + Send + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
+  const N: u32 = 5;
+
+  let mut batch = vec![];
+  let output = (0..N)
+    .map(|i| {
+      (
+        {
+          let mut p = Person::random();
+          p.id = i as u64;
+          p
+        },
+        std::format!("My id is {i}"),
+      )
+        .clone()
+    })
+    .collect::<Vec<_>>();
+
+  for (person, val) in output.iter() {
+    batch.push(BatchEntry::new(
+      KeyBuilder::new(person.encoded_len(), |buf: &mut VacantBuffer<'_>| {
+        buf.set_len(person.encoded_len());
+        person.encode(buf)
+      }),
+      ValueBuilder::new(val.len(), |buf: &mut VacantBuffer<'_>| {
+        buf.put_slice(val.as_bytes()).map(|_| val.len())
+      }),
+    ));
+  }
+
+  let rp1 = Person::random();
+  wal.insert(&rp1, &"rp1".to_string()).unwrap();
+  wal.insert_batch_with_builders(&mut batch).unwrap();
+  let rp2 = Person::random();
+  wal.insert(&rp2, &"rp2".to_string()).unwrap();
+
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  let wal = wal.reader();
+  for (p, val) in output.iter() {
+    assert_eq!(wal.get(p).unwrap().value(), val);
+  }
+
+  assert_eq!(wal.get(&rp1).unwrap().value(), "rp1");
+  assert_eq!(wal.get(&rp2).unwrap().value(), "rp2");
+
+  (rp1, output, rp2)
+}
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  move "linked": OrderWalAlternativeTable<u32, [u8; 4]> [TableOptions::Linked]: Table<_, _> {
+    concurrent_basic |p, _res| {
+      let wal = unsafe { Builder::new().map::<OrderWalReaderAlternativeTable<u32, [u8; 4]>, _>(p).unwrap() };
+
+      for i in 0..100u32 {
+        assert!(wal.contains_key(&i));
+      }
+    },
+    concurrent_one_key |p, _res| {
+      let wal = unsafe { Builder::new().map::<OrderWalReaderAlternativeTable<u32, [u8; 4]>, _>(p).unwrap() };
+      assert!(wal.contains_key(&1));
+    },
+  }
+);
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  move "linked": OrderWalAlternativeTable<Person, String> [TableOptions::Linked]: Table<_, _> {
+    insert_batch |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_key_builder |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_value_builder |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_builders |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    }
+  }
+);
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  move "arena": OrderWalAlternativeTable<u32, [u8; 4]> [TableOptions::Arena(Default::default())]: Table<_, _> {
+    concurrent_basic |p, _res| {
+      let wal = unsafe { Builder::new().map::<OrderWalReaderAlternativeTable<u32, [u8; 4]>, _>(p).unwrap() };
+
+      for i in 0..100u32 {
+        assert!(wal.contains_key(&i));
+      }
+    },
+    concurrent_one_key |p, _res| {
+      let wal = unsafe { Builder::new().map::<OrderWalReaderAlternativeTable<u32, [u8; 4]>, _>(p).unwrap() };
+      assert!(wal.contains_key(&1));
+    },
+  }
+);
+
+expand_unit_tests!(
+  move "arena": OrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: Table<_, _> {
+    insert_batch |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_key_builder |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_value_builder |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    },
+    insert_batch_with_builders |p, (rp1, data, rp2)| {
+      let map = unsafe {
+        Builder::new()
+          .map::<OrderWalReaderAlternativeTable<Person, String>, _>(&p)
+          .unwrap()
+      };
+
+      for (p, val) in data {
+        assert_eq!(map.get(&p).unwrap().value(), &val);
+      }
+      assert_eq!(map.get(&rp1).unwrap().value(), "rp1");
+      assert_eq!(map.get(&rp2).unwrap().value(), "rp2");
+    }
+  }
+);
diff --git a/src/swmr/generic/tests/iters.rs b/src/swmr/tests/iters.rs
similarity index 56%
rename from src/swmr/generic/tests/iters.rs
rename to src/swmr/tests/iters.rs
index 0463eb6..6fac374 100644
--- a/src/swmr/generic/tests/iters.rs
+++ b/src/swmr/tests/iters.rs
@@ -1,10 +1,25 @@
+use core::ops::Bound;
+use std::collections::BTreeMap;
+
+use base::{OrderWal, Reader, Writer};
+
+use crate::memtable::{
+  alternative::{Table, TableOptions},
+  Memtable, MemtableEntry,
+};
+
 use super::*;
 
-fn iter(wal: &mut GenericOrderWal<Person, String>) -> Vec<(Person, String)> {
+fn iter<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
   let mut people = (0..100)
     .map(|_| {
       let p = Person::random();
-      let v = format!("My name is {}", p.name);
+      let v = std::format!("My name is {}", p.name);
       wal.insert(&p, &v).unwrap();
       (p, v)
     })
@@ -16,65 +31,65 @@ fn iter(wal: &mut GenericOrderWal<Person, String>) -> Vec<(Person, String)> {
 
   for (pwal, pvec) in people.iter().zip(iter.by_ref()) {
     assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
     assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
   }
 
   let mut rev_iter = wal.iter().rev();
 
   for (pwal, pvec) in people.iter().rev().zip(rev_iter.by_ref()) {
     assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
     assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
   }
 
-  people
-}
+  let mut iter = wal.keys();
 
-#[test]
-fn iter_inmemory() {
-  let mut wal = GenericBuilder::new()
-    .with_capacity(MB)
-    .alloc::<Person, String>()
-    .unwrap();
-  iter(&mut wal);
-}
+  for (pwal, pvec) in people.iter().zip(iter.by_ref()) {
+    assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
+  }
 
-#[test]
-fn iter_map_anon() {
-  let mut wal = GenericBuilder::new()
-    .with_capacity(MB)
-    .map_anon::<Person, String>()
-    .unwrap();
-  iter(&mut wal);
-}
+  let mut rev_iter = wal.keys().rev();
 
-#[test]
-#[cfg_attr(miri, ignore)]
-fn iter_map_file() {
-  let dir = tempdir().unwrap();
-  let path = dir.path().join("generic_wal_iter_map_file");
-
-  let mut wal = unsafe {
-    GenericBuilder::new()
-      .with_capacity(MB)
-      .with_create_new(true)
-      .with_read(true)
-      .with_write(true)
-      .map_mut::<Person, String, _>(&path)
-      .unwrap()
-  };
+  for (pwal, pvec) in people.iter().rev().zip(rev_iter.by_ref()) {
+    assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
+  }
+
+  let mut iter = wal.values();
+
+  for (pwal, pvec) in people.iter().zip(iter.by_ref()) {
+    assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
+  }
+
+  let mut rev_iter = wal.values().rev();
 
-  let people = iter(&mut wal);
+  for (pwal, pvec) in people.iter().rev().zip(rev_iter.by_ref()) {
+    assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
+  }
 
   let wal = wal.reader();
   let mut iter = wal.iter();
 
   for (pwal, pvec) in people.iter().zip(iter.by_ref()) {
     assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
     assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
   }
 }
 
-fn bounds(wal: &mut GenericOrderWal<u32, u32>) {
+fn bounds<M>(wal: &mut OrderWal<u32, u32, M>)
+where
+  M: Memtable<Key = u32, Value = u32> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
   for i in 0..100u32 {
     wal.insert(&i, &i).unwrap();
   }
@@ -209,43 +224,17 @@ fn bounds(wal: &mut GenericOrderWal<u32, u32>) {
   assert_eq!(lower_unbounded.value(), &0u32);
 }
 
-#[test]
-fn bounds_inmemory() {
-  let mut wal = GenericBuilder::new().with_capacity(MB).alloc().unwrap();
-  bounds(&mut wal);
-}
-
-#[test]
-fn bounds_map_anon() {
-  let mut wal = GenericBuilder::new().with_capacity(MB).map_anon().unwrap();
-  bounds(&mut wal);
-}
-
-#[test]
-#[cfg_attr(miri, ignore)]
-fn bounds_file() {
-  let dir = tempdir().unwrap();
-  let path = dir.path().join("generic_wal_bounds_map_file");
-
-  let mut wal = unsafe {
-    GenericBuilder::new()
-      .with_capacity(MB)
-      .with_create_new(true)
-      .with_read(true)
-      .with_write(true)
-      .map_mut::<u32, u32, _>(&path)
-      .unwrap()
-  };
-
-  bounds(&mut wal);
-}
-
-fn range(wal: &mut GenericOrderWal<Person, String>) {
+fn range<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: Memtable<Key = Person, Value = String> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a>,
+  M::Error: std::fmt::Debug,
+{
   let mut mid = Person::random();
   let people = (0..100)
     .map(|idx| {
       let p = Person::random();
-      let v = format!("My name is {}", p.name);
+      let v = std::format!("My name is {}", p.name);
       wal.insert(&p, &v).unwrap();
 
       if idx == 500 {
@@ -255,76 +244,80 @@ fn range(wal: &mut GenericOrderWal<Person, String>) {
     })
    .collect::<BTreeMap<_, _>>();
 
-  let mut iter = wal.range(Bound::Included(&mid), Bound::Unbounded);
+  let mut iter = wal.range::<Person, _>(&mid..);
 
   for (pwal, pvec) in people.range(&mid..).zip(iter.by_ref()) {
     assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
     assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
   }
 
   assert!(iter.next().is_none());
 
-  let wal = wal.reader();
-  let mut iter = wal.range(Bound::Included(&mid), Bound::Unbounded);
-
+  let mut iter = wal.range_keys::<Person, _>(&mid..);
   for (pwal, pvec) in people.range(&mid..).zip(iter.by_ref()) {
-    assert!(pwal.0.equivalent(pvec.key()));
-    assert_eq!(&pwal.1, pvec.value());
+    assert!(pwal.0.equivalent(pvec.clone().key()));
   }
 
-  let mut rev_iter = wal.range(Bound::Included(&mid), Bound::Unbounded).rev();
+  assert!(iter.next().is_none());
+
+  let mut rev_iter = wal.range_keys::<Person, _>(&mid..).rev();
 
   for (pwal, pvec) in people.range(&mid..).rev().zip(rev_iter.by_ref()) {
     assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
+  }
+
+  let mut iter = wal.range_values::<Person, _>(&mid..);
+  for (pwal, pvec) in people.range(&mid..).zip(iter.by_ref()) {
+    assert_eq!(&pwal.1, pvec.clone().value());
+  }
+
+  assert!(iter.next().is_none());
+
+  let mut rev_iter = wal.range_values::<Person, _>(&mid..).rev();
+
+  for (pwal, pvec) in people.range(&mid..).rev().zip(rev_iter.by_ref()) {
     assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
   }
-}
 
-#[test]
-fn range_inmemory() {
-  let mut wal = GenericBuilder::new()
-    .with_capacity(MB)
-    .alloc::<Person, String>()
-    .unwrap();
-  range(&mut wal);
-}
+  let wal = wal.reader();
+  let mut iter = wal.range::<Person, _>(&mid..);
 
-#[test]
-fn range_map_anon() {
-  let mut wal = GenericBuilder::new()
-    .with_capacity(MB)
-    .map_anon::<Person, String>()
-    .unwrap();
-  range(&mut wal);
-}
+  for (pwal, pvec) in people.range(&mid..).zip(iter.by_ref()) {
+    assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
+    assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
+  }
 
-#[test]
-#[cfg_attr(miri, ignore)]
-fn range_map_file() {
-  let dir = tempdir().unwrap();
-  let path = dir.path().join("generic_wal_range_map_file");
-
-  let mut wal = unsafe {
-    GenericBuilder::new()
-      .with_capacity(MB)
-      .with_create_new(true)
-      .with_read(true)
-      .with_write(true)
-      .map_mut::<Person, String, _>(&path)
-      .unwrap()
-  };
+  let mut rev_iter = wal.range::<Person, _>(&mid..).rev();
 
-  range(&mut wal);
+  for (pwal, pvec) in people.range(&mid..).rev().zip(rev_iter.by_ref()) {
+    assert!(pwal.0.equivalent(pvec.key()));
+    assert!(pwal.0.to_vec().eq(pvec.raw_key()));
+    assert_eq!(&pwal.1, pvec.value());
+    assert_eq!(pwal.1.as_bytes(), pvec.raw_value());
+  }
 }
 
-fn entry_iter(wal: &mut GenericOrderWal<u32, u32>) {
+fn entry_iter<M>(wal: &mut OrderWal<u32, u32, M>)
+where
+  M: Memtable<Key = u32, Value = u32> + 'static,
+  for<'a> M::Item<'a>: MemtableEntry<'a> + std::fmt::Debug,
+  M::Error: std::fmt::Debug,
+{
   for i in 0..100u32 {
     wal.insert(&i, &i).unwrap();
   }
 
   let mut curr = wal.first();
+  #[cfg(feature = "std")]
+  std::println!("{:?}", curr);
   let mut cursor = 0;
-  while let Some(ent) = curr {
+  while let Some(mut ent) = curr {
     assert_eq!(ent.key(), &cursor);
     assert_eq!(ent.value(), &cursor);
    cursor += 1;
@@ -332,45 +325,75 @@
   }
 
   let curr = wal.last();
-  std::println!("{:?}", curr);
 
   let mut curr = curr.clone();
   let mut cursor = 100;
-  while let Some(ent) = curr {
+  while let Some(mut ent) = curr {
     cursor -= 1;
     assert_eq!(ent.key(), &cursor);
     assert_eq!(ent.value(), &cursor);
     curr = ent.prev();
   }
-}
 
-#[test]
-fn entry_iter_inmemory() {
-  let mut wal = GenericBuilder::new().with_capacity(MB).alloc().unwrap();
-  entry_iter(&mut wal);
-}
+  let mut curr = wal.keys().next();
+  #[cfg(feature = "std")]
+  std::println!("{:?}", curr);
+  let mut cursor = 0;
+  while let Some(mut ent) = curr {
+    assert_eq!(ent.key(), &cursor);
+    cursor += 1;
+    curr = ent.next();
+  }
 
-#[test]
-fn entry_iter_map_anon() {
-  let mut wal = GenericBuilder::new().with_capacity(MB).map_anon().unwrap();
-  entry_iter(&mut wal);
-}
+  let curr = wal.keys().next_back();
 
-#[test]
-#[cfg_attr(miri, ignore)]
-fn entry_iter_map_file() {
-  let dir = tempdir().unwrap();
-  let path = dir.path().join("generic_wal_entry_iter_map_file");
-
-  let mut wal = unsafe {
-    GenericBuilder::new()
-      .with_capacity(MB)
-      .with_create_new(true)
-      .with_read(true)
-      .with_write(true)
-      .map_mut(&path)
-      .unwrap()
-  };
+  let mut curr = curr.clone();
+  let mut cursor = 100;
+  while let Some(mut ent) = curr {
+    cursor -= 1;
+    assert_eq!(ent.key(), &cursor);
+    curr = ent.prev();
+  }
 
-  entry_iter(&mut wal);
+  let mut curr = wal.values().next();
+  #[cfg(feature = "std")]
+  std::println!("{:?}", curr);
+  let mut cursor = 0;
+  while let Some(mut ent) = curr {
+    assert_eq!(ent.value(), &cursor);
+    cursor += 1;
+    curr = ent.next();
+  }
+
+  let curr = wal.values().next_back();
+
+  let mut curr = curr.clone();
+  let mut cursor = 100;
+  while let Some(mut ent) = curr {
+    cursor -= 1;
+    assert_eq!(ent.value(), &cursor);
+    curr = ent.prev();
+  }
 }
+
+#[cfg(feature = "std")]
+expand_unit_tests!("linked": OrderWalAlternativeTable<u32, u32> [TableOptions::Linked]: Table<_, _> {
+  bounds,
+  entry_iter,
+});
+
+expand_unit_tests!("arena": OrderWalAlternativeTable<u32, u32> [TableOptions::Arena(Default::default())]: Table<_, _> {
+  bounds,
+  entry_iter,
+});
+
+#[cfg(feature = "std")]
+expand_unit_tests!("linked": OrderWalAlternativeTable<Person, String> [TableOptions::Linked]: Table<_, _> {
+  range,
+  iter,
+});
+
+expand_unit_tests!("arena": OrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: Table<_, _> {
+  range,
+  iter,
+});
diff --git a/src/swmr/tests/multiple_version_constructor.rs b/src/swmr/tests/multiple_version_constructor.rs
new file mode 100644
index 0000000..009a359
--- /dev/null
+++ b/src/swmr/tests/multiple_version_constructor.rs
@@ -0,0 +1,113 @@
+use multiple_version::{OrderWal, Reader, Writer};
+use skl::KeySize;
+
+use crate::memtable::{
+  alternative::{MultipleVersionTable, TableOptions},
+  MultipleVersionMemtable, VersionedMemtableEntry,
+};
+
+use super::*;
+
+fn zero_reserved<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: MultipleVersionMemtable<Key = Person, Value = String> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  unsafe {
+    assert_eq!(wal.reserved_slice(), b"");
+    assert_eq!(wal.reserved_slice_mut(), b"");
+
+    let wal = wal.reader();
+    assert_eq!(wal.reserved_slice(), b"");
+  }
+}
+
+fn reserved<M>(wal: &mut OrderWal<Person, String, M>)
+where
+  M: MultipleVersionMemtable<Key = Person, Value = String> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  unsafe {
+    let buf = wal.reserved_slice_mut();
+    buf.copy_from_slice(b"al8n");
+    assert_eq!(wal.reserved_slice(), b"al8n");
+    assert_eq!(wal.reserved_slice_mut(), b"al8n");
+
+    let wal = wal.reader();
+    assert_eq!(wal.reserved_slice(), b"al8n");
+  }
+}
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  "linked": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Linked]: MultipleVersionTable<_, _> {
+    zero_reserved,
+  }
+);
+
+#[cfg(feature = "std")]
+expand_unit_tests!(
+  "linked": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Linked]: MultipleVersionTable<_, _> {
+    reserved({
+      crate::Builder::new()
+        .with_capacity(MB)
+        .with_reserved(4)
+    }),
+  }
+);
+
+expand_unit_tests!(
+  "arena": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> {
+    zero_reserved,
+  }
+);
+
+expand_unit_tests!(
+  "arena": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> {
+    reserved({
+      crate::Builder::new()
+        .with_capacity(MB)
+        .with_reserved(4)
+    }),
+  }
+);
+
+#[test]
+#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+#[cfg_attr(miri, ignore)]
+fn reopen_wrong_kind() {
+  use crate::Builder;
+
+  let dir = tempfile::tempdir().unwrap();
+  let path = dir.path().join("test_reopen_wrong_kind");
+  let wal = unsafe {
+    Builder::new()
+      .with_capacity(MB)
+      .with_maximum_key_size(KeySize::with(10))
+      .with_maximum_value_size(10)
+      .with_create_new(true)
+      .with_read(true)
+      .with_write(true)
+      .map_mut::<MultipleVersionOrderWalAlternativeTable<Person, String>, _>(path.as_path())
+      .unwrap()
+  };
+
+  assert!(!wal.read_only());
+  assert_eq!(wal.capacity(), MB);
+  assert!(wal.remaining() < MB);
+  assert_eq!(wal.maximum_key_size(), 10);
+  assert_eq!(wal.maximum_value_size(), 10);
+  assert_eq!(wal.path().unwrap().as_path(), path.as_path());
+  assert_eq!(wal.options().maximum_key_size(), 10);
+
+  let err = unsafe {
+    Builder::new()
+      .with_capacity(MB)
+      .with_read(true)
+      .map_mut::<OrderWalAlternativeTable<Person, String>, _>(path.as_path())
+      .unwrap_err()
+  };
+  assert!(matches!(err, crate::error::Error::KindMismatch { .. }));
+}
diff --git a/src/swmr/tests/multiple_version_get.rs b/src/swmr/tests/multiple_version_get.rs
new file mode 100644
index 0000000..60fe719
--- /dev/null
+++ b/src/swmr/tests/multiple_version_get.rs
@@ -0,0 +1,724 @@
+use core::ops::Bound;
+
+use crate::{
+  memtable::{
+    alternative::{MultipleVersionTable, TableOptions},
+    MultipleVersionMemtable, VersionedMemtableEntry,
+  },
+  types::{KeyBuilder, ValueBuilder},
+};
+use dbutils::types::MaybeStructured;
+use multiple_version::{Reader, Writer};
+use skl::VacantBuffer;
+
+use super::*;
+
+#[cfg(feature = "std")]
+expand_unit_tests!("linked": MultipleVersionOrderWalAlternativeTable<str, str> [TableOptions::Linked]: MultipleVersionTable<_, _> {
+  mvcc,
+  gt,
+  ge,
+  le,
+  lt,
+});
+
+expand_unit_tests!("arena": MultipleVersionOrderWalAlternativeTable<str, str> [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> {
+  mvcc,
+  gt,
+  ge,
+  le,
+  lt,
+});
+
+#[cfg(feature = "std")]
+expand_unit_tests!("linked": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Linked]: MultipleVersionTable<_, _> {
+  insert,
+  insert_with_value_builder,
+  insert_with_key_builder,
+  insert_with_bytes,
+  insert_with_builders,
+});
+
+expand_unit_tests!("arena": MultipleVersionOrderWalAlternativeTable<Person, String> [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> {
+  insert,
+  insert_with_value_builder,
+  insert_with_key_builder,
+  insert_with_bytes,
+  insert_with_builders,
+});
+
+fn mvcc<M>(wal: &mut multiple_version::OrderWal<str, str, M>)
+where
+  M: MultipleVersionMemtable<Key = str, Value = str> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  wal.insert(1, "a", "a1").unwrap();
+  wal.insert(3, "a", "a2").unwrap();
+  wal.insert(1, "c", "c1").unwrap();
+  wal.insert(3, "c", "c2").unwrap();
+
+  let ent = wal.get(1, "a").unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.get(2, "a").unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.get(3, "a").unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.get(4, "a").unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  assert!(wal.get(0, "b").is_none());
+  assert!(wal.get(1, "b").is_none());
+  assert!(wal.get(2, "b").is_none());
+  assert!(wal.get(3, "b").is_none());
+  assert!(wal.get(4, "b").is_none());
+
+  let ent = wal.get(1, "c").unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.get(2, "c").unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.get(3, "c").unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.get(4, "c").unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  assert!(wal.get(5, "d").is_none());
+}
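+
+// The bound helpers below (`gt`, `ge`, `le`, `lt`) probe `lower_bound` and
+// `upper_bound` at every version against the same fixed dataset: a query at
+// read version `v` only observes entries stamped with a version <= `v`.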
+
+fn gt<M>(wal: &mut multiple_version::OrderWal<str, str, M>)
+where
+  M: MultipleVersionMemtable<Key = str, Value = str> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  wal.insert(1, "a", "a1").unwrap();
+  wal.insert(3, "a", "a2").unwrap();
+  wal.insert(1, "c", "c1").unwrap();
+  wal.insert(3, "c", "c2").unwrap();
+  wal.insert(5, "c", "c3").unwrap();
+
+  assert!(wal.lower_bound(0, Bound::Excluded("a")).is_none());
+  assert!(wal.lower_bound(0, Bound::Excluded("b")).is_none());
+  assert!(wal.lower_bound(0, Bound::Excluded("c")).is_none());
+
+  let ent = wal.lower_bound(1, Bound::Excluded("")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(2, Bound::Excluded("")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(3, Bound::Excluded("")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(1, Bound::Excluded("a")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(2, Bound::Excluded("a")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(3, Bound::Excluded("a")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(1, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(2, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(3, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(4, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(5, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c3");
+  assert_eq!(ent.version(), 5);
+
+  let ent = wal.lower_bound(6, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c3");
+  assert_eq!(ent.version(), 5);
+
+  assert!(wal.lower_bound(1, Bound::Excluded("c")).is_none());
+  assert!(wal.lower_bound(2, Bound::Excluded("c")).is_none());
+  assert!(wal.lower_bound(3, Bound::Excluded("c")).is_none());
+  assert!(wal.lower_bound(4, Bound::Excluded("c")).is_none());
+  assert!(wal.lower_bound(5, Bound::Excluded("c")).is_none());
+  assert!(wal.lower_bound(6, Bound::Excluded("c")).is_none());
+}
+
+fn ge<M>(wal: &mut multiple_version::OrderWal<str, str, M>)
+where
+  M: MultipleVersionMemtable<Key = str, Value = str> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  wal.insert(1, "a", "a1").unwrap();
+  wal.insert(3, "a", "a2").unwrap();
+  wal.insert(1, "c", "c1").unwrap();
+  wal.insert(3, "c", "c2").unwrap();
+
+  assert!(wal.lower_bound(0, Bound::Included("a")).is_none());
+  assert!(wal.lower_bound(0, Bound::Included("b")).is_none());
+  assert!(wal.lower_bound(0, Bound::Included("c")).is_none());
+
+  let ent = wal.lower_bound(1, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(2, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(3, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(4, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(1, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(2, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.lower_bound(3, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.lower_bound(4, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  assert!(wal.lower_bound(0, Bound::Included("d")).is_none());
+  assert!(wal.lower_bound(1, Bound::Included("d")).is_none());
+  assert!(wal.lower_bound(2, Bound::Included("d")).is_none());
+  assert!(wal.lower_bound(3, Bound::Included("d")).is_none());
+  assert!(wal.lower_bound(4, Bound::Included("d")).is_none());
+}
+
+fn le<M>(wal: &mut multiple_version::OrderWal<str, str, M>)
+where
+  M: MultipleVersionMemtable<Key = str, Value = str> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  wal.insert(1, "a", "a1").unwrap();
+  wal.insert(3, "a", "a2").unwrap();
+  wal.insert(1, "c", "c1").unwrap();
+  wal.insert(3, "c", "c2").unwrap();
+
+  assert!(wal.upper_bound(0, Bound::Included("a")).is_none());
+  assert!(wal.upper_bound(0, Bound::Included("b")).is_none());
+  assert!(wal.upper_bound(0, Bound::Included("c")).is_none());
+
+  let ent = wal.upper_bound(1, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Included("a")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(1, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Included("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(1, Bound::Included("c")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Included("c")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Included("c")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Included("c")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(1, Bound::Included("d")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Included("d")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c1");
+  assert_eq!(ent.raw_value(), b"c1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Included("d")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Included("d")).unwrap();
+  assert_eq!(ent.key(), "c");
+  assert_eq!(ent.raw_key(), b"c");
+  assert_eq!(ent.value(), "c2");
+  assert_eq!(ent.raw_value(), b"c2");
+  assert_eq!(ent.version(), 3);
+}
+
+fn lt<M>(wal: &mut multiple_version::OrderWal<str, str, M>)
+where
+  M: MultipleVersionMemtable<Key = str, Value = str> + 'static,
+  M::Error: std::fmt::Debug,
+  for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug,
+{
+  wal.insert(1, "a", "a1").unwrap();
+  wal.insert(3, "a", "a2").unwrap();
+  wal.insert(1, "c", "c1").unwrap();
+  wal.insert(3, "c", "c2").unwrap();
+
+  assert!(wal.upper_bound(0, Bound::Excluded("a")).is_none());
+  assert!(wal.upper_bound(0, Bound::Excluded("b")).is_none());
+  assert!(wal.upper_bound(0, Bound::Excluded("c")).is_none());
+  assert!(wal.upper_bound(1, Bound::Excluded("a")).is_none());
+  assert!(wal.upper_bound(2, Bound::Excluded("a")).is_none());
+
+  let ent = wal.upper_bound(1, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Excluded("b")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(1, Bound::Excluded("c")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(2, Bound::Excluded("c")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a1");
+  assert_eq!(ent.raw_value(), b"a1");
+  assert_eq!(ent.version(), 1);
+
+  let ent = wal.upper_bound(3, Bound::Excluded("c")).unwrap();
+  assert_eq!(ent.key(), "a");
+  assert_eq!(ent.raw_key(), b"a");
+  assert_eq!(ent.value(), "a2");
+  assert_eq!(ent.raw_value(), b"a2");
+  assert_eq!(ent.version(), 3);
+
+  let ent = wal.upper_bound(4, Bound::Excluded("c")).unwrap();
+  assert_eq!(ent.key(), "a");
b"a"); + assert_eq!(ent.value(), "a2"); + assert_eq!(ent.raw_value(), b"a2"); + assert_eq!(ent.version(), 3); + + let ent = wal.upper_bound(1, Bound::Excluded("d")).unwrap(); + assert_eq!(ent.key(), "c"); + assert_eq!(ent.raw_key(), b"c"); + assert_eq!(ent.value(), "c1"); + assert_eq!(ent.raw_value(), b"c1"); + assert_eq!(ent.version(), 1); + + let ent = wal.upper_bound(2, Bound::Excluded("d")).unwrap(); + assert_eq!(ent.key(), "c"); + assert_eq!(ent.raw_key(), b"c"); + assert_eq!(ent.value(), "c1"); + assert_eq!(ent.raw_value(), b"c1"); + assert_eq!(ent.version(), 1); + + let ent = wal.upper_bound(3, Bound::Excluded("d")).unwrap(); + assert_eq!(ent.key(), "c"); + assert_eq!(ent.raw_key(), b"c"); + assert_eq!(ent.value(), "c2"); + assert_eq!(ent.raw_value(), b"c2"); + assert_eq!(ent.version(), 3); + + let ent = wal.upper_bound(4, Bound::Excluded("d")).unwrap(); + assert_eq!(ent.key(), "c"); + assert_eq!(ent.raw_key(), b"c"); + assert_eq!(ent.value(), "c2"); + assert_eq!(ent.raw_value(), b"c2"); + assert_eq!(ent.version(), 3); +} + +#[allow(clippy::needless_borrows_for_generic_args)] +fn insert(wal: &mut OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + let people = (0..100) + .map(|_| { + let p = Person::random(); + let v = std::format!("My name is {}", p.name); + wal.insert(0, &p, &v).unwrap(); + (p, v) + }) + .collect::>(); + + for (p, pv) in &people { + assert!(wal.contains_key(0, p)); + + assert_eq!(wal.get(0, p).unwrap().value(), pv); + } +} + +fn insert_with_value_builder(wal: &mut OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + let people = (0..100) + .map(|_| { + let p = Person::random(); + let v = std::format!("My name is {}", p.name); + wal + .insert_with_value_builder( + 0, + &p, + ValueBuilder::once(v.len(), |buf: &mut VacantBuffer<'_>| { + buf.put_slice(v.as_bytes()).map(|_| v.len()) + }), + ) + .unwrap(); + (p, v) + }) + .collect::>(); + + for (p, _) in &people { + assert!(wal.contains_key(0, p)); + assert!(wal.contains_key(0, &p.as_ref())); + } +} + +fn insert_with_key_builder(wal: &mut OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + let people = (0..100) + .map(|_| { + let p = Person::random(); + let v = std::format!("My name is {}", p.name); + wal + .insert_with_key_builder( + 0, + KeyBuilder::once(p.encoded_len(), |buf| p.encode_to_buffer(buf)), + &v, + ) + .unwrap(); + (p, v) + }) + .collect::>(); + + for (p, pv) in &people { + assert!(wal.contains_key(0, p)); + assert_eq!(wal.get(0, p).unwrap().value(), pv); + } +} + +fn insert_with_bytes(wal: &mut OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + let people = (0..100) + .map(|_| { + let p = Person::random(); + let v = std::format!("My name is {}", p.name); + unsafe { + wal + .insert( + 0, + MaybeStructured::from_slice(p.to_vec().as_slice()), + MaybeStructured::from_slice(v.as_bytes()), + ) + .unwrap(); + } + (p, v) + }) + .collect::>(); + + for (p, pv) in &people { + assert!(wal.contains_key(0, p)); + assert!(wal.contains_key(0, &p.as_ref())); + assert_eq!(wal.get(0, p).unwrap().value(), pv); + } +} + +fn insert_with_builders(wal: &mut OrderWal) +where + M: MultipleVersionMemtable + 
'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + let people = (0..1) + .map(|_| { + let p = Person::random(); + let pvec = p.to_vec(); + let v = std::format!("My name is {}", p.name); + wal + .insert_with_builders( + 0, + KeyBuilder::new(pvec.len(), |buf: &mut VacantBuffer<'_>| { + p.encode_to_buffer(buf) + }), + ValueBuilder::new(v.len(), |buf: &mut VacantBuffer<'_>| { + buf.put_slice(v.as_bytes()).map(|_| v.len()) + }), + ) + .unwrap(); + (p, pvec, v) + }) + .collect::>(); + + for (p, pvec, pv) in &people { + assert!(wal.contains_key(0, p)); + assert!(wal.contains_key_versioned(0, p)); + assert_eq!(wal.get(0, p).unwrap().value(), pv); + assert_eq!(wal.get_versioned(0, p).unwrap().value().unwrap(), pv); + + unsafe { + assert!(wal.contains_key_by_bytes(0, pvec)); + assert!(wal.contains_key_versioned_by_bytes(0, pvec)); + assert_eq!(wal.get_by_bytes(0, pvec.as_ref()).unwrap().value(), pv); + assert_eq!( + wal + .get_versioned_by_bytes(0, pvec) + .unwrap() + .value() + .unwrap(), + pv + ); + } + } +} diff --git a/src/swmr/tests/multiple_version_insert.rs b/src/swmr/tests/multiple_version_insert.rs new file mode 100644 index 0000000..8d295f2 --- /dev/null +++ b/src/swmr/tests/multiple_version_insert.rs @@ -0,0 +1,549 @@ +use dbutils::{buffer::VacantBuffer, types::MaybeStructured}; +use multiple_version::{Reader, Writer}; + +use crate::{ + batch::BatchEntry, + memtable::{ + alternative::{MultipleVersionTable, TableOptions}, + MultipleVersionMemtable, VersionedMemtableEntry, + }, + types::{KeyBuilder, ValueBuilder}, + Builder, +}; + +use super::*; + +#[cfg(feature = "std")] +fn concurrent_basic(mut w: OrderWal) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + let readers = (0..100u32).map(|i| (i, w.reader())).collect::>(); + + let handles = readers.into_iter().map(|(i, reader)| { + spawn(move || loop { + if let Some(p) = reader.get(0, &i) { + assert_eq!(p.key(), &i); + assert_eq!(p.value(), &i.to_le_bytes()); + break; + } + }) + }); + + spawn(move || { + for i in 0..100u32 { + #[allow(clippy::needless_borrows_for_generic_args)] + w.insert(0, &i, &i.to_le_bytes()).unwrap(); + } + }); + + for handle in handles { + handle.join().unwrap(); + } +} + +#[cfg(feature = "std")] +fn concurrent_one_key(mut w: OrderWal) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + let readers = (0..100u32).map(|i| (i, w.reader())).collect::>(); + let handles = readers.into_iter().map(|(_, reader)| { + spawn(move || loop { + if let Some(p) = reader.get(0, &1) { + assert_eq!(p.key(), &1); + assert_eq!(p.value(), &1u32.to_le_bytes()); + break; + } + }) + }); + + w.insert(0, &1, &1u32.to_le_bytes()).unwrap(); + + for handle in handles { + handle.join().unwrap(); + } +} + +fn insert_batch(mut wal: OrderWal) -> (Person, Vec<(Person, String)>, Person) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + const N: u32 = 5; + + let mut batch = vec![]; + let output = (0..N) + .map(|i| { + ( + { + let mut p = Person::random(); + p.id = i as u64; + p + }, + std::format!("My id is {i}"), + ) + .clone() + }) + .collect::>(); + + for (person, val) in output.iter() { + batch.push(BatchEntry::with_version( + 0, + MaybeStructured::from(person), + MaybeStructured::from(val), + )); + } + + let rp1 = Person::random(); 
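+  // The batch above holds five Person/String pairs, each tagged with version 0
+  // via `BatchEntry::with_version(0, MaybeStructured::from(person), MaybeStructured::from(val))`.
+  // `insert_batch` writes them all through one call; the two plain `insert`s
+  // for `rp1` (before) and `rp2` (after) bracket the batch so reads on both
+  // sides of it can be verified.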
+ wal.insert(0, &rp1, &"rp1".to_string()).unwrap(); + wal.insert_batch(&mut batch).unwrap(); + let rp2 = Person::random(); + wal.insert(0, &rp2, &"rp2".to_string()).unwrap(); + + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + let wal = wal.reader(); + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + (rp1, output, rp2) +} + +fn insert_batch_with_key_builder( + mut wal: OrderWal, +) -> (Person, Vec<(Person, String)>, Person) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + const N: u32 = 5; + + let mut batch = vec![]; + let output = (0..N) + .map(|i| { + ( + { + let mut p = Person::random(); + p.id = i as u64; + p + }, + std::format!("My id is {i}"), + ) + .clone() + }) + .collect::>(); + + for (person, val) in output.iter() { + batch.push(BatchEntry::with_version( + 0, + KeyBuilder::new(person.encoded_len(), |buf: &mut VacantBuffer<'_>| { + buf.set_len(person.encoded_len()); + person.encode(buf) + }), + MaybeStructured::from(val), + )); + } + + let rp1 = Person::random(); + wal.insert(0, &rp1, &"rp1".to_string()).unwrap(); + wal.insert_batch_with_key_builder(&mut batch).unwrap(); + let rp2 = Person::random(); + wal.insert(0, &rp2, &"rp2".to_string()).unwrap(); + + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + let wal = wal.reader(); + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + (rp1, output, rp2) +} + +fn insert_batch_with_value_builder( + mut wal: OrderWal, +) -> (Person, Vec<(Person, String)>, Person) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + const N: u32 = 5; + + let mut batch = vec![]; + let output = (0..N) + .map(|i| { + ( + { + let mut p = Person::random(); + p.id = i as u64; + p + }, + std::format!("My id is {i}"), + ) + .clone() + }) + .collect::>(); + + for (person, val) in output.iter() { + batch.push(BatchEntry::with_version( + 0, + person.into(), + ValueBuilder::new(val.len(), |buf: &mut VacantBuffer<'_>| { + buf.put_slice(val.as_bytes()).map(|_| val.len()) + }), + )); + } + + let rp1 = Person::random(); + wal.insert(0, &rp1, &"rp1".to_string()).unwrap(); + wal.insert_batch_with_value_builder(&mut batch).unwrap(); + let rp2 = Person::random(); + wal.insert(0, &rp2, &"rp2".to_string()).unwrap(); + + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + let wal = wal.reader(); + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + (rp1, output, rp2) +} + +fn insert_batch_with_builders( + mut wal: OrderWal, +) -> (Person, Vec<(Person, String)>, Person) +where + M: 
MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + const N: u32 = 5; + + let mut batch = vec![]; + let output = (0..N) + .map(|i| { + ( + { + let mut p = Person::random(); + p.id = i as u64; + p + }, + std::format!("My id is {i}"), + ) + .clone() + }) + .collect::>(); + + for (person, val) in output.iter() { + batch.push(BatchEntry::with_version( + 0, + KeyBuilder::new(person.encoded_len(), |buf: &mut VacantBuffer<'_>| { + buf.set_len(person.encoded_len()); + person.encode(buf) + }), + ValueBuilder::new(val.len(), |buf: &mut VacantBuffer<'_>| { + buf.put_slice(val.as_bytes()).map(|_| val.len()) + }), + )); + } + + let rp1 = Person::random(); + wal.insert(0, &rp1, &"rp1".to_string()).unwrap(); + wal.insert_batch_with_builders(&mut batch).unwrap(); + let rp2 = Person::random(); + wal.insert(0, &rp2, &"rp2".to_string()).unwrap(); + + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + let wal = wal.reader(); + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + (rp1, output, rp2) +} + +fn insert_batch_with_tombstone( + mut wal: OrderWal, +) -> (Person, Vec<(Person, String)>, Person) +where + M: MultipleVersionMemtable + Send + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + M::Error: std::fmt::Debug, +{ + const N: u32 = 5; + + let mut batch = vec![]; + let output = (0..N) + .map(|i| { + ( + { + let mut p = Person::random(); + p.id = i as u64; + p + }, + std::format!("My id is {i}"), + ) + .clone() + }) + .collect::>(); + + for (person, val) in output.iter() { + batch.push(BatchEntry::with_version( + 0, + MaybeStructured::from(person), + MaybeStructured::from(val), + )); + + batch.push(BatchEntry::tombstone_with_version( + 1, + MaybeStructured::from(person), + )); + } + + let rp1 = Person::random(); + wal.insert(0, &rp1, &"rp1".to_string()).unwrap(); + wal.insert_batch(&mut batch).unwrap(); + let rp2 = Person::random(); + wal.insert(0, &rp2, &"rp2".to_string()).unwrap(); + + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + for (p, _) in output.iter() { + assert!(wal.get(1, p).is_none()); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + let wal = wal.reader(); + for (p, val) in output.iter() { + assert_eq!(wal.get(0, p).unwrap().value(), val); + } + + assert_eq!(wal.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(wal.get(0, &rp2).unwrap().value(), "rp2"); + + (rp1, output, rp2) +} + +#[cfg(feature = "std")] +expand_unit_tests!( + move "linked": MultipleVersionOrderWalAlternativeTable [TableOptions::Linked]: MultipleVersionTable<_, _> { + concurrent_basic |p, _res| { + let wal = unsafe { Builder::new().map::, _>(p).unwrap() }; + + for i in 0..100u32 { + assert!(wal.contains_key(0, &i)); + } + }, + concurrent_one_key |p, _res| { + let wal = unsafe { Builder::new().map::, _>(p).unwrap() }; + assert!(wal.contains_key(0, &1)); + }, + } +); + +#[cfg(feature = "std")] +expand_unit_tests!( + move "linked": MultipleVersionOrderWalAlternativeTable [TableOptions::Linked]: MultipleVersionTable<_, _> { + insert_batch |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + 
.unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_tombstone |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + assert!(map.get(1, &p).is_none()); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_key_builder |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::>::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_value_builder |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_builders |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + } + } +); + +#[cfg(feature = "std")] +expand_unit_tests!( + move "arena": MultipleVersionOrderWalAlternativeTable [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> { + concurrent_basic |p, _res| { + let wal = unsafe { Builder::new().map::, _>(p).unwrap() }; + + for i in 0..100u32 { + assert!(wal.contains_key(0, &i)); + } + }, + concurrent_one_key |p, _res| { + let wal = unsafe { Builder::new().map::, _>(p).unwrap() }; + assert!(wal.contains_key(0, &1)); + }, + } +); + +expand_unit_tests!( + move "arena": MultipleVersionOrderWalAlternativeTable [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> { + insert_batch |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_tombstone |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + assert!(map.get(1, &p).is_none()); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_key_builder |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + insert_batch_with_value_builder |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + }, + 
insert_batch_with_builders |p, (rp1, data, rp2)| { + let map = unsafe { + Builder::new() + .map::, _>(&p) + .unwrap() + }; + + for (p, val) in data { + assert_eq!(map.get(0, &p).unwrap().value(), &val); + } + assert_eq!(map.get(0, &rp1).unwrap().value(), "rp1"); + assert_eq!(map.get(0, &rp2).unwrap().value(), "rp2"); + } + } +); diff --git a/src/swmr/tests/multiple_version_iters.rs b/src/swmr/tests/multiple_version_iters.rs new file mode 100644 index 0000000..5b4a074 --- /dev/null +++ b/src/swmr/tests/multiple_version_iters.rs @@ -0,0 +1,556 @@ +use core::ops::Bound; + +use crate::memtable::{ + alternative::{MultipleVersionTable, TableOptions}, + MultipleVersionMemtable, VersionedMemtableEntry, +}; +use multiple_version::{Reader, Writer}; + +use super::*; + +#[cfg(feature = "std")] +expand_unit_tests!("linked": MultipleVersionOrderWalAlternativeTable [TableOptions::Linked]: MultipleVersionTable<_, _> { + iter_all_versions_mvcc, +}); + +expand_unit_tests!("arena": MultipleVersionOrderWalAlternativeTable [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> { + iter_all_versions_mvcc, +}); + +#[cfg(feature = "std")] +expand_unit_tests!("linked": MultipleVersionOrderWalAlternativeTable [TableOptions::Linked]: MultipleVersionTable<_, _> { + iter_next, + iter_all_versions_next_by_entry, + iter_all_versions_next_by_versioned_entry, + range_next, + iter_prev, + range_prev, + iter_all_versions_prev_by_entry, + iter_all_versions_prev_by_versioned_entry, +}); + +macro_rules! arena_builder { + () => {{ + crate::Builder::new() + .with_memtable_options( + crate::memtable::arena::TableOptions::new() + .with_capacity(1024 * 1024) + .into(), + ) + .with_capacity(8 * 1024) + }}; +} + +expand_unit_tests!("arena": MultipleVersionOrderWalAlternativeTable [TableOptions::Arena(Default::default())]: MultipleVersionTable<_, _> { + iter_next(arena_builder!()), + iter_all_versions_next_by_entry(arena_builder!()), + iter_all_versions_next_by_versioned_entry(arena_builder!()), + range_next(arena_builder!()), + iter_prev(arena_builder!()), + range_prev(arena_builder!()), + iter_all_versions_prev_by_entry(arena_builder!()), + iter_all_versions_prev_by_versioned_entry(arena_builder!()), +}); + +fn make_int_key(i: usize) -> String { + ::std::format!("{:05}", i) +} + +fn make_value(i: usize) -> String { + ::std::format!("v{:05}", i) +} + +fn iter_all_versions_mvcc(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + wal.insert(1, "a", "a1").unwrap(); + wal.insert(3, "a", "a2").unwrap(); + wal.insert(1, "c", "c1").unwrap(); + wal.insert(3, "c", "c2").unwrap(); + + let mut iter = wal.iter_all_versions(0); + let mut num = 0; + while iter.next().is_some() { + num += 1; + } + assert_eq!(num, 0); + + let mut iter = wal.iter_all_versions(1); + let mut num = 0; + while iter.next().is_some() { + num += 1; + } + assert_eq!(num, 2); + + let mut iter = wal.iter_all_versions(2); + let mut num = 0; + while iter.next().is_some() { + num += 1; + } + assert_eq!(num, 2); + + let mut iter = wal.iter_all_versions(3); + let mut num = 0; + while iter.next().is_some() { + num += 1; + } + assert_eq!(num, 4); + + let upper_bound = wal.upper_bound(1, Bound::Included("b")).unwrap(); + assert_eq!(upper_bound.value(), "a1"); + + let upper_bound = wal.upper_bound_versioned(1, Bound::Included("b")).unwrap(); + assert_eq!(upper_bound.value().unwrap(), "a1"); + + let upper_bound = unsafe { 
wal.upper_bound_by_bytes(1, Bound::Included(b"b")).unwrap() }; + assert_eq!(upper_bound.value(), "a1"); + + let upper_bound = unsafe { + wal + .upper_bound_versioned_by_bytes(1, Bound::Included(b"b")) + .unwrap() + }; + assert_eq!(upper_bound.value().unwrap(), "a1"); + + let lower_bound = wal.lower_bound(1, Bound::Included("b")).unwrap(); + assert_eq!(lower_bound.value(), "c1"); + + let lower_bound = wal.lower_bound_versioned(1, Bound::Included("b")).unwrap(); + assert_eq!(lower_bound.value().unwrap(), "c1"); + + let lower_bound = unsafe { wal.lower_bound_by_bytes(1, Bound::Included(b"b")).unwrap() }; + assert_eq!(lower_bound.value(), "c1"); + + let lower_bound = unsafe { + wal + .lower_bound_versioned_by_bytes(1, Bound::Included(b"b")) + .unwrap() + }; + assert_eq!(lower_bound.value().unwrap(), "c1"); +} + +fn iter_next(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in (0..N).rev() { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let iter = wal.iter_all_versions(0); + + let mut i = 0; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.value().unwrap(), make_value(i).as_str()); + assert_eq!(ent.raw_value().unwrap(), make_value(i).as_bytes()); + i += 1; + } + + assert_eq!(i, N); + + let iter = wal.iter(0); + let mut i = 0; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + i += 1; + } + + assert_eq!(i, N); + + let iter = wal.values(0); + + let mut i = 0; + for ent in iter { + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + i += 1; + } + + assert_eq!(i, N); + + let iter = wal.keys(0); + let mut i = 0; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + i += 1; + } + + assert_eq!(i, N); +} + +fn iter_all_versions_next_by_entry(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in (0..N).rev() { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let mut ent = wal.first(0).clone(); + #[cfg(feature = "std")] + std::println!("{ent:?}"); + let mut i = 0; + while let Some(ref mut entry) = ent { + assert_eq!(entry.key(), make_int_key(i).as_str()); + assert_eq!(entry.value(), make_value(i).as_str()); + ent = entry.next(); + i += 1; + } + assert_eq!(i, N); + + let mut ent = wal.keys(0).next().clone(); + #[cfg(feature = "std")] + std::println!("{ent:?}"); + + let mut i = 0; + while let Some(ref mut entry) = ent { + assert_eq!(entry.key(), make_int_key(i).as_str()); + ent = entry.next(); + i += 1; + } + assert_eq!(i, N); + + let mut ent = wal.values(0).next().clone(); + #[cfg(feature = "std")] + std::println!("{ent:?}"); + + let mut i = 0; + while let Some(ref mut entry) = ent { + assert_eq!(entry.value(), make_value(i).as_str()); + ent = entry.next(); + i += 1; + } + assert_eq!(i, N); +} + +fn iter_all_versions_next_by_versioned_entry( + wal: &mut multiple_version::OrderWal, +) where + M: MultipleVersionMemtable + 
'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, + for<'a> M::VersionedItem<'a>: std::fmt::Debug, +{ + const N: usize = 100; + + for i in 0..N { + let k = make_int_key(i); + let v = make_value(i); + wal.insert(0, &k, &v).unwrap(); + wal.remove(1, &k).unwrap(); + } + + let mut ent = wal.first(0).clone(); + let mut i = 0; + while let Some(ref mut entry) = ent { + assert_eq!(entry.key(), make_int_key(i).as_str()); + assert_eq!(entry.value(), make_value(i).as_str()); + ent = entry.next(); + i += 1; + } + assert_eq!(i, N); + + let mut ent = wal.first_versioned(1).clone(); + #[cfg(feature = "std")] + std::println!("{ent:?}"); + let mut i = 0; + while let Some(ref mut entry) = ent { + if i % 2 == 1 { + assert_eq!(entry.version(), 0); + assert_eq!(entry.key(), make_int_key(i / 2).as_str()); + assert_eq!(entry.value().unwrap(), make_value(i / 2).as_str()); + } else { + assert_eq!(entry.version(), 1); + assert_eq!(entry.key(), make_int_key(i / 2).as_str()); + assert!(entry.value().is_none()); + } + + ent = entry.next(); + i += 1; + } + assert_eq!(i, N * 2); + let ent = wal.first(1); + assert!(ent.is_none()); +} + +fn range_next(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in (0..N).rev() { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let upper = make_int_key(50); + let mut i = 0; + let mut iter = wal.range(0, ..=upper.as_str()); + for ent in &mut iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + i += 1; + } + + assert_eq!(i, 51); + + let mut i = 0; + let mut iter = wal.range_all_versions(0, ..=upper.as_str()); + for ent in &mut iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.value().unwrap(), make_value(i).as_str()); + assert_eq!(ent.raw_value().unwrap(), make_value(i).as_bytes()); + i += 1; + } + + assert_eq!(i, 51); + + let mut i = 0; + let mut iter = wal.range_keys(0, ..=upper.as_str()); + for ent in &mut iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + i += 1; + } + + assert_eq!(i, 51); + + let mut i = 0; + let mut iter = wal.range_values(0, ..=upper.as_str()); + for ent in &mut iter { + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + i += 1; + } + assert_eq!(i, 51); +} + +fn iter_prev(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in 0..N { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let iter = wal.iter_all_versions(0).rev(); + let mut i = N; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i - 1).as_str()); + assert_eq!(ent.value().unwrap(), make_value(i - 1).as_str()); + i -= 1; + } + + assert_eq!(i, 0); + + let iter = wal.iter(0).rev(); + let mut i = N; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i - 1).as_str()); + assert_eq!(ent.value(), make_value(i - 1).as_str()); + i -= 1; + } + + assert_eq!(i, 0); + + let iter = wal.values(0).rev(); + let 
mut i = N; + for ent in iter { + assert_eq!(ent.value(), make_value(i - 1).as_str()); + i -= 1; + } + + assert_eq!(i, 0); + + let iter = wal.keys(0).rev(); + let mut i = N; + for ent in iter { + assert_eq!(ent.key(), make_int_key(i - 1).as_str()); + i -= 1; + } + + assert_eq!(i, 0); +} + +fn iter_all_versions_prev_by_entry(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in 0..N { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let mut ent = wal.last(0); + + let mut i = 0; + while let Some(ref mut entry) = ent { + i += 1; + assert_eq!(entry.key(), make_int_key(N - i).as_str()); + assert_eq!(entry.value(), make_value(N - i).as_str()); + ent = entry.prev(); + } + assert_eq!(i, N); + + let mut ent = wal.values(0).next_back(); + + let mut i = 0; + while let Some(ref mut entry) = ent { + i += 1; + assert_eq!(entry.value(), make_value(N - i).as_str()); + ent = entry.prev(); + } + + assert_eq!(i, N); + + let mut ent = wal.keys(0).next_back(); + + let mut i = 0; + while let Some(ref mut entry) = ent { + i += 1; + assert_eq!(entry.key(), make_int_key(N - i).as_str()); + ent = entry.prev(); + } + + assert_eq!(i, N); +} + +fn iter_all_versions_prev_by_versioned_entry( + wal: &mut multiple_version::OrderWal, +) where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, + for<'a> M::VersionedItem<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in 0..N { + let k = make_int_key(i); + let v = make_value(i); + wal.insert(0, &k, &v).unwrap(); + wal.remove(1, &k).unwrap(); + } + + let mut ent = wal.last(0); + let mut i = 0; + while let Some(ref mut entry) = ent { + i += 1; + assert_eq!(entry.key(), make_int_key(N - i).as_str()); + assert_eq!(entry.value(), make_value(N - i).as_str()); + ent = entry.prev(); + } + assert_eq!(i, N); + + let mut ent = wal.last_versioned(1); + let mut i = 0; + while let Some(ref mut entry) = ent { + if i % 2 == 0 { + assert_eq!(entry.version(), 0); + assert_eq!(entry.key(), make_int_key(N - 1 - i / 2).as_str()); + assert_eq!(entry.value().unwrap(), make_value(N - 1 - i / 2).as_str()); + } else { + assert_eq!(entry.version(), 1); + assert_eq!(entry.key(), make_int_key(N - 1 - i / 2).as_str()); + assert!(entry.value().is_none()); + } + + ent = entry.prev(); + i += 1; + } + + assert_eq!(i, N * 2); + let ent = wal.last(1); + assert!(ent.is_none()); +} + +fn range_prev(wal: &mut multiple_version::OrderWal) +where + M: MultipleVersionMemtable + 'static, + M::Error: std::fmt::Debug, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a> + std::fmt::Debug, +{ + const N: usize = 100; + + for i in 0..N { + wal.insert(0, &make_int_key(i), &make_value(i)).unwrap(); + } + + let lower = make_int_key(50); + let it = wal.range(0, lower.as_str()..).rev(); + let mut i = N - 1; + + for ent in it { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + assert_eq!(ent.version(), 0); + i -= 1; + } + + assert_eq!(i, 49); + + let it = wal.range_all_versions(0, lower.as_str()..).rev(); + let mut i = N - 1; + + for ent in it { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + 
assert_eq!(ent.value().unwrap(), make_value(i).as_str()); + assert_eq!(ent.raw_value().unwrap(), make_value(i).as_bytes()); + assert_eq!(ent.version(), 0); + i -= 1; + } + + assert_eq!(i, 49); + + let mut i = N - 1; + let mut iter = wal.range_keys(0, lower.as_str()..).rev(); + for ent in &mut iter { + assert_eq!(ent.key(), make_int_key(i).as_str()); + assert_eq!(ent.raw_key(), make_int_key(i).as_bytes()); + assert_eq!(ent.version(), 0); + i -= 1; + } + assert_eq!(i, 49); + + let mut i = N - 1; + let mut iter = wal.range_values(0, lower.as_str()..).rev(); + for ent in &mut iter { + assert_eq!(ent.value(), make_value(i).as_str()); + assert_eq!(ent.raw_value(), make_value(i).as_bytes()); + assert_eq!(ent.version(), 0); + i -= 1; + } + assert_eq!(i, 49); +} diff --git a/src/swmr/wal.rs b/src/swmr/wal.rs index b8dd40b..5c5ce8c 100644 --- a/src/swmr/wal.rs +++ b/src/swmr/wal.rs @@ -1,418 +1,77 @@ -use crossbeam_skiplist::SkipSet; +use core::marker::PhantomData; -use crate::{ - error::Error, - pointer::Pointer, - wal::sealed::{Constructor, Sealed, WalCore}, - Ascend, Options, -}; -use dbutils::checksum::{BuildChecksumer, Crc32}; -use rarena_allocator::{either::Either, Allocator}; - -pub use crate::{ - builder::Builder, - wal::{Batch, BatchWithBuilders, BatchWithKeyBuilder, BatchWithValueBuilder, ImmutableWal, Wal}, - Comparator, KeyBuilder, VacantBuffer, ValueBuilder, -}; -pub use dbutils::CheapClone; - -use core::{borrow::Borrow, marker::PhantomData, ops::Bound}; use rarena_allocator::sync::Arena; -use std::sync::Arc; - -mod reader; -pub use reader::*; - -mod iter; -pub use iter::*; - -#[cfg(all( - test, - any( - all_tests, - test_swmr_constructor, - test_swmr_insert, - test_swmr_get, - test_swmr_iters, - ) -))] -mod tests; - -#[doc(hidden)] -pub struct OrderWalCore { - arena: Arena, - map: SkipSet>, - opts: Options, - cmp: C, - cks: S, -} -impl OrderWalCore { - #[inline] - fn iter(&self) -> Iter<'_, C> { - Iter::new(self.map.iter()) - } -} +use crate::{memtable::BaseTable, sealed::Wal, Options}; -impl WalCore for OrderWalCore +pub struct OrderCore where - C: Comparator + CheapClone + Send + 'static, + K: ?Sized, + V: ?Sized, { - type Allocator = Arena; - type Base = SkipSet>; - type Pointer = Pointer; - - #[inline] - fn construct(arena: Arena, set: SkipSet>, opts: Options, cmp: C, cks: S) -> Self { - Self { - arena, - map: set, - cmp, - opts, - cks, - } - } -} - -/// A single writer multiple readers ordered write-ahead log implementation. -/// -/// Both read and write operations of this WAL are zero-cost (no allocation will happen for both read and write). -/// -/// Users can create multiple readers from the WAL by [`OrderWal::reader`], but only one writer is allowed. 
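The single-writer/multiple-readers contract described in the doc comment above carries over to the new `multiple_version` API elsewhere in this diff. A compact sketch of that contract, modeled on the `concurrent_basic` test added in `src/swmr/tests/multiple_version_insert.rs`; the explicit `OrderWal<u32, [u8; 4], M>` spelling of the type arguments is an assumption, since most generic argument lists were lost in this diff:

```rust
use orderwal::{
  memtable::{MultipleVersionMemtable, VersionedMemtableEntry},
  multiple_version::{OrderWal, Reader, Writer},
};

// One mutable writer handle; any number of live, read-only readers.
fn one_writer_many_readers<M>(mut w: OrderWal<u32, [u8; 4], M>)
where
  M: MultipleVersionMemtable + Send + 'static,
  for<'a> M::Item<'a>: VersionedMemtableEntry<'a>,
  M::Error: std::fmt::Debug,
{
  let handles = (0..4u32)
    .map(|i| {
      let reader = w.reader();
      std::thread::spawn(move || loop {
        // Spin until the writer has published key `i` at version 0.
        if let Some(ent) = reader.get(0, &i) {
          assert_eq!(ent.value(), &i.to_le_bytes());
          break;
        }
      })
    })
    .collect::<Vec<_>>();

  // Only this handle can mutate the WAL.
  for i in 0..4u32 {
    w.insert(0, &i, &i.to_le_bytes()).unwrap();
  }

  for handle in handles {
    handle.join().unwrap();
  }
}
```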
-// ```text -// +----------------------+--------------------------+--------------------+ -// | magic text (6 bytes) | magic version (2 bytes) | header (8 bytes) | -// +----------------------+--------------------------+--------------------+-----------------+--------------------+ -// | flag (1 byte) | klen & vlen (1-10 bytes) | key (n bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+--------------------------+--------------------+-----------------|--------------------+ -// | flag (1 byte) | klen & vlen (1-10 bytes) | key (n bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+--------------------------+--------------------+-----------------+--------------------+ -// | flag (1 byte) | klen & vlen (1-10 bytes) | key (n bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+--------------------------+--------------------+-----------------+-----------------+--------------------+ -// | ... | ... | ... | ... | ... | ... | -// +----------------------+--------------------------+--------------------+-----------------+-----------------+--------------------+ -// | ... | ... | ... | ... | ... | ... | -// +----------------------+--------------------------+--------------------+-----------------+-----------------+--------------------+ -// ``` -pub struct OrderWal { - core: Arc>, - _s: PhantomData, + pub(super) arena: Arena, + pub(super) map: M, + pub(super) opts: Options, + pub(super) cks: S, + pub(super) _m: PhantomData<(fn() -> K, fn() -> V)>, } -impl Constructor for OrderWal +impl core::fmt::Debug for OrderCore where - C: Comparator + CheapClone + Send + 'static, + K: ?Sized, + V: ?Sized, { - type Allocator = Arena; - type Core = OrderWalCore; - type Pointer = Pointer; - - #[inline] - fn allocator(&self) -> &Self::Allocator { - &self.core.arena - } - #[inline] - fn from_core(core: Self::Core) -> Self { - Self { - core: Arc::new(core), - _s: PhantomData, - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("OrderCore") + .field("arena", &self.arena) + .field("options", &self.opts) + .finish() } } -impl Sealed for OrderWal +impl Wal for OrderCore where - C: Comparator + CheapClone + Send + 'static, + K: ?Sized, + V: ?Sized, + M: BaseTable, { - fn hasher(&self) -> &S { - &self.core.cks - } - - fn options(&self) -> &Options { - &self.core.opts - } - - fn comparator(&self) -> &C { - &self.core.cmp - } - - fn insert_pointer(&self, ptr: Pointer) - where - C: Comparator, - { - self.core.map.insert(ptr); - } - - fn insert_pointers(&self, ptrs: impl Iterator>) - where - C: Comparator, - { - for ptr in ptrs { - self.core.map.insert(ptr); - } - } -} - -impl OrderWal { - /// Returns the path of the WAL if it is backed by a file. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::{swmr::OrderWal, Wal, Builder}; - /// - /// // A in-memory WAL - /// let wal = Builder::new().with_capacity(100).alloc::().unwrap(); - /// - /// assert!(wal.path_buf().is_none()); - /// ``` - pub fn path_buf(&self) -> Option<&std::sync::Arc> { - self.core.arena.path() - } -} - -impl ImmutableWal for OrderWal -where - C: Comparator + CheapClone + Send + 'static, -{ - type Iter<'a> - = Iter<'a, C> - where - Self: 'a, - C: Comparator; - type Range<'a, Q, R> - = Range<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - type Keys<'a> - = Keys<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeKeys<'a, Q, R> - = RangeKeys<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - type Values<'a> - = Values<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeValues<'a, Q, R> - = RangeValues<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - #[inline] - fn path(&self) -> Option<&std::path::Path> { - self.core.arena.path().map(|p| p.as_ref().as_path()) - } - - #[inline] - fn len(&self) -> usize { - self.core.map.len() - } + type Allocator = Arena; + type Memtable = M; #[inline] - fn maximum_key_size(&self) -> u32 { - self.core.opts.maximum_key_size() + fn memtable(&self) -> &Self::Memtable { + &self.map } #[inline] - fn maximum_value_size(&self) -> u32 { - self.core.opts.maximum_value_size() + fn memtable_mut(&mut self) -> &mut Self::Memtable { + &mut self.map } #[inline] - fn remaining(&self) -> u32 { - self.core.arena.remaining() as u32 + fn construct(arena: Self::Allocator, set: Self::Memtable, opts: Options, checksumer: S) -> Self { + Self { + arena, + map: set, + opts, + cks: checksumer, + _m: PhantomData, + } } #[inline] fn options(&self) -> &Options { - &self.core.opts - } - - #[inline] - fn contains_key(&self, key: &Q) -> bool - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self.core.map.contains(key) - } - - #[inline] - fn iter(&self) -> Self::Iter<'_> - where - C: Comparator, - { - self.core.iter() - } - - #[inline] - fn range(&self, range: R) -> Self::Range<'_, Q, R> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + crossbeam_skiplist::Comparable<[u8]>, - C: Comparator, - { - Range::new(self.core.map.range(range)) - } - - #[inline] - fn keys(&self) -> Self::Keys<'_> - where - C: Comparator, - { - Keys::new(self.core.map.iter()) - } - - #[inline] - fn range_keys(&self, range: R) -> Self::RangeKeys<'_, Q, R> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - RangeKeys::new(self.core.map.range(range)) - } - - #[inline] - fn values(&self) -> Self::Values<'_> - where - C: Comparator, - { - Values::new(self.core.map.iter()) + &self.opts } #[inline] - fn range_values(&self, range: R) -> Self::RangeValues<'_, Q, R> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - RangeValues::new(self.core.map.range(range)) - } - - #[inline] - fn first(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self - .core - .map - .front() - .map(|ent| (ent.as_key_slice(), ent.as_value_slice())) - } - - #[inline] - fn last(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self - .core - .map - .back() - .map(|ent| (ent.as_key_slice(), ent.as_value_slice())) - } - - #[inline] - fn get(&self, key: &Q) -> 
Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self.core.map.get(key).map(|ent| ent.as_value_slice()) - } - - #[inline] - fn upper_bound(&self, bound: Bound<&Q>) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self - .core - .map - .upper_bound(bound) - .map(|ent| ent.as_value_slice()) - } - - #[inline] - fn lower_bound(&self, bound: Bound<&Q>) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self - .core - .map - .lower_bound(bound) - .map(|ent| ent.as_value_slice()) + fn allocator(&self) -> &Self::Allocator { + &self.arena } -} - -impl Wal for OrderWal -where - C: Comparator + CheapClone + Send + 'static, -{ - type Reader = OrderWalReader; #[inline] - fn reader(&self) -> Self::Reader { - OrderWalReader::new(self.core.clone()) - } - - fn get_or_insert_with_value_builder( - &mut self, - key: &[u8], - vb: ValueBuilder) -> Result<(), E>>, - ) -> Result, Either> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .check( - key.len(), - vb.size() as usize, - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - ) - .map_err(Either::Right)?; - - if let Some(ent) = self.core.map.get(key) { - return Ok(Some(ent.as_value_slice())); - } - - self.insert_with_value_builder::(key, vb).map(|_| None) + fn hasher(&self) -> &S { + &self.cks } } diff --git a/src/swmr/wal/iter.rs b/src/swmr/wal/iter.rs deleted file mode 100644 index 1b08166..0000000 --- a/src/swmr/wal/iter.rs +++ /dev/null @@ -1,258 +0,0 @@ -use core::{borrow::Borrow, ops::RangeBounds}; - -use crossbeam_skiplist::Comparable; -use dbutils::Comparator; - -use super::Pointer; - -/// An iterator over the entries in the WAL. -pub struct Iter<'a, C> { - iter: crossbeam_skiplist::set::Iter<'a, Pointer>, -} - -impl<'a, C> Iter<'a, C> { - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C: Comparator> Iterator for Iter<'a, C> { - type Item = (&'a [u8], &'a [u8]); - - #[inline] - fn next(&mut self) -> Option { - self - .iter - .next() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -impl DoubleEndedIterator for Iter<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self - .iter - .next_back() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -/// An iterator over the keys in the WAL. -pub struct Keys<'a, C> { - iter: crossbeam_skiplist::set::Iter<'a, Pointer>, -} - -impl<'a, C> Keys<'a, C> { - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C: Comparator> Iterator for Keys<'a, C> { - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_key_slice()) - } -} - -impl DoubleEndedIterator for Keys<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_key_slice()) - } -} - -/// An iterator over the values in the WAL. 
-pub struct Values<'a, C> { - iter: crossbeam_skiplist::set::Iter<'a, Pointer>, -} - -impl<'a, C> Values<'a, C> { - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C: Comparator> Iterator for Values<'a, C> { - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_value_slice()) - } -} - -impl DoubleEndedIterator for Values<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_value_slice()) - } -} - -/// An iterator over a subset of the entries in the WAL. -pub struct Range<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>, -} - -impl<'a, Q, R, C> Range<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, Q, R, C> Iterator for Range<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - type Item = (&'a [u8], &'a [u8]); - - #[inline] - fn next(&mut self) -> Option { - self - .iter - .next() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -impl DoubleEndedIterator for Range<'_, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - fn next_back(&mut self) -> Option { - self - .iter - .next_back() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -/// An iterator over the keys in a subset of the entries in the WAL. -pub struct RangeKeys<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>, -} - -impl<'a, Q, R, C> RangeKeys<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, Q, R, C> Iterator for RangeKeys<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_key_slice()) - } -} - -impl DoubleEndedIterator for RangeKeys<'_, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_key_slice()) - } -} - -/// An iterator over the values in a subset of the entries in the WAL. 
-pub struct RangeValues<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>, -} - -impl<'a, Q, R, C> RangeValues<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - pub(super) fn new(iter: crossbeam_skiplist::set::Range<'a, Q, R, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, Q, R, C> Iterator for RangeValues<'a, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_value_slice()) - } -} - -impl DoubleEndedIterator for RangeValues<'_, Q, R, C> -where - C: Comparator, - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized + Comparable<[u8]>, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_value_slice()) - } -} diff --git a/src/swmr/wal/reader.rs b/src/swmr/wal/reader.rs deleted file mode 100644 index 29533bf..0000000 --- a/src/swmr/wal/reader.rs +++ /dev/null @@ -1,192 +0,0 @@ -use super::*; - -/// An [`OrderWal`] reader. -pub struct OrderWalReader(OrderWal); - -impl OrderWalReader { - /// Creates a new read-only WAL reader. - #[inline] - pub(super) fn new(wal: Arc>) -> Self { - Self(OrderWal { - core: wal.clone(), - _s: PhantomData, - }) - } -} - -impl Constructor for OrderWalReader -where - C: Comparator + CheapClone + Send + 'static, -{ - type Allocator = Arena; - type Core = OrderWalCore; - type Pointer = Pointer; - - #[inline] - fn allocator(&self) -> &Self::Allocator { - self.0.allocator() - } - - fn from_core(core: Self::Core) -> Self { - Self(OrderWal { - core: Arc::new(core), - _s: PhantomData, - }) - } -} - -impl ImmutableWal for OrderWalReader -where - C: Comparator + CheapClone + Send + 'static, -{ - type Iter<'a> - = Iter<'a, C> - where - Self: 'a, - C: Comparator; - type Range<'a, Q, R> - = Range<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - type Keys<'a> - = Keys<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeKeys<'a, Q, R> - = RangeKeys<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - type Values<'a> - = Values<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeValues<'a, Q, R> - = RangeValues<'a, Q, R, C> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - #[inline] - fn path(&self) -> Option<&std::path::Path> { - self.0.path() - } - - #[inline] - fn len(&self) -> usize { - self.0.len() - } - - #[inline] - fn options(&self) -> &Options { - ImmutableWal::options(&self.0) - } - - #[inline] - fn contains_key(&self, key: &Q) -> bool - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self.0.contains_key(key) - } - - #[inline] - fn iter(&self) -> Self::Iter<'_> - where - C: Comparator, - { - self.0.iter() - } - - #[inline] - fn range(&self, range: R) -> Self::Range<'_, Q, R> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - self.0.range(range) - } - - #[inline] - fn keys(&self) -> Self::Keys<'_> - where - C: Comparator, - { - self.0.keys() - } - - #[inline] - fn range_keys(&self, range: R) -> Self::RangeKeys<'_, Q, R> - where - R: core::ops::RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - 
self.0.range_keys(range) - } - - #[inline] - fn values(&self) -> Self::Values<'_> - where - C: Comparator, - { - self.0.values() - } - - #[inline] - fn range_values<Q, R>(&self, range: R) -> Self::RangeValues<'_, Q, R> - where - R: core::ops::RangeBounds<Q>, - [u8]: Borrow<Q>, - Q: Ord + ?Sized, - C: Comparator, - { - self.0.range_values(range) - } - - #[inline] - fn first(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self.0.first() - } - - #[inline] - fn last(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self.0.last() - } - - #[inline] - fn get<Q>(&self, key: &Q) -> Option<&[u8]> - where - [u8]: Borrow<Q>, - Q: ?Sized + Ord, - C: Comparator, - { - self.0.get(key) - } -} diff --git a/src/swmr/wal/tests.rs deleted file mode 100644 index 80a65a8..0000000 --- a/src/swmr/wal/tests.rs +++ /dev/null @@ -1,15 +0,0 @@ -use super::*; - -#[cfg(all(test, any(test_swmr_constructor, all_tests)))] -mod constructor; - -#[cfg(all(test, any(test_swmr_insert, all_tests)))] -mod insert; - -#[cfg(all(test, any(test_swmr_iters, all_tests)))] -mod iter; - -#[cfg(all(test, any(test_swmr_get, all_tests)))] -mod get; - -const MB: u32 = 1024 * 1024; diff --git a/src/swmr/wal/tests/constructor.rs deleted file mode 100644 index c80352e..0000000 --- a/src/swmr/wal/tests/constructor.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(swmr::constructor::OrderWal); diff --git a/src/swmr/wal/tests/get.rs deleted file mode 100644 index 0165171..0000000 --- a/src/swmr/wal/tests/get.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(swmr::get::OrderWal); diff --git a/src/swmr/wal/tests/insert.rs deleted file mode 100644 index 7d45755..0000000 --- a/src/swmr/wal/tests/insert.rs +++ /dev/null @@ -1,5 +0,0 @@ -use super::*; - -common_unittests!(swmr::insert::OrderWal); - -common_unittests!(swmr::insert_batch::OrderWal); diff --git a/src/swmr/wal/tests/iter.rs deleted file mode 100644 index e42d2ed..0000000 --- a/src/swmr/wal/tests/iter.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(swmr::iters::OrderWal); diff --git a/src/swmr/writer.rs new file mode 100644 index 0000000..957a1f3 --- /dev/null +++ b/src/swmr/writer.rs @@ -0,0 +1,121 @@ +use crate::{ + memtable::{BaseTable, Memtable, MemtableEntry, MultipleVersionMemtable, VersionedMemtableEntry}, + sealed::{Constructable, WithVersion}, +}; +use dbutils::{checksum::Crc32, types::Type}; +use rarena_allocator::sync::Arena; +#[cfg(all(feature = "std", not(target_family = "wasm")))] +use rarena_allocator::Allocator; + +use std::sync::Arc; + +use super::{reader::OrderWalReader, wal::OrderCore}; + +/// An ordered write-ahead log implementation for concurrent thread environments. 
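+///
+/// ## Example
+///
+/// A minimal end-to-end sketch; the `base::OrderWal<str, str>` spelling of the
+/// type arguments and the trait import paths are assumptions modeled on the
+/// tests in this diff:
+///
+/// ```rust
+/// use orderwal::{base::{OrderWal, Reader, Writer}, Builder};
+///
+/// // Create an in-memory WAL, write through the single writer handle,
+/// // then read the data back through a cheap read-only reader.
+/// let mut wal = Builder::new()
+///   .with_capacity(1024)
+///   .alloc::<OrderWal<str, str>>()
+///   .unwrap();
+///
+/// wal.insert("a", "a1").unwrap();
+///
+/// let reader = wal.reader();
+/// assert_eq!(reader.get("a").unwrap().value(), "a1");
+/// ```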
+pub struct OrderWal { + pub(super) core: Arc>, +} + +impl core::fmt::Debug for OrderWal +where + K: ?Sized, + V: ?Sized, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("OrderWal").field(&self.core).finish() + } +} + +unsafe impl Send for OrderWal {} +unsafe impl Sync for OrderWal {} + +impl OrderWal { + #[inline] + pub(super) const fn construct(core: Arc>) -> Self { + Self { core } + } +} + +impl Constructable for OrderWal +where + K: ?Sized + 'static, + V: ?Sized + 'static, + S: 'static, + M: BaseTable + 'static, +{ + type Allocator = Arena; + type Wal = OrderCore; + type Memtable = M; + type Checksumer = S; + type Reader = OrderWalReader; + + #[inline] + fn as_wal(&self) -> &Self::Wal { + &self.core + } + + #[inline] + fn from_core(core: Self::Wal) -> Self { + Self { + core: Arc::new(core), + } + } +} + +impl OrderWal +where + K: ?Sized + 'static, + V: ?Sized + 'static, + S: 'static, + M: BaseTable + 'static, +{ + /// Returns the path of the WAL if it is backed by a file. + /// + /// ## Example + /// + /// ```rust + /// use orderwal::{base::OrderWal, Builder}; + /// + /// // A in-memory WAL + /// let wal = Builder::new().with_capacity(100).alloc::>().unwrap(); + /// + /// assert!(wal.path_buf().is_none()); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "std", not(target_family = "wasm")))))] + #[inline] + pub fn path_buf(&self) -> Option<&std::sync::Arc> { + self.as_wal().arena.path() + } +} + +impl crate::wal::base::Writer for OrderWal +where + K: ?Sized + Type + Ord + 'static, + V: ?Sized + Type + 'static, + M: Memtable + 'static, + for<'a> M::Item<'a>: MemtableEntry<'a>, + S: 'static, +{ + #[inline] + fn reader(&self) -> Self::Reader { + OrderWalReader::new(self.core.clone()) + } +} + +impl crate::wal::multiple_version::Writer for OrderWal +where + K: ?Sized + Type + Ord + 'static, + V: ?Sized + Type + 'static, + M: MultipleVersionMemtable + 'static, + for<'a> M::Item<'a>: VersionedMemtableEntry<'a>, + for<'a> M::VersionedItem<'a>: WithVersion, + for<'a> M::Item<'a>: WithVersion, + S: 'static, +{ + #[inline] + fn reader(&self) -> Self::Reader { + OrderWalReader::new(self.core.clone()) + } +} diff --git a/src/tests.rs b/src/tests.rs deleted file mode 100644 index effcec7..0000000 --- a/src/tests.rs +++ /dev/null @@ -1,1151 +0,0 @@ -use core::ops::Bound; - -use super::*; -use wal::{ImmutableWal, Wal}; - -const MB: usize = 1024 * 1024; - -macro_rules! expand_unit_tests { - ($wal:ident { $($name:ident), +$(,)? }) => { - $( - paste::paste! { - #[test] - fn [< test_ $name _inmemory >]() { - $crate::tests::$name(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn [< test_ $name _map_anon >]() { - $crate::tests::$name(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn [< test_ $name _map_file >]() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::$name( - &mut unsafe { $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_", stringify!($name), "_map_file")), - - ) - .unwrap() }, - ); - } - } - )* - }; -} - -macro_rules! common_unittests { - ($prefix:ident::insert::$wal:ty) => { - paste::paste! 
{ - #[test] - fn test_insert_to_full_inmemory() { - $crate::tests::insert_to_full(&mut $crate::Builder::new().with_capacity(100).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_to_full_map_anon() { - $crate::tests::insert_to_full(&mut $crate::Builder::new().with_capacity(100).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_to_full_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::insert_to_full( - &mut unsafe { - $crate::Builder::new() - .with_capacity(100) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_insert_to_full_map_file")), - ) - .unwrap() - }, - ); - } - - #[test] - fn test_insert_inmemory() { - $crate::tests::insert(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_map_anon() { - $crate::tests::insert(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::insert( - &mut unsafe { $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_insert_map_file")), - ) - .unwrap() }, - ); - } - - #[test] - fn test_insert_with_key_builder_inmemory() { - $crate::tests::insert_with_key_builder(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_with_key_builder_map_anon() { - $crate::tests::insert_with_key_builder(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_with_key_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::insert_with_key_builder( - &mut unsafe { $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_insert_with_key_builder_map_file")), - - ) - .unwrap() }, - ); - } - - #[test] - fn test_insert_with_value_builder_inmemory() { - $crate::tests::insert_with_value_builder(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_with_value_builder_map_anon() { - $crate::tests::insert_with_value_builder(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_with_value_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::insert_with_value_builder( - &mut unsafe { $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_insert_with_value_builder_map_file")), - - ) - .unwrap() }, - ); - } - - #[test] - fn test_insert_with_builders_inmemory() { - $crate::tests::insert_with_builders(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_with_builders_map_anon() { - $crate::tests::insert_with_builders(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_with_builders_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::insert_with_builders( - &mut unsafe { 
$crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_insert_with_builders_map_file")), - - ) - .unwrap() }, - ); - } - } - }; - ($prefix:ident::insert_batch::$wal:ident) => { - paste::paste! { - #[test] - fn test_insert_batch_inmemory() { - $crate::tests::insert_batch(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_batch_map_anon() { - $crate::tests::insert_batch(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_batch_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_map_file" - )); - let mut map = unsafe { - $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - &path, - - ) - .unwrap() - }; - - $crate::tests::insert_batch(&mut map); - - let map = unsafe { $crate::Builder::new().map::<$wal, _>(&path).unwrap() }; - - for i in 0..100u32 { - assert_eq!(map.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - assert_eq!(map.get(&1000u32.to_be_bytes()).unwrap(), 1000u32.to_be_bytes()); - } - - #[test] - fn test_insert_batch_with_key_builder_inmemory() { - $crate::tests::insert_batch_with_key_builder(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_batch_with_key_builder_map_anon() { - $crate::tests::insert_batch_with_key_builder(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_batch_with_key_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_key_builder_map_file" - )); - let mut map = unsafe { - $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - &path, - - ) - .unwrap() - }; - - $crate::tests::insert_batch_with_key_builder(&mut map); - map.flush().unwrap(); - - let map = unsafe { $crate::Builder::new().map::<$wal, _>(&path).unwrap() }; - - for i in 0..100u32 { - assert_eq!(map.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - } - - #[test] - fn test_insert_batch_with_value_builder_inmemory() { - $crate::tests::insert_batch_with_value_builder(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_batch_with_value_builder_map_anon() { - $crate::tests::insert_batch_with_value_builder(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_batch_with_value_builder_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_value_builder_map_file" - )); - let mut map = unsafe { - $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - &path, - - ) - .unwrap() - }; - - $crate::tests::insert_batch_with_value_builder(&mut map); - map.flush_async().unwrap(); - - let map = unsafe { $crate::Builder::new().map::<$wal, _>(&path).unwrap() }; - - for i in 0..100u32 { - assert_eq!(map.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - } - - #[test] - fn 
test_insert_batch_with_builders_inmemory() { - $crate::tests::insert_batch_with_builders(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_insert_batch_with_builders_map_anon() { - $crate::tests::insert_batch_with_builders(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_insert_batch_with_builders_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(concat!( - "test_", - stringify!($prefix), - "_insert_batch_with_builders_map_file" - )); - let mut map = unsafe { - $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - &path, - - ) - .unwrap() - }; - - $crate::tests::insert_batch_with_builders(&mut map); - - let map = unsafe { $crate::Builder::new().map::<$wal, _>(&path).unwrap() }; - - for i in 0..100u32 { - assert_eq!(map.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - } - } - }; - ($prefix:ident::iters::$wal:ident) => { - expand_unit_tests!( - $wal { - iter, - range, - keys, - values, - bounds, - range_keys, - range_values, - } - ); - }; - ($prefix:ident::get::$wal:ident) => { - expand_unit_tests!( - $wal { - first, - last, - get_or_insert, - get_or_insert_with_value_builder, - } - ); - }; - ($prefix:ident::constructor::$wal:ident) => { - paste::paste! { - #[test] - fn test_construct_inmemory() { - $crate::tests::construct_inmemory::>(); - } - - #[test] - fn test_construct_map_anon() { - $crate::tests::construct_map_anon::>(); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_construct_map_file() { - $crate::tests::construct_map_file::>(stringify!($prefix)); - } - - #[test] - fn test_construct_with_small_capacity_inmemory() { - $crate::tests::construct_with_small_capacity_inmemory::>(); - } - - #[test] - fn test_construct_with_small_capacity_map_anon() { - $crate::tests::construct_with_small_capacity_map_anon::>(); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_construct_with_small_capacity_map_file() { - $crate::tests::construct_with_small_capacity_map_file::>(stringify!($prefix)); - } - - #[test] - fn test_zero_reserved_inmemory() { - $crate::tests::zero_reserved(&mut $crate::Builder::new().with_capacity(MB).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_zero_reserved_map_anon() { - $crate::tests::zero_reserved(&mut $crate::Builder::new().with_capacity(MB).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_zero_reserved_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::zero_reserved( - &mut unsafe { $crate::Builder::new().with_create_new(true).with_read(true).with_write(true).with_capacity(MB as u32).map_mut::<$wal, _>( - dir.path().join(concat!("test_", stringify!($prefix), "_zero_reserved_map_file")), - - ) - .unwrap() }, - ); - } - - #[test] - fn test_reserved_inmemory() { - $crate::tests::reserved(&mut $crate::Builder::new().with_capacity(MB).with_reserved(4).alloc::<$wal>().unwrap()); - } - - #[test] - fn test_reserved_map_anon() { - $crate::tests::reserved(&mut $crate::Builder::new().with_capacity(MB).with_reserved(4).map_anon::<$wal>().unwrap()); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_reserved_map_file() { - let dir = ::tempfile::tempdir().unwrap(); - $crate::tests::reserved( - &mut unsafe { Builder::new().with_reserved(4).with_capacity(MB).with_create_new(true).with_write(true).with_read(true).map_mut::<$wal, _>( - dir.path().join(concat!("test_", 
stringify!($prefix), "_reserved_map_file")), - - ) - .unwrap() }, - ); - } - } - } -} - -pub(crate) fn construct_inmemory>() { - let mut wal = Builder::new() - .with_capacity(MB as u32) - .alloc::() - .unwrap(); - let wal = &mut wal; - assert!(wal.is_empty()); - wal.insert(b"key1", b"value1").unwrap(); -} - -pub(crate) fn construct_map_anon>() { - let mut wal = Builder::new() - .with_capacity(MB as u32) - .map_anon::() - .unwrap(); - let wal = &mut wal; - wal.insert(b"key1", b"value1").unwrap(); -} - -pub(crate) fn construct_map_file>(prefix: &str) { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir.path().join(format!("{prefix}_construct_map_file")); - - unsafe { - let mut wal = Builder::new() - .with_capacity(MB as u32) - .with_create_new(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap(); - - let wal = &mut wal; - wal.insert(b"key1", b"value1").unwrap(); - assert_eq!(wal.get(b"key1").unwrap(), b"value1"); - } - - unsafe { - let wal = Builder::new() - .with_capacity(MB as u32) - .with_create(true) - .with_read(true) - .with_write(true) - .map_mut::(&path) - .unwrap(); - - assert_eq!(wal.get(b"key1").unwrap(), b"value1"); - assert!(!wal.read_only()); - } - - let wal = unsafe { Builder::new().map::(&path).unwrap() }; - assert_eq!(wal.get(b"key1").unwrap(), b"value1"); - assert_eq!(wal.path().unwrap(), path); - assert_eq!(wal.maximum_key_size(), Options::new().maximum_key_size()); - assert_eq!( - wal.maximum_value_size(), - Options::new().maximum_value_size() - ); -} - -pub(crate) fn construct_with_small_capacity_inmemory>() { - let wal = Builder::new().with_capacity(1).alloc::(); - - assert!(wal.is_err()); - match wal { - Err(e) => println!("error: {:?}", e), - _ => panic!("unexpected error"), - } -} - -pub(crate) fn construct_with_small_capacity_map_anon>() { - let wal = Builder::new().with_capacity(1).map_anon::(); - - assert!(wal.is_err()); - match wal { - Err(e) => println!("error: {:?}", e), - _ => panic!("unexpected error"), - } -} - -pub(crate) fn construct_with_small_capacity_map_file>(prefix: &str) { - let dir = ::tempfile::tempdir().unwrap(); - let path = dir - .path() - .join(format!("{prefix}_construct_with_small_capacity_map_file")); - - let wal = unsafe { - Builder::new() - .with_capacity(1) - .with_create_new(true) - .with_write(true) - .with_read(true) - .map_mut::(&path) - }; - - assert!(wal.is_err()); - match wal { - Err(e) => println!("{:?}", e), - _ => panic!("unexpected error"), - } -} - -pub(crate) fn insert_to_full>(wal: &mut W) { - let mut full = false; - for i in 0u32.. { - match wal.insert(&i.to_be_bytes(), &i.to_be_bytes()) { - Ok(_) => {} - Err(e) => match e { - Error::InsufficientSpace { .. 
} => { - full = true; - break; - } - _ => panic!("unexpected error"), - }, - } - } - assert!(full); -} - -pub(crate) fn insert>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - assert!(!wal.is_empty()); - assert_eq!(wal.len(), 100); - - for i in 0..100u32 { - assert!(wal.contains_key(&i.to_be_bytes())); - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - assert!(!wal.is_empty()); - assert_eq!(wal.len(), 100); - - for i in 0..100u32 { - assert!(wal.contains_key(&i.to_be_bytes())); - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_with_key_builder>(wal: &mut W) { - for i in 0..100u32 { - wal - .insert_with_key_builder::<()>( - KeyBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i); - Ok(()) - }), - &i.to_be_bytes(), - ) - .unwrap(); - } - - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_with_value_builder>(wal: &mut W) { - for i in 0..100u32 { - wal - .insert_with_value_builder::<()>( - &i.to_be_bytes(), - ValueBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i); - Ok(()) - }), - ) - .unwrap(); - } - - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_with_builders>(wal: &mut W) { - for i in 0..100u32 { - wal - .insert_with_builders::<(), ()>( - KeyBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i); - Ok(()) - }), - ValueBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i); - Ok(()) - }), - ) - .unwrap(); - } - - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn iter>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let mut iter = wal.iter(); - - for i in 0..100u32 { - let (key, value) = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - let mut iter = wal.iter(); - for i in (0..100u32).rev() { - let (key, value) = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - let wal = wal.reader(); - let mut iter = wal.iter(); - - for i in 0..100u32 { - let (key, value) = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - let mut iter = wal.iter(); - for i in (0..100u32).rev() { - let (key, value) = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } -} - -pub(crate) fn bounds>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let upper50 = wal - .upper_bound(Bound::Included(&50u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper50, 50u32.to_be_bytes()); - let upper51 = wal - .upper_bound(Bound::Excluded(&51u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper51, 50u32.to_be_bytes()); - - let upper101 = wal - .upper_bound(Bound::Included(&101u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper101, 99u32.to_be_bytes()); - - let upper_unbounded = 
wal.upper_bound(Bound::Unbounded).unwrap(); - assert_eq!(upper_unbounded, 99u32.to_be_bytes()); - - let lower50 = wal - .lower_bound(Bound::Included(&50u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower50, 50u32.to_be_bytes()); - let lower51 = wal - .lower_bound(Bound::Excluded(&51u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower51, 52u32.to_be_bytes()); - - let lower0 = wal - .lower_bound(Bound::Excluded(&0u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower0, 1u32.to_be_bytes()); - - let lower_unbounded = wal.lower_bound(Bound::Unbounded).unwrap(); - assert_eq!(lower_unbounded, 0u32.to_be_bytes()); - - let wal = wal.reader(); - let upper50 = wal - .upper_bound(Bound::Included(&50u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper50, 50u32.to_be_bytes()); - let upper51 = wal - .upper_bound(Bound::Excluded(&51u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper51, 50u32.to_be_bytes()); - - let upper101 = wal - .upper_bound(Bound::Included(&101u32.to_be_bytes())) - .unwrap(); - assert_eq!(upper101, 99u32.to_be_bytes()); - - let upper_unbounded = wal.upper_bound(Bound::Unbounded).unwrap(); - assert_eq!(upper_unbounded, 99u32.to_be_bytes()); - - let lower50 = wal - .lower_bound(Bound::Included(&50u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower50, 50u32.to_be_bytes()); - let lower51 = wal - .lower_bound(Bound::Excluded(&51u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower51, 52u32.to_be_bytes()); - - let lower0 = wal - .lower_bound(Bound::Excluded(&0u32.to_be_bytes())) - .unwrap(); - assert_eq!(lower0, 1u32.to_be_bytes()); - - let lower_unbounded = wal.lower_bound(Bound::Unbounded).unwrap(); - assert_eq!(lower_unbounded, 0u32.to_be_bytes()); -} - -pub(crate) fn range>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let x = 50u32.to_be_bytes(); - - let mut iter = wal.range((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in 50..100u32 { - let (key, value) = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let (key, value) = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - let wal = wal.reader(); - - let mut iter = wal.range((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in 50..100u32 { - let (key, value) = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let (key, value) = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - assert_eq!(value, i.to_be_bytes()); - } -} - -pub(crate) fn keys>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let mut iter = wal.keys(); - - for i in 0..100u32 { - let key = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.keys(); - for i in (0..100u32).rev() { - let key = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - let wal = wal.reader(); - let mut iter = wal.keys(); - - for i in 0..100u32 { - let key = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.keys(); - for i in (0..100u32).rev() { - 
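
The `bounds` helper above fixes the contract of the two bound lookups: `upper_bound(b)` returns the last key at or inside the bound, `lower_bound(b)` the first, with `Excluded` stepping past an exact hit and out-of-range bounds clamping to the ends. The same rules can be stated with a plain `BTreeSet`; this is a self-contained illustration, not the WAL API:

```rust
use core::ops::Bound;
use std::collections::BTreeSet;

fn main() {
  let set: BTreeSet<u32> = (0..100).collect();

  // upper_bound(Included(50)) -> 50: the largest key <= 50.
  assert_eq!(
    set.range((Bound::Unbounded, Bound::Included(50u32))).next_back(),
    Some(&50)
  );
  // upper_bound(Included(101)) -> 99: a bound past the end clamps to the last key.
  assert_eq!(
    set.range((Bound::Unbounded, Bound::Included(101u32))).next_back(),
    Some(&99)
  );
  // lower_bound(Excluded(51)) -> 52: Excluded steps past the exact hit.
  assert_eq!(
    set.range((Bound::Excluded(51u32), Bound::Unbounded)).next(),
    Some(&52)
  );
}
```
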
let key = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } -} - -pub(crate) fn range_keys>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let x = 50u32.to_be_bytes(); - - let mut iter = wal.range_keys((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in 50..100u32 { - let key = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range_keys((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let key = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - let wal = wal.reader(); - let mut iter = wal.range_keys((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in 50..100u32 { - let key = iter.next().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range_keys((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let key = iter.next_back().unwrap(); - assert_eq!(key, i.to_be_bytes()); - } -} - -pub(crate) fn values>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let mut iter = wal.values(); - - for i in 0..100u32 { - let value = iter.next().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.values(); - for i in (0..100u32).rev() { - let value = iter.next_back().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - let wal = wal.reader(); - let mut iter = wal.values(); - - for i in 0..100u32 { - let value = iter.next().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.values(); - for i in (0..100u32).rev() { - let value = iter.next_back().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } -} - -pub(crate) fn range_values>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let x = 50u32.to_be_bytes(); - - let mut iter = wal.range_values((Bound::Included(x.as_slice()), Bound::Unbounded)); - - for i in 50..100u32 { - let value = iter.next().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range_values((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let value = iter.next_back().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - let wal = wal.reader(); - let mut iter = wal.range_values((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in 50..100u32 { - let value = iter.next().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } - - assert!(iter.next().is_none()); - - let mut iter = wal.range_values((Bound::Included(x.as_slice()), Bound::Unbounded)); - for i in (50..100u32).rev() { - let value = iter.next_back().unwrap(); - assert_eq!(value, i.to_be_bytes()); - } -} - -pub(crate) fn first>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let (key, value) = wal.first().unwrap(); - assert_eq!(key, 0u32.to_be_bytes()); - assert_eq!(value, 0u32.to_be_bytes()); - - let wal = wal.reader(); - let (key, value) = wal.first().unwrap(); - assert_eq!(key, 0u32.to_be_bytes()); - assert_eq!(value, 0u32.to_be_bytes()); -} - -pub(crate) fn last>(wal: &mut W) { - for i in 0..100u32 { - wal.insert(&i.to_be_bytes(), &i.to_be_bytes()).unwrap(); - } - - let (key, value) = wal.last().unwrap(); - assert_eq!(key, 
99u32.to_be_bytes()); - assert_eq!(value, 99u32.to_be_bytes()); - - let wal = wal.reader(); - let (key, value) = wal.last().unwrap(); - assert_eq!(key, 99u32.to_be_bytes()); - assert_eq!(value, 99u32.to_be_bytes()); -} - -pub(crate) fn get_or_insert>(wal: &mut W) { - for i in 0..100u32 { - wal - .get_or_insert(&i.to_be_bytes(), &i.to_be_bytes()) - .unwrap(); - } - - for i in 0..100u32 { - wal - .get_or_insert(&i.to_be_bytes(), &(i * 2).to_be_bytes()) - .unwrap(); - } - - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn get_or_insert_with_value_builder>(wal: &mut W) { - for i in 0..100u32 { - wal - .get_or_insert_with_value_builder::<()>( - &i.to_be_bytes(), - ValueBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i); - Ok(()) - }), - ) - .unwrap(); - } - - for i in 0..100u32 { - wal - .get_or_insert_with_value_builder::<()>( - &i.to_be_bytes(), - ValueBuilder::<_>::once(4, |buf| { - let _ = buf.put_u32_be(i * 2); - Ok(()) - }), - ) - .unwrap(); - } - - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..100u32 { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_batch>(wal: &mut W) { - const N: u32 = 100; - - let mut batch = vec![]; - - for i in 0..N { - batch.push(Entry::new(i.to_be_bytes(), i.to_be_bytes())); - } - - wal.insert_batch(&mut batch).unwrap(); - - wal - .insert(&1000u32.to_be_bytes(), &1000u32.to_be_bytes()) - .unwrap(); - - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - assert_eq!( - wal.get(&1000u32.to_be_bytes()).unwrap(), - 1000u32.to_be_bytes() - ); - - let wal = wal.reader(); - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - assert_eq!( - wal.get(&1000u32.to_be_bytes()).unwrap(), - 1000u32.to_be_bytes() - ); -} - -pub(crate) fn insert_batch_with_key_builder>(wal: &mut W) { - const N: u32 = 100; - - let mut batch = vec![]; - - for i in 0..N { - batch.push(EntryWithKeyBuilder::new( - KeyBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i)), - i.to_be_bytes(), - )); - } - - wal.insert_batch_with_key_builder(&mut batch).unwrap(); - - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_batch_with_value_builder>(wal: &mut W) { - const N: u32 = 100; - - let mut batch = vec![]; - for i in 0..N { - batch.push(EntryWithValueBuilder::new( - i.to_be_bytes(), - ValueBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i)), - )); - } - - wal.insert_batch_with_value_builder(&mut batch).unwrap(); - - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn insert_batch_with_builders>(wal: &mut W) { - const N: u32 = 100; - - let mut batch = vec![]; - - for i in 0..N { - batch.push(EntryWithBuilders::new( - KeyBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i)), - ValueBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i)), - )); - } - - wal.insert_batch_with_builders(&mut batch).unwrap(); - - 
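
Each entry in the batch above pairs a builder with a declared size. Declaring sizes up front is what lets the WAL size the whole batch before any closure runs, then hand each closure its slice of the reservation as a `VacantBuffer`. A sketch of one such entry, reusing `EntryWithBuilders`, `KeyBuilder::new` and `put_u32_be` exactly as the removed helpers do; the up-front-reservation behaviour described here is the apparent design intent rather than something this diff spells out:

```rust
use orderwal::{KeyBuilder, ValueBuilder, VacantBuffer};

// The `4` is a promise about encoded size: the WAL can sum these before
// running any closure, reserve once, then let each closure fill its slot.
let i = 7u32;
let entry = EntryWithBuilders::new(
  KeyBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i)),
  ValueBuilder::new(4, move |buf: &mut VacantBuffer<'_>| buf.put_u32_be(i * 2)),
);
// batch.push(entry);
// wal.insert_batch_with_builders(&mut batch)?;
```
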
for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } - - let wal = wal.reader(); - for i in 0..N { - assert_eq!(wal.get(&i.to_be_bytes()).unwrap(), i.to_be_bytes()); - } -} - -pub(crate) fn zero_reserved>(wal: &mut W) { - unsafe { - assert_eq!(wal.reserved_slice(), &[]); - assert_eq!(wal.reserved_slice_mut(), &mut []); - - let reader = wal.reader(); - assert_eq!(reader.reserved_slice(), &[]); - } -} - -pub(crate) fn reserved>(wal: &mut W) { - unsafe { - let buf = wal.reserved_slice_mut(); - buf.copy_from_slice(b"al8n"); - assert_eq!(wal.reserved_slice(), b"al8n"); - assert_eq!(wal.reserved_slice_mut(), b"al8n"); - - let reader = wal.reader(); - assert_eq!(reader.reserved_slice(), b"al8n"); - } -} diff --git a/src/types.rs b/src/types.rs new file mode 100644 index 0000000..7747b50 --- /dev/null +++ b/src/types.rs @@ -0,0 +1,199 @@ +use dbutils::leb128::encoded_u64_varint_len; +pub use dbutils::{ + buffer::{BufWriter, BufWriterOnce, VacantBuffer}, + types::*, +}; + +use crate::{utils::merge_lengths, CHECKSUM_SIZE, RECORD_FLAG_SIZE, VERSION_SIZE}; + +pub(crate) mod base; +pub(crate) mod multiple_version; + +const ENTRY_FLAGS_SIZE: usize = core::mem::size_of::(); + +/// The kind of the Write-Ahead Log. +/// +/// Currently, there are two kinds of Write-Ahead Log: +/// 1. Plain: The Write-Ahead Log is plain, which means it does not support multiple versions. +/// 2. MultipleVersion: The Write-Ahead Log supports multiple versions. +#[derive(Debug, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Kind { + /// The Write-Ahead Log is plain, which means it does not support multiple versions. + Plain = 0, + /// The Write-Ahead Log supports multiple versions. + MultipleVersion = 1, +} + +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +impl TryFrom for Kind { + type Error = crate::error::UnknownKind; + + #[inline] + fn try_from(value: u8) -> Result { + Ok(match value { + 0 => Self::Plain, + 1 => Self::MultipleVersion, + _ => return Err(crate::error::UnknownKind(value)), + }) + } +} + +bitflags::bitflags! { + /// The flags for each entry. + #[derive(Debug, Copy, Clone)] + pub struct EntryFlags: u8 { + /// First bit: 1 indicates removed + const REMOVED = 0b00000001; + /// Second bit: 1 indicates the key is pointer + const POINTER = 0b00000010; + /// Third bit: 1 indicates the entry contains a version + const VERSIONED = 0b00000100; + } +} + +impl EntryFlags { + pub(crate) const SIZE: usize = core::mem::size_of::(); +} + +#[derive(Debug)] +pub(crate) struct EncodedEntryMeta { + pub(crate) packed_kvlen_size: usize, + pub(crate) packed_kvlen: u64, + pub(crate) entry_size: u32, + pub(crate) klen: usize, + pub(crate) vlen: usize, + pub(crate) versioned: bool, + batch: bool, +} + +impl EncodedEntryMeta { + #[inline] + pub(crate) const fn new(key_len: usize, value_len: usize, versioned: bool) -> Self { + // Cast to u32 is safe, because we already checked those values before calling this function. 
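+    // Worked example (illustrative; RECORD_FLAG_SIZE = 1 and CHECKSUM_SIZE = 8
+    // are assumptions taken from the record layout diagram elsewhere in this
+    // patch): for klen = 4, vlen = 4, versioned = false, the merged length
+    // 4 << 32 | 4 is a 35-bit value, so its LEB128 varint takes 5 bytes, and
+    // entry_size = 1 + 5 + 1 (EntryFlags) + 0 (no version) + 4 + 4 + 8 = 23.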
+ + let len = merge_lengths(key_len as u32, value_len as u32); + let len_size = encoded_u64_varint_len(len); + let version_size = if versioned { VERSION_SIZE } else { 0 }; + let elen = RECORD_FLAG_SIZE as u32 + + len_size as u32 + + ENTRY_FLAGS_SIZE as u32 + + version_size as u32 + + key_len as u32 + + value_len as u32 + + CHECKSUM_SIZE as u32; + + Self { + packed_kvlen_size: len_size, + batch: false, + packed_kvlen: len, + entry_size: elen, + klen: key_len, + vlen: value_len, + versioned, + } + } + + #[inline] + pub(crate) const fn batch(key_len: usize, value_len: usize, versioned: bool) -> Self { + // Cast to u32 is safe, because we already checked those values before calling this function. + + let len = merge_lengths(key_len as u32, value_len as u32); + let len_size = encoded_u64_varint_len(len); + let version_size = if versioned { VERSION_SIZE } else { 0 }; + let elen = len_size as u32 + + EntryFlags::SIZE as u32 + + version_size as u32 + + key_len as u32 + + value_len as u32; + + Self { + packed_kvlen_size: len_size, + packed_kvlen: len, + entry_size: elen, + klen: key_len, + vlen: value_len, + versioned, + batch: true, + } + } + + #[inline] + pub(crate) const fn batch_zero(versioned: bool) -> Self { + Self { + packed_kvlen_size: 0, + packed_kvlen: 0, + entry_size: 0, + klen: 0, + vlen: 0, + versioned, + batch: true, + } + } + + #[inline] + pub(crate) const fn entry_flag_offset(&self) -> usize { + if self.batch { + return self.packed_kvlen_size; + } + + RECORD_FLAG_SIZE + self.packed_kvlen_size + } + + #[inline] + pub(crate) const fn version_offset(&self) -> usize { + self.entry_flag_offset() + ENTRY_FLAGS_SIZE + } + + #[inline] + pub(crate) const fn key_offset(&self) -> usize { + if self.versioned { + self.version_offset() + VERSION_SIZE + } else { + self.version_offset() + } + } + + #[inline] + pub(crate) const fn value_offset(&self) -> usize { + self.key_offset() + self.klen + } + + #[inline] + pub(crate) const fn checksum_offset(&self) -> usize { + if self.batch { + self.value_offset() + self.vlen + } else { + self.entry_size as usize - CHECKSUM_SIZE + } + } +} + +macro_rules! builder_ext { + ($($name:ident),+ $(,)?) => { + $( + paste::paste! { + impl $name { + #[doc = "Creates a new `" $name "` with the given size and builder closure which requires `FnOnce`."] + #[inline] + pub const fn once(size: usize, f: F) -> Self + where + F: for<'a> FnOnce(&mut dbutils::buffer::VacantBuffer<'a>) -> Result, + { + Self { size, f } + } + } + } + )* + }; +} + +dbutils::builder!( + /// A value builder for the wal, which requires the value size for accurate allocation and a closure to build the value. + pub ValueBuilder; + /// A key builder for the wal, which requires the key size for accurate allocation and a closure to build the key. + pub KeyBuilder; +); + +builder_ext!(ValueBuilder, KeyBuilder,); diff --git a/src/types/base.rs b/src/types/base.rs new file mode 100644 index 0000000..1449d3e --- /dev/null +++ b/src/types/base.rs @@ -0,0 +1,296 @@ +use dbutils::types::{KeyRef, Type}; +use skl::LazyRef; + +use crate::{memtable::MemtableEntry, sealed::WithoutVersion}; + +/// The reference to an entry in the generic WALs. 
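
`builder_ext!` above layers an `FnOnce` constructor on top of the `dbutils::builder!`-generated `KeyBuilder`/`ValueBuilder`. The calling shape mirrors the removed test helpers earlier in this patch; the `wal` variable and its `insert_with_builders` bound are assumed context:

```rust
use orderwal::{KeyBuilder, ValueBuilder};

// Encode a u32 key and value straight into the WAL's reserved space; the
// first argument is the exact number of bytes the closure will write, and
// `()` is the builder's error type.
let i = 42u32;
wal
  .insert_with_builders::<(), ()>(
    KeyBuilder::<_>::once(4, |buf| {
      let _ = buf.put_u32_be(i);
      Ok(())
    }),
    ValueBuilder::<_>::once(4, |buf| {
      let _ = buf.put_u32_be(i);
      Ok(())
    }),
  )
  .unwrap();
```
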
+pub struct Entry<'a, E> +where + E: MemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + ent: E, + key: LazyRef<'a, E::Key>, + value: LazyRef<'a, E::Value>, +} + +impl<'a, E> core::fmt::Debug for Entry<'a, E> +where + E: MemtableEntry<'a> + core::fmt::Debug, + E::Key: Type, + E::Value: Type, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Entry") + .field("key", &self.key()) + .field("value", &self.value()) + .finish() + } +} + +impl<'a, E> Clone for Entry<'a, E> +where + E: MemtableEntry<'a> + Clone, + E::Key: Type, + E::Value: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + key: self.key.clone(), + value: self.value.clone(), + } + } +} + +impl<'a, E> Entry<'a, E> +where + E: MemtableEntry<'a> + WithoutVersion, + E::Key: Type, + E::Value: Type, +{ + #[inline] + pub(crate) fn new(ent: E) -> Self { + let raw_key = ent.key().as_slice(); + let raw_value = ent.value().as_slice(); + unsafe { + Self { + key: LazyRef::from_raw(raw_key), + value: LazyRef::from_raw(raw_value), + ent, + } + } + } +} + +impl<'a, E> Entry<'a, E> +where + E: MemtableEntry<'a> + WithoutVersion, + E::Key: Type + Ord, + ::Ref<'a>: KeyRef<'a, E::Key>, + E::Value: Type, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self.ent.next().map(Self::new) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self.ent.prev().map(Self::new) + } +} + +impl<'a, E> Entry<'a, E> +where + E: MemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + /// Returns the key of the entry. + #[inline] + pub fn key(&self) -> &::Ref<'a> { + self.key.get() + } + + /// Returns the raw key of the entry. + #[inline] + pub fn raw_key(&self) -> &[u8] { + self.key.raw().expect("Entry's raw key cannot be None") + } + + /// Returns the value of the entry. + #[inline] + pub fn value(&self) -> &::Ref<'a> { + self.value.get() + } + + /// Returns the raw value of the entry. + #[inline] + pub fn raw_value(&self) -> &[u8] { + self.value.raw().expect("Entry's raw value cannot be None") + } +} + +/// The reference to a key of the entry in the generic WALs. +pub struct Key<'a, E> +where + E: MemtableEntry<'a>, + E::Key: Type, +{ + ent: E, + key: LazyRef<'a, E::Key>, +} + +impl<'a, E> core::fmt::Debug for Key<'a, E> +where + E: MemtableEntry<'a> + core::fmt::Debug, + E::Key: Type, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Key").field("key", &self.key()).finish() + } +} + +impl<'a, E> Clone for Key<'a, E> +where + E: MemtableEntry<'a> + Clone, + E::Key: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + key: self.key.clone(), + } + } +} + +impl<'a, E> Key<'a, E> +where + E::Key: Type + Ord, + ::Ref<'a>: KeyRef<'a, E::Key>, + E: MemtableEntry<'a>, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self.ent.next().map(Self::new) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. 
+ #[inline] + pub fn prev(&mut self) -> Option { + self.ent.prev().map(Self::new) + } +} + +impl<'a, E> Key<'a, E> +where + E::Key: Type, + E: MemtableEntry<'a>, +{ + /// Returns the key of the entry. + #[inline] + pub fn key(&self) -> &::Ref<'a> { + self.key.get() + } + + /// Returns the raw key of the entry. + #[inline] + pub fn raw_key(&self) -> &[u8] { + self.key.raw().expect("Key's raw key cannot be None") + } + + #[inline] + pub(crate) fn new(ent: E) -> Self { + let raw_key = ent.key().as_slice(); + unsafe { + Self { + key: LazyRef::from_raw(raw_key), + ent, + } + } + } +} + +/// The reference to a value of the entry in the generic WALs. +pub struct Value<'a, E> +where + E::Value: Type, + E: MemtableEntry<'a>, +{ + ent: E, + raw_key: &'a [u8], + value: LazyRef<'a, E::Value>, +} + +impl<'a, E> core::fmt::Debug for Value<'a, E> +where + E: MemtableEntry<'a> + core::fmt::Debug, + E::Value: Type, + ::Ref<'a>: core::fmt::Debug, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Value") + .field("value", &self.value()) + .finish() + } +} + +impl<'a, E> Clone for Value<'a, E> +where + E: MemtableEntry<'a> + Clone, + E::Value: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + raw_key: self.raw_key, + value: self.value.clone(), + } + } +} + +impl<'a, E> Value<'a, E> +where + E: MemtableEntry<'a>, + E::Value: Type, +{ + #[inline] + pub(crate) fn new(ent: E) -> Self { + let raw_key = ent.key().as_slice(); + let raw_value = ent.value().as_slice(); + unsafe { + Self { + raw_key, + value: LazyRef::from_raw(raw_value), + ent, + } + } + } + + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self.ent.next().map(Self::new) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self.ent.prev().map(Self::new) + } + + /// Returns the value of the entry. + #[inline] + pub fn value(&self) -> &::Ref<'a> { + self.value.get() + } + + /// Returns the raw value of the entry. + #[inline] + pub fn raw_value(&self) -> &[u8] { + self.value.raw().expect("Value's raw value cannot be None") + } +} diff --git a/src/types/multiple_version.rs b/src/types/multiple_version.rs new file mode 100644 index 0000000..be36128 --- /dev/null +++ b/src/types/multiple_version.rs @@ -0,0 +1,525 @@ +use dbutils::types::{KeyRef, Type}; +use skl::LazyRef; + +use crate::memtable::VersionedMemtableEntry; + +/// The reference to an entry in the generic WALs. 
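
The `Entry`, `Key` and `Value` views above decode lazily: `raw_key()`/`raw_value()` return the stored bytes, while `key()`/`value()` go through `Type::Ref` on first access via `LazyRef`, and `next()`/`prev()` hand back fresh views without advancing the iterator that produced them. A sketch of what that buys a caller; the `[u8]` WAL and the `iter()` shape are assumptions consistent with the rest of this patch:

```rust
// Peek at a neighbour without disturbing iteration (illustrative only).
let mut iter = wal.iter();
if let Some(mut ent) = iter.next() {
  let _bytes: &[u8] = ent.raw_key(); // stored bytes, no decoding
  let _typed = ent.key();            // decoded lazily via Type::Ref

  if let Some(peeked) = ent.next() {
    // `iter` has not moved: its own next() still yields the entry after
    // `ent`, and `peeked` is an independent handle to that same entry.
    let from_iter = iter.next().unwrap();
    assert_eq!(peeked.raw_key(), from_iter.raw_key());
  }
}
```
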
+pub struct Entry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + ent: E, + key: LazyRef<'a, E::Key>, + value: LazyRef<'a, E::Value>, + version: u64, + query_version: u64, +} + +impl<'a, E> core::fmt::Debug for Entry<'a, E> +where + E: VersionedMemtableEntry<'a> + core::fmt::Debug, + E::Key: Type, + E::Value: Type, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Entry") + .field("key", &self.key()) + .field("value", &self.value()) + .field("version", &self.version) + .finish() + } +} + +impl<'a, E> Clone for Entry<'a, E> +where + E: VersionedMemtableEntry<'a> + Clone, + E::Key: Type, + E::Value: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + key: self.key.clone(), + value: self.value.clone(), + version: self.version, + query_version: self.query_version, + } + } +} + +impl<'a, E> Entry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + #[inline] + pub(crate) fn with_version(ent: E, query_version: u64) -> Self { + let version = ent.version(); + let raw_key = ent.key().as_slice(); + let raw_value = ent + .value() + .expect("value must be present on Entry") + .as_slice(); + unsafe { + Self { + key: LazyRef::from_raw(raw_key), + value: LazyRef::from_raw(raw_value), + version, + query_version, + ent, + } + } + } +} + +impl<'a, E> Entry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type + Ord, + ::Ref<'a>: KeyRef<'a, E::Key>, + E::Value: Type, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self + .ent + .next() + .map(|ent| Self::with_version(ent, self.query_version)) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self + .ent + .prev() + .map(|ent| Self::with_version(ent, self.query_version)) + } +} + +impl<'a, E> Entry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + /// Returns the version of the entry. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } + + /// Returns the key of the entry. + #[inline] + pub fn key(&self) -> &::Ref<'a> { + self.key.get() + } + + /// Returns the raw key of the entry. + #[inline] + pub fn raw_key(&self) -> &'a [u8] { + self.key.raw().expect("Entry's raw key cannot be None") + } + + /// Returns the value of the entry. + #[inline] + pub fn value(&self) -> &::Ref<'a> { + self.value.get() + } + + /// Returns the raw value of the entry. + #[inline] + pub fn raw_value(&self) -> &'a [u8] { + self.value.raw().expect("Entry's raw value cannot be None") + } +} + +/// The reference to a key of the entry in the generic WALs. 
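
The versioned `Entry` above carries two versions: `version()` is the version the record was written at, while the private `query_version` remembers the snapshot the lookup was made against so that `next()`/`prev()` stay inside it. A sketch of what that means for reads; the `insert(version, ...)`/`get(version, ...)` shapes are assumptions based on this patch's `multiple_version` module and example listing:

```rust
// Illustrative only: reads are made "as of" a version.
wal.insert(1, b"k".as_slice(), b"v1".as_slice()).unwrap();
wal.insert(3, b"k".as_slice(), b"v3".as_slice()).unwrap();

let ent = wal.get(2, b"k".as_slice()).unwrap(); // query_version = 2
assert_eq!(ent.version(), 1); // the latest write at or below version 2
```
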
+pub struct Key<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, +{ + ent: E, + key: LazyRef<'a, E::Key>, + version: u64, + query_version: u64, +} + +impl<'a, E> core::fmt::Debug for Key<'a, E> +where + E: VersionedMemtableEntry<'a> + core::fmt::Debug, + E::Key: Type, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Key") + .field("key", &self.key()) + .field("version", &self.version) + .finish() + } +} + +impl<'a, E> Clone for Key<'a, E> +where + E: VersionedMemtableEntry<'a> + Clone, + E::Key: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + key: self.key.clone(), + version: self.version, + query_version: self.query_version, + } + } +} + +impl<'a, E> Key<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, +{ + #[inline] + pub(crate) fn with_version(ent: E, query_version: u64) -> Self { + let raw_key = ent.key().as_slice(); + let version = ent.version(); + Self { + key: unsafe { LazyRef::from_raw(raw_key) }, + version, + query_version, + ent, + } + } +} + +impl<'a, E> Key<'a, E> +where + E::Key: Type + Ord, + ::Ref<'a>: KeyRef<'a, E::Key>, + E: VersionedMemtableEntry<'a>, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self + .ent + .next() + .map(|ent| Self::with_version(ent, self.query_version)) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self + .ent + .prev() + .map(|ent| Self::with_version(ent, self.query_version)) + } +} + +impl<'a, E> Key<'a, E> +where + E::Key: Type, + E: VersionedMemtableEntry<'a>, +{ + /// Returns the version of the entry. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } + + /// Returns the key of the entry. + #[inline] + pub fn key(&self) -> &::Ref<'a> { + self.key.get() + } + + /// Returns the raw key of the entry. + #[inline] + pub fn raw_key(&self) -> &'a [u8] { + self.key.raw().expect("Key's raw key cannot be None") + } +} + +/// The reference to a value of the entry in the generic WALs. 
+pub struct Value<'a, E> +where + E::Value: Type, + E: VersionedMemtableEntry<'a>, +{ + ent: E, + raw_key: &'a [u8], + value: LazyRef<'a, E::Value>, + version: u64, + query_version: u64, +} + +impl<'a, E> core::fmt::Debug for Value<'a, E> +where + E: VersionedMemtableEntry<'a> + core::fmt::Debug, + E::Value: Type, + ::Ref<'a>: core::fmt::Debug, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Value") + .field("value", &self.value()) + .field("version", &self.version) + .finish() + } +} + +impl<'a, E> Clone for Value<'a, E> +where + E: VersionedMemtableEntry<'a> + Clone, + E::Value: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + raw_key: self.raw_key, + value: self.value.clone(), + version: self.version, + query_version: self.query_version, + } + } +} + +impl<'a, E> Value<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Value: Type, +{ + #[inline] + pub(crate) fn with_version(ent: E, query_version: u64) -> Self { + let raw_key = ent.key().as_slice(); + let raw_value = ent + .value() + .expect("value must be present on Value") + .as_slice(); + let version = ent.version(); + Self { + raw_key, + value: unsafe { LazyRef::from_raw(raw_value) }, + version, + query_version, + ent, + } + } +} + +impl<'a, E> Value<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Value: Type, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self + .ent + .next() + .map(|ent| Self::with_version(ent, self.query_version)) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self + .ent + .prev() + .map(|ent| Self::with_version(ent, self.query_version)) + } +} + +impl<'a, E> Value<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Value: Type, +{ + /// Returns the version of the entry. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } + + /// Returns the value of the entry. + #[inline] + pub fn value(&self) -> &::Ref<'a> { + self.value.get() + } + + /// Returns the raw value of the entry. + #[inline] + pub fn raw_value(&self) -> &'a [u8] { + self.value.raw().expect("Value's raw value cannot be None") + } +} + +/// The reference to an entry in the generic WALs. 
+pub struct MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + ent: E, + key: LazyRef<'a, E::Key>, + value: Option>, + version: u64, + query_version: u64, +} + +impl<'a, E> core::fmt::Debug for MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a> + core::fmt::Debug, + E::Key: Type, + E::Value: Type, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("MultipleVersionEntry") + .field("key", &self.key()) + .field("value", &self.value()) + .field("version", &self.version) + .finish() + } +} + +impl<'a, E> Clone for MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a> + Clone, + E::Key: Type, + E::Value: Type, +{ + #[inline] + fn clone(&self) -> Self { + Self { + ent: self.ent.clone(), + key: self.key.clone(), + value: self.value.clone(), + version: self.version, + query_version: self.query_version, + } + } +} + +impl<'a, E> MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + #[inline] + pub(crate) fn with_version(ent: E, query_version: u64) -> Self { + let raw_key = ent.key().as_slice(); + let raw_value = ent.value().map(|v| v.as_slice()); + let version = ent.version(); + unsafe { + Self { + key: LazyRef::from_raw(raw_key), + value: raw_value.map(|v| LazyRef::from_raw(v)), + version, + query_version, + ent, + } + } + } +} + +impl<'a, E> MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Ord + Type, + for<'b> ::Ref<'b>: KeyRef<'b, E::Key>, + E::Value: Type, +{ + /// Returns the next entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option { + self + .ent + .next() + .map(|ent| Self::with_version(ent, self.query_version)) + } + + /// Returns the previous entry in the generic WALs. + /// + /// This does not move the cursor. + #[inline] + pub fn prev(&mut self) -> Option { + self + .ent + .prev() + .map(|ent| Self::with_version(ent, self.query_version)) + } +} + +impl<'a, E> MultipleVersionEntry<'a, E> +where + E: VersionedMemtableEntry<'a>, + E::Key: Type, + E::Value: Type, +{ + /// Returns the version of the entry. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } + + /// Returns the key of the entry. + #[inline] + pub fn key(&self) -> &::Ref<'a> { + self.key.get() + } + + /// Returns the raw key of the entry. + #[inline] + pub fn raw_key(&self) -> &'a [u8] { + self + .key + .raw() + .expect("MultipleVersionEntry's raw key cannot be None") + } + + /// Returns the value of the entry. + #[inline] + pub fn value(&self) -> Option<&::Ref<'a>> { + self.value.as_deref() + } + + /// Returns the raw value of the entry. 
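
Unlike the `Entry`/`Value` views above, `MultipleVersionEntry` makes the value an `Option`: a removal is stored as a real, versioned record with no value, and only the all-versions iterators surface it. A sketch, where `remove` and `get` follow the multi-version writer API assumed elsewhere in this patch and the exact signatures are not taken from the diff:

```rust
// Illustrative only: a remove is itself a versioned entry.
wal.insert(1, b"k".as_slice(), b"v".as_slice()).unwrap();
wal.remove(2, b"k".as_slice()).unwrap();

// A point read at version 3 resolves to the tombstone, i.e. nothing...
assert!(wal.get(3, b"k".as_slice()).is_none());
// ...while iterating all versions still yields both records, the newest
// one with value() == None.
```
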
+ #[inline] + pub fn raw_value(&self) -> Option<&'a [u8]> { + match self.value.as_ref() { + None => None, + Some(v) => Some( + v.raw() + .expect("MultipleVersionEntry's raw value cannot be None if value exists"), + ), + } + } +} diff --git a/src/unsync.rs b/src/unsync.rs deleted file mode 100644 index 9e3885e..0000000 --- a/src/unsync.rs +++ /dev/null @@ -1,372 +0,0 @@ -use core::{cell::UnsafeCell, ops::RangeBounds}; -use std::{collections::BTreeSet, rc::Rc}; - -use super::*; - -use checksum::BuildChecksumer; -use either::Either; -use error::Error; -use pointer::Pointer; -use rarena_allocator::unsync::Arena; -use wal::sealed::{Constructor, Sealed}; - -pub use super::{ - builder::Builder, - wal::{Batch, BatchWithBuilders, BatchWithKeyBuilder, BatchWithValueBuilder, ImmutableWal, Wal}, - Comparator, KeyBuilder, VacantBuffer, ValueBuilder, -}; - -/// Iterators for the `OrderWal`. -pub mod iter; -use iter::*; - -mod c; -use c::*; - -#[cfg(all( - test, - any( - all_tests, - test_unsync_constructor, - test_unsync_insert, - test_unsync_get, - test_unsync_iters, - ) -))] -mod tests; - -/// An ordered write-ahead log implementation for single thread environments. -/// -/// Only the first instance of the WAL can write to the log, while the rest can only read from the log. -// ```text -// +----------------------+-------------------------+--------------------+ -// | magic text (6 bytes) | magic version (2 bytes) | header (8 bytes) | -// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+ -// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+-------------------------+--------------------+---------------------+-----------------|--------------------+ -// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+ -// | flag (1 byte) | key len (4 bytes) | key (n bytes) | value len (4 bytes) | value (n bytes) | checksum (8 bytes) | -// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+ -// | ... | ... | ... | ... | ... | ... | -// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+ -// | ... | ... | ... | ... | ... | ... | -// +----------------------+-------------------------+--------------------+---------------------+-----------------+--------------------+ -// ``` -pub struct OrderWal { - core: Rc>>, - _s: PhantomData, -} - -impl Constructor for OrderWal -where - C: Comparator + 'static, -{ - type Allocator = Arena; - type Core = OrderWalCore; - type Pointer = Pointer; - - #[inline] - fn allocator(&self) -> &Self::Allocator { - &self.core().arena - } - - #[inline] - fn from_core(core: Self::Core) -> Self { - Self { - core: Rc::new(UnsafeCell::new(core)), - _s: PhantomData, - } - } -} - -impl OrderWal { - /// Returns the path of the WAL if it is backed by a file. 
- /// - /// ## Example - /// - /// ```rust - /// use orderwal::{unsync::OrderWal, Wal, Builder}; - /// - /// // A in-memory WAL - /// let wal = Builder::new().with_capacity(100).alloc::().unwrap(); - /// - /// assert!(wal.path_buf().is_none()); - /// ``` - pub fn path_buf(&self) -> Option<&std::rc::Rc> { - self.core().arena.path() - } - - #[inline] - fn core(&self) -> &OrderWalCore { - unsafe { &*self.core.get() } - } -} - -impl Sealed for OrderWal -where - C: Comparator + 'static, -{ - #[inline] - fn hasher(&self) -> &S { - &self.core().cks - } - - #[inline] - fn options(&self) -> &Options { - &self.core().opts - } - - #[inline] - fn comparator(&self) -> &C { - &self.core().cmp - } - - #[inline] - fn insert_pointer(&self, ptr: Pointer) - where - C: Comparator, - { - unsafe { - (*self.core.get()).map.insert(ptr); - } - } - - #[inline] - fn insert_pointers(&self, ptrs: impl Iterator>) - where - C: Comparator, - { - unsafe { - (*self.core.get()).map.extend(ptrs); - } - } -} - -impl ImmutableWal for OrderWal -where - C: Comparator + 'static, -{ - type Iter<'a> - = Iter<'a, C> - where - Self: 'a, - C: Comparator; - type Range<'a, Q, R> - = Range<'a, C> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - type Keys<'a> - = Keys<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeKeys<'a, Q, R> - = RangeKeys<'a, C> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - type Values<'a> - = Values<'a, C> - where - Self: 'a, - C: Comparator; - - type RangeValues<'a, Q, R> - = RangeValues<'a, C> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - #[inline] - fn options(&self) -> &Options { - &self.core().opts - } - - #[inline] - fn path(&self) -> Option<&std::path::Path> { - self.core().arena.path().map(|p| p.as_ref().as_path()) - } - - /// Returns the number of entries in the WAL. - #[inline] - fn len(&self) -> usize { - self.core().map.len() - } - - /// Returns `true` if the WAL is empty. 
- #[inline] - fn is_empty(&self) -> bool { - self.core().map.is_empty() - } - - #[inline] - fn maximum_key_size(&self) -> u32 { - self.core().opts.maximum_key_size() - } - - #[inline] - fn maximum_value_size(&self) -> u32 { - self.core().opts.maximum_value_size() - } - - #[inline] - fn remaining(&self) -> u32 { - self.core().arena.remaining() as u32 - } - - #[inline] - fn contains_key(&self, key: &Q) -> bool - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self.core().map.contains(key) - } - - #[inline] - fn iter(&self) -> Self::Iter<'_> - where - C: Comparator, - { - Iter::new(self.core().map.iter()) - } - - #[inline] - fn range(&self, range: R) -> Self::Range<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - Range::new(self.core().map.range(range)) - } - - #[inline] - fn keys(&self) -> Self::Keys<'_> - where - C: Comparator, - { - Keys::new(self.core().map.iter()) - } - - #[inline] - fn range_keys(&self, range: R) -> Self::RangeKeys<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - RangeKeys::new(self.core().map.range(range)) - } - - #[inline] - fn values(&self) -> Self::Values<'_> - where - C: Comparator, - { - Values::new(self.core().map.iter()) - } - - #[inline] - fn range_values(&self, range: R) -> Self::RangeValues<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator, - { - RangeValues::new(self.core().map.range(range)) - } - - #[inline] - fn first(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self - .core() - .map - .first() - .map(|ent| (ent.as_key_slice(), ent.as_value_slice())) - } - - #[inline] - fn last(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator, - { - self - .core() - .map - .last() - .map(|ent| (ent.as_key_slice(), ent.as_value_slice())) - } - - #[inline] - fn get(&self, key: &Q) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self.core().map.get(key).map(|ent| ent.as_value_slice()) - } -} - -impl Wal for OrderWal -where - C: Comparator + 'static, -{ - type Reader = Self; - - #[inline] - fn reader(&self) -> Self::Reader { - Self { - core: self.core.clone(), - _s: PhantomData, - } - } - - fn get_or_insert_with_value_builder( - &mut self, - key: &[u8], - vb: ValueBuilder) -> Result<(), E>>, - ) -> Result, Either> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .check( - key.len(), - vb.size() as usize, - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - ) - .map_err(Either::Right)?; - - if let Some(ent) = self.core().map.get(key) { - return Ok(Some(ent.as_value_slice())); - } - - self.insert_with_value_builder::(key, vb).map(|_| None) - } -} diff --git a/src/unsync/c.rs b/src/unsync/c.rs deleted file mode 100644 index 04175ba..0000000 --- a/src/unsync/c.rs +++ /dev/null @@ -1,48 +0,0 @@ -use wal::sealed::{Base, WalCore}; - -use super::*; - -pub struct OrderWalCore { - pub(super) arena: Arena, - pub(super) map: BTreeSet>, - pub(super) opts: Options, - pub(super) cmp: C, - pub(super) cks: S, -} - -impl Base for BTreeSet> -where - C: Comparator, -{ - type Pointer = Pointer; - - fn insert(&mut self, ele: Self::Pointer) { - BTreeSet::insert(self, ele); - } -} - -impl WalCore for OrderWalCore -where - C: Comparator, -{ - type Allocator = Arena; - type Base = BTreeSet>; - type Pointer = Pointer; - - #[inline] - fn construct( - arena: Arena, - set: BTreeSet>, - opts: Options, - cmp: C, - checksumer: S, - ) -> Self { - Self { - 
arena, - map: set, - cmp, - opts, - cks: checksumer, - } - } -} diff --git a/src/unsync/iter.rs b/src/unsync/iter.rs deleted file mode 100644 index d4eda63..0000000 --- a/src/unsync/iter.rs +++ /dev/null @@ -1,234 +0,0 @@ -use core::iter::FusedIterator; -use std::collections::btree_set; - -use super::*; - -/// Iterator over the entries in the WAL. -pub struct Iter<'a, C> { - iter: btree_set::Iter<'a, Pointer>, -} - -impl<'a, C> Iter<'a, C> { - #[inline] - pub(super) fn new(iter: btree_set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for Iter<'a, C> { - type Item = (&'a [u8], &'a [u8]); - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| { - let k = ptr.as_key_slice(); - let v = ptr.as_value_slice(); - (k, v) - }) - } -} - -impl DoubleEndedIterator for Iter<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| { - let k = ptr.as_key_slice(); - let v = ptr.as_value_slice(); - (k, v) - }) - } -} - -impl FusedIterator for Iter<'_, C> {} - -/// Iterator over the keys in the WAL. -pub struct Keys<'a, C> { - iter: btree_set::Iter<'a, Pointer>, -} - -impl<'a, C> Keys<'a, C> { - #[inline] - pub(super) fn new(iter: btree_set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for Keys<'a, C> { - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_key_slice()) - } -} - -impl DoubleEndedIterator for Keys<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_key_slice()) - } -} - -impl FusedIterator for Keys<'_, C> {} - -/// Iterator over the values in the WAL. -pub struct Values<'a, C> { - iter: btree_set::Iter<'a, Pointer>, -} - -impl<'a, C> Values<'a, C> { - #[inline] - pub(super) fn new(iter: btree_set::Iter<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for Values<'a, C> { - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_value_slice()) - } -} - -impl DoubleEndedIterator for Values<'_, C> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_value_slice()) - } -} - -impl FusedIterator for Values<'_, C> {} - -/// An iterator over a subset of the entries in the WAL. -pub struct Range<'a, C> -where - C: Comparator, -{ - iter: btree_set::Range<'a, Pointer>, -} - -impl<'a, C> Range<'a, C> -where - C: Comparator, -{ - #[inline] - pub(super) fn new(iter: btree_set::Range<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for Range<'a, C> -where - C: Comparator, -{ - type Item = (&'a [u8], &'a [u8]); - - #[inline] - fn next(&mut self) -> Option { - self - .iter - .next() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -impl DoubleEndedIterator for Range<'_, C> -where - C: Comparator, -{ - #[inline] - fn next_back(&mut self) -> Option { - self - .iter - .next_back() - .map(|ptr| (ptr.as_key_slice(), ptr.as_value_slice())) - } -} - -impl FusedIterator for Range<'_, C> where C: Comparator {} - -/// An iterator over the keys in a subset of the entries in the WAL. 
-pub struct RangeKeys<'a, C> -where - C: Comparator, -{ - iter: btree_set::Range<'a, Pointer>, -} - -impl<'a, C> RangeKeys<'a, C> -where - C: Comparator, -{ - #[inline] - pub(super) fn new(iter: btree_set::Range<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for RangeKeys<'a, C> -where - C: Comparator, -{ - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_key_slice()) - } -} - -impl DoubleEndedIterator for RangeKeys<'_, C> -where - C: Comparator, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_key_slice()) - } -} - -impl FusedIterator for RangeKeys<'_, C> where C: Comparator {} - -/// An iterator over the values in a subset of the entries in the WAL. -pub struct RangeValues<'a, C> -where - C: Comparator, -{ - iter: btree_set::Range<'a, Pointer>, -} - -impl<'a, C> RangeValues<'a, C> -where - C: Comparator, -{ - #[inline] - pub(super) fn new(iter: btree_set::Range<'a, Pointer>) -> Self { - Self { iter } - } -} - -impl<'a, C> Iterator for RangeValues<'a, C> -where - C: Comparator, -{ - type Item = &'a [u8]; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| ptr.as_value_slice()) - } -} - -impl DoubleEndedIterator for RangeValues<'_, C> -where - C: Comparator, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| ptr.as_value_slice()) - } -} - -impl FusedIterator for RangeValues<'_, C> where C: Comparator {} diff --git a/src/unsync/tests.rs b/src/unsync/tests.rs deleted file mode 100644 index df919bc..0000000 --- a/src/unsync/tests.rs +++ /dev/null @@ -1,15 +0,0 @@ -use super::*; - -#[cfg(all(test, any(test_unsync_constructor, all_tests)))] -mod constructor; - -#[cfg(all(test, any(test_unsync_insert, all_tests)))] -mod insert; - -#[cfg(all(test, any(test_unsync_iters, all_tests)))] -mod iter; - -#[cfg(all(test, any(test_unsync_get, all_tests)))] -mod get; - -const MB: u32 = 1024 * 1024; diff --git a/src/unsync/tests/constructor.rs b/src/unsync/tests/constructor.rs deleted file mode 100644 index 03e864a..0000000 --- a/src/unsync/tests/constructor.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(unsync::constructor::OrderWal); diff --git a/src/unsync/tests/get.rs b/src/unsync/tests/get.rs deleted file mode 100644 index c9eefcf..0000000 --- a/src/unsync/tests/get.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(unsync::get::OrderWal); diff --git a/src/unsync/tests/insert.rs b/src/unsync/tests/insert.rs deleted file mode 100644 index 99bb41a..0000000 --- a/src/unsync/tests/insert.rs +++ /dev/null @@ -1,5 +0,0 @@ -use super::*; - -common_unittests!(unsync::insert::OrderWal); - -common_unittests!(unsync::insert_batch::OrderWal); diff --git a/src/unsync/tests/iter.rs b/src/unsync/tests/iter.rs deleted file mode 100644 index f20d78e..0000000 --- a/src/unsync/tests/iter.rs +++ /dev/null @@ -1,3 +0,0 @@ -use super::*; - -common_unittests!(unsync::iters::OrderWal); diff --git a/src/utils.rs b/src/utils.rs index fa043a3..cde84a7 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,6 +1,4 @@ -pub use dbutils::leb128::*; - -use super::*; +pub use dbutils::leb128; /// Merge two `u32` into a `u64`. 
/// @@ -16,89 +14,7 @@ pub(crate) const fn merge_lengths(a: u32, b: u32) -> u64 { /// - high 32 bits: the first `u32` /// - low 32 bits: the second `u32` #[inline] +#[cfg(all(feature = "std", not(target_family = "wasm")))] pub(crate) const fn split_lengths(len: u64) -> (u32, u32) { ((len >> 32) as u32, len as u32) } - -/// - The first `usize` is the length of the encoded `klen + vlen` -/// - The second `u64` is encoded `klen + vlen` -/// - The third `u32` is the full entry size -#[inline] -pub(crate) const fn entry_size(key_len: u32, value_len: u32) -> (usize, u64, u32) { - let len = merge_lengths(key_len, value_len); - let len_size = encoded_u64_varint_len(len); - let elen = STATUS_SIZE as u32 + len_size as u32 + key_len + value_len + CHECKSUM_SIZE as u32; - - (len_size, len, elen) -} - -#[inline] -pub(crate) const fn arena_options(reserved: u32) -> ArenaOptions { - ArenaOptions::new() - .with_magic_version(CURRENT_VERSION) - .with_freelist(Freelist::None) - .with_reserved((HEADER_SIZE + reserved as usize) as u32) - .with_unify(true) -} - -#[inline] -pub(crate) const fn min_u64(a: u64, b: u64) -> u64 { - if a < b { - a - } else { - b - } -} - -#[inline] -pub(crate) const fn check( - klen: usize, - vlen: usize, - max_key_size: u32, - max_value_size: u32, - ro: bool, -) -> Result<(), error::Error> { - if ro { - return Err(error::Error::read_only()); - } - - let max_ksize = min_u64(max_key_size as u64, u32::MAX as u64); - let max_vsize = min_u64(max_value_size as u64, u32::MAX as u64); - - if max_ksize < klen as u64 { - return Err(error::Error::key_too_large(klen as u64, max_key_size)); - } - - if max_vsize < vlen as u64 { - return Err(error::Error::value_too_large(vlen as u64, max_value_size)); - } - - let (_, _, elen) = entry_size(klen as u32, vlen as u32); - - if elen == u32::MAX { - return Err(error::Error::entry_too_large( - elen as u64, - min_u64(max_key_size as u64 + max_value_size as u64, u32::MAX as u64), - )); - } - - Ok(()) -} - -#[inline] -pub(crate) fn check_batch_entry( - klen: usize, - vlen: usize, - max_key_size: u32, - max_value_size: u32, -) -> Result<(), Error> { - if klen > max_key_size as usize { - return Err(Error::key_too_large(klen as u64, max_key_size)); - } - - if vlen > max_value_size as usize { - return Err(Error::value_too_large(vlen as u64, max_value_size)); - } - - Ok(()) -} diff --git a/src/wal.rs b/src/wal.rs index d159d04..8536da5 100644 --- a/src/wal.rs +++ b/src/wal.rs @@ -1,489 +1,9 @@ -use checksum::BuildChecksumer; -use core::ops::{Bound, RangeBounds}; +pub(crate) mod base; +pub(crate) mod iter; +pub(crate) mod multiple_version; -use super::{pointer::Pointer, *}; +mod query; +pub(crate) use query::*; -pub(crate) mod sealed; - -mod batch; -pub use batch::*; - -/// An abstract layer for the immutable write-ahead log. -pub trait ImmutableWal: sealed::Constructor { - /// The iterator type. - type Iter<'a>: Iterator + DoubleEndedIterator - where - Self: 'a, - C: Comparator; - - /// The iterator type over a subset of entries in the WAL. - type Range<'a, Q, R>: Iterator + DoubleEndedIterator - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - /// The keys iterator type. - type Keys<'a>: Iterator + DoubleEndedIterator - where - Self: 'a, - C: Comparator; - - /// The iterator type over a subset of keys in the WAL. - type RangeKeys<'a, Q, R>: Iterator + DoubleEndedIterator - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - /// The values iterator type. 
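// Editorial aside, not part of the patch: a worked example of the length
// packing done by `merge_lengths`/`split_lengths` in the src/utils.rs hunk
// above. The first `u32` (the key length) lands in the high 32 bits and the
// second (the value length) in the low 32 bits, so the round-trip is
// lossless. Reimplemented standalone purely for illustration.
const fn merge(klen: u32, vlen: u32) -> u64 {
  ((klen as u64) << 32) | vlen as u64
}

const fn split(merged: u64) -> (u32, u32) {
  ((merged >> 32) as u32, merged as u32)
}

#[test]
fn merged_lengths_round_trip() {
  let merged = merge(3, 7);
  assert_eq!(merged, 0x0000_0003_0000_0007);
  assert_eq!(split(merged), (3, 7));
}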
- type Values<'a>: Iterator + DoubleEndedIterator - where - Self: 'a, - C: Comparator; - - /// The iterator type over a subset of values in the WAL. - type RangeValues<'a, Q, R>: Iterator + DoubleEndedIterator - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - Self: 'a, - C: Comparator; - - /// Returns the reserved space in the WAL. - /// - /// ## Safety - /// - The writer must ensure that the returned slice is not modified. - /// - This method is not thread-safe, so be careful when using it. - unsafe fn reserved_slice<'a>(&'a self) -> &'a [u8] - where - Self::Allocator: 'a, - { - let reserved = self.options().reserved(); - if reserved == 0 { - return &[]; - } - - let allocator = self.allocator(); - let reserved_slice = allocator.reserved_slice(); - &reserved_slice[HEADER_SIZE..] - } - - /// Returns the path of the WAL if it is backed by a file. - fn path(&self) -> Option<&std::path::Path>; - - /// Returns the number of entries in the WAL. - fn len(&self) -> usize; - - /// Returns `true` if the WAL is empty. - #[inline] - fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the maximum key size allowed in the WAL. - #[inline] - fn maximum_key_size(&self) -> u32 { - self.options().maximum_key_size() - } - - /// Returns the maximum value size allowed in the WAL. - #[inline] - fn maximum_value_size(&self) -> u32 { - self.options().maximum_value_size() - } - - /// Returns the remaining capacity of the WAL. - #[inline] - fn remaining(&self) -> u32 { - self.allocator().remaining() as u32 - } - - /// Returns the capacity of the WAL. - #[inline] - fn capacity(&self) -> u32 { - self.options().capacity() - } - - /// Returns the options used to create this WAL instance. - fn options(&self) -> &Options; - - /// Returns `true` if the WAL contains the specified key. - fn contains_key(&self, key: &Q) -> bool - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator; - - /// Returns an iterator over the entries in the WAL. - fn iter(&self) -> Self::Iter<'_> - where - C: Comparator; - - /// Returns an iterator over a subset of entries in the WAL. - fn range(&self, range: R) -> Self::Range<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator; - - /// Returns an iterator over the keys in the WAL. - fn keys(&self) -> Self::Keys<'_> - where - C: Comparator; - - /// Returns an iterator over a subset of keys in the WAL. - fn range_keys(&self, range: R) -> Self::RangeKeys<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator; - - /// Returns an iterator over the values in the WAL. - fn values(&self) -> Self::Values<'_> - where - C: Comparator; - - /// Returns an iterator over a subset of values in the WAL. - fn range_values(&self, range: R) -> Self::RangeValues<'_, Q, R> - where - R: RangeBounds, - [u8]: Borrow, - Q: Ord + ?Sized, - C: Comparator; - - /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. - fn first(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator; - - /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. - fn last(&self) -> Option<(&[u8], &[u8])> - where - C: Comparator; - - /// Returns the value associated with the key. - fn get(&self, key: &Q) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator; - - /// Returns a value associated to the highest element whose key is below the given bound. - /// If no such element is found then `None` is returned. 
- // TODO: implement this method for unsync::OrderWal when BTreeMap::upper_bound is stable - #[inline] - fn upper_bound(&self, bound: Bound<&Q>) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self - .range((Bound::Unbounded, bound)) - .last() - .map(|ent| ent.0) - } - - /// Returns a value associated to the lowest element whose key is above the given bound. - /// If no such element is found then `None` is returned. - // TODO: implement this method for unsync::OrderWal when BTreeMap::lower_bound is stable - #[inline] - fn lower_bound(&self, bound: Bound<&Q>) -> Option<&[u8]> - where - [u8]: Borrow, - Q: ?Sized + Ord, - C: Comparator, - { - self - .range((bound, Bound::Unbounded)) - .next() - .map(|ent| ent.0) - } -} - -/// An abstract layer for the write-ahead log. -pub trait Wal: - sealed::Sealed> + ImmutableWal -{ - /// The read only reader type for this wal. - type Reader: ImmutableWal; - - /// Returns `true` if this WAL instance is read-only. - fn read_only(&self) -> bool { - self.allocator().read_only() - } - - /// Returns the mutable reference to the reserved slice. - /// - /// ## Safety - /// - The caller must ensure that the there is no others accessing reserved slice for either read or write. - /// - This method is not thread-safe, so be careful when using it. - unsafe fn reserved_slice_mut<'a>(&'a mut self) -> &'a mut [u8] - where - Self::Allocator: 'a, - { - let reserved = sealed::Sealed::options(self).reserved(); - if reserved == 0 { - return &mut []; - } - - let allocator = self.allocator(); - let reserved_slice = allocator.reserved_slice_mut(); - &mut reserved_slice[HEADER_SIZE..] - } - - /// Flushes the to disk. - fn flush(&self) -> Result<(), Error> { - if !self.read_only() { - self.allocator().flush().map_err(Into::into) - } else { - Err(Error::read_only()) - } - } - - /// Flushes the to disk. - fn flush_async(&self) -> Result<(), Error> { - if !self.read_only() { - self.allocator().flush_async().map_err(Into::into) - } else { - Err(Error::read_only()) - } - } - - /// Returns the read-only view for the WAL. - fn reader(&self) -> Self::Reader; - - /// Get or insert a new entry into the WAL. - fn get_or_insert(&mut self, key: &[u8], value: &[u8]) -> Result, Error> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .get_or_insert_with_value_builder::<()>( - key, - ValueBuilder::once(value.len() as u32, |buf| { - buf.put_slice_unchecked(value); - Ok(()) - }), - ) - .map_err(|e| e.unwrap_right()) - } - - /// Get or insert a new entry into the WAL. - fn get_or_insert_with_value_builder( - &mut self, - key: &[u8], - vb: ValueBuilder) -> Result<(), E>>, - ) -> Result, Either> - where - C: Comparator + CheapClone, - S: BuildChecksumer; - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the key in place. - /// - /// See also [`insert_with_value_builder`](Wal::insert_with_value_builder) and [`insert_with_builders`](Wal::insert_with_builders). 
- fn insert_with_key_builder( - &mut self, - kb: KeyBuilder) -> Result<(), E>>, - value: &[u8], - ) -> Result<(), Either> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .check( - kb.size() as usize, - value.len(), - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - ) - .map_err(Either::Right)?; - - self - .insert_with_in::( - kb, - ValueBuilder::once(value.len() as u32, |buf| { - buf.put_slice(value).unwrap(); - Ok(()) - }), - ) - .map(|ptr| self.insert_pointer(ptr)) - .map_err(Among::into_left_right) - } - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the value in place. - /// - /// See also [`insert_with_key_builder`](Wal::insert_with_key_builder) and [`insert_with_builders`](Wal::insert_with_builders). - fn insert_with_value_builder( - &mut self, - key: &[u8], - vb: ValueBuilder) -> Result<(), E>>, - ) -> Result<(), Either> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .check( - key.len(), - vb.size() as usize, - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - ) - .map_err(Either::Right)?; - - self - .insert_with_in::<(), E>( - KeyBuilder::once(key.len() as u32, |buf| { - buf.put_slice_unchecked(key); - Ok(()) - }), - vb, - ) - .map(|ptr| self.insert_pointer(ptr)) - .map_err(Among::into_middle_right) - } - - /// Inserts a key-value pair into the WAL. This method - /// allows the caller to build the key and value in place. - fn insert_with_builders( - &mut self, - kb: KeyBuilder) -> Result<(), KE>>, - vb: ValueBuilder) -> Result<(), VE>>, - ) -> Result<(), Among> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self - .check( - kb.size() as usize, - vb.size() as usize, - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - ) - .map_err(Among::Right)?; - - self - .insert_with_in(kb, vb) - .map(|ptr| self.insert_pointer(ptr)) - } - - /// Inserts a batch of key-value pairs into the WAL. - fn insert_batch_with_key_builder( - &mut self, - batch: &mut B, - ) -> Result<(), Either> - where - B: BatchWithKeyBuilder>, - B::Value: Borrow<[u8]>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - if self.read_only() { - return Err(Either::Right(Error::read_only())); - } - - self - .insert_batch_with_key_builder_in(batch) - .map(|_| self.insert_pointers(batch.iter_mut().map(|ent| ent.pointer.take().unwrap()))) - } - - /// Inserts a batch of key-value pairs into the WAL. - fn insert_batch_with_value_builder( - &mut self, - batch: &mut B, - ) -> Result<(), Either> - where - B: BatchWithValueBuilder>, - B::Key: Borrow<[u8]>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - if self.read_only() { - return Err(Either::Right(Error::read_only())); - } - - self - .insert_batch_with_value_builder_in(batch) - .map(|_| self.insert_pointers(batch.iter_mut().map(|ent| ent.pointer.take().unwrap()))) - } - - /// Inserts a batch of key-value pairs into the WAL. - fn insert_batch_with_builders( - &mut self, - batch: &mut B, - ) -> Result<(), Among> - where - B: BatchWithBuilders>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - if self.read_only() { - return Err(Among::Right(Error::read_only())); - } - - self - .insert_batch_with_builders_in(batch) - .map(|_| self.insert_pointers(batch.iter_mut().map(|ent| ent.pointer.take().unwrap()))) - } - - /// Inserts a batch of key-value pairs into the WAL. 
- fn insert_batch>(&mut self, batch: &mut B) -> Result<(), Error> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - if self.read_only() { - return Err(Error::read_only()); - } - - self - .insert_batch_in(batch) - .map(|_| self.insert_pointers(batch.iter_mut().map(|ent| ent.pointer.take().unwrap()))) - } - - /// Inserts a key-value pair into the WAL. - fn insert(&mut self, key: &[u8], value: &[u8]) -> Result<(), Error> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - self.check( - key.len(), - value.len(), - self.maximum_key_size(), - self.maximum_value_size(), - self.read_only(), - )?; - - self - .insert_with_in::<(), ()>( - KeyBuilder::once(key.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice_unchecked(key); - Ok(()) - }), - ValueBuilder::once(value.len() as u32, |buf: &mut VacantBuffer<'_>| { - buf.put_slice_unchecked(value); - Ok(()) - }), - ) - .map(|ptr| self.insert_pointer(ptr)) - .map_err(Among::unwrap_right) - } -} +mod pointer; +pub use pointer::*; diff --git a/src/wal/base.rs b/src/wal/base.rs new file mode 100644 index 0000000..4709478 --- /dev/null +++ b/src/wal/base.rs @@ -0,0 +1,687 @@ +use core::ops::{Bound, RangeBounds}; + +use among::Among; +use dbutils::{ + buffer::VacantBuffer, + checksum::BuildChecksumer, + equivalent::Comparable, + types::{KeyRef, MaybeStructured, Type}, +}; +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +use rarena_allocator::Allocator; +use ref_cast::RefCast; +use skl::{either::Either, KeySize}; + +use crate::{ + batch::Batch, + error::Error, + memtable::{BaseTable, Memtable, MemtableEntry}, + sealed::{Constructable, Wal, WalReader}, + types::{base::Entry, BufWriter, KeyBuilder, ValueBuilder}, + Options, +}; + +use super::{Query, QueryRange, Slice}; + +mod iter; +pub use iter::*; + +/// An abstract layer for the immutable write-ahead log. +pub trait Reader: Constructable { + /// Returns the reserved space in the WAL. + /// + /// ## Safety + /// - The writer must ensure that the returned slice is not modified. + /// - This method is not thread-safe, so be careful when using it. + #[inline] + unsafe fn reserved_slice(&self) -> &[u8] { + self.as_wal().reserved_slice() + } + + /// Returns the path of the WAL if it is backed by a file. + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + #[inline] + fn path(&self) -> Option<&<::Allocator as Allocator>::Path> { + self.as_wal().path() + } + + /// Returns the number of entries in the WAL. + #[inline] + fn len(&self) -> usize + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + self.as_wal().len() + } + + /// Returns `true` if the WAL is empty. + #[inline] + fn is_empty(&self) -> bool + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + self.as_wal().is_empty() + } + + /// Returns the maximum key size allowed in the WAL. + #[inline] + fn maximum_key_size(&self) -> KeySize { + self.as_wal().maximum_key_size() + } + + /// Returns the maximum value size allowed in the WAL. + #[inline] + fn maximum_value_size(&self) -> u32 { + self.as_wal().maximum_value_size() + } + + /// Returns the remaining capacity of the WAL. + #[inline] + fn remaining(&self) -> u32 { + self.as_wal().remaining() + } + + /// Returns the capacity of the WAL. 
+ #[inline] + fn capacity(&self) -> u32 { + self.as_wal().capacity() + } + + /// Returns the options used to create this WAL instance. + #[inline] + fn options(&self) -> &Options { + self.as_wal().options() + } + + /// Returns an iterator over the entries in the WAL. + #[inline] + fn iter( + &self, + ) -> Iter< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Iter::new(BaseIter::new(self.as_wal().iter())) + } + + /// Returns an iterator over a subset of entries in the WAL. + #[inline] + fn range<'a, Q, R>( + &'a self, + range: R, + ) -> Range<'a, R, Q, >::Memtable> + where + R: RangeBounds, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Type + Ord, + ::Value: Type, + { + Range::new(BaseIter::new(self.as_wal().range(QueryRange::new(range)))) + } + + /// Returns an iterator over the keys in the WAL. + #[inline] + fn keys( + &self, + ) -> Keys< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Keys::new(BaseIter::new(self.as_wal().iter())) + } + + /// Returns an iterator over a subset of keys in the WAL. + #[inline] + fn range_keys<'a, Q, R>( + &'a self, + range: R, + ) -> RangeKeys<'a, R, Q, >::Memtable> + where + R: RangeBounds, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> ::Item<'b>: MemtableEntry<'b>, + { + RangeKeys::new(BaseIter::new(WalReader::range( + self.as_wal(), + QueryRange::new(range), + ))) + } + + /// Returns an iterator over the values in the WAL. + #[inline] + fn values( + &self, + ) -> Values< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: Memtable, + ::Key: Type, + ::Value: Type, + for<'a> ::Item<'a>: MemtableEntry<'a>, + { + Values::new(BaseIter::new(self.as_wal().iter())) + } + + /// Returns an iterator over a subset of values in the WAL. + #[inline] + fn range_values<'a, Q, R>( + &'a self, + range: R, + ) -> RangeValues<'a, R, Q, >::Memtable> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> ::Item<'b>: MemtableEntry<'b>, + { + RangeValues::new(BaseIter::new(self.as_wal().range(QueryRange::new(range)))) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + #[inline] + fn first(&self) -> Option::Item<'_>>> + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + ::Key: Ord + Type, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + { + self.as_wal().first().map(Entry::new) + } + + /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. + #[inline] + fn last(&self) -> Option::Item<'_>>> + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + ::Key: Ord + Type, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + { + WalReader::last(self.as_wal()).map(Entry::new) + } + + /// Returns `true` if the key exists in the WAL. 
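// Editorial sketch, not part of the patch: downstream code can stay generic
// over any WAL flavor (a writer or the read-only view it hands out) through
// the `Reader` trait. The bounds below are copied from the declarations of
// `len`, `iter`, and `first`/`last` above; `report` itself is illustrative
// only and assumes the imports at the top of this file.
#[cfg(feature = "std")]
fn report<W>(wal: &W)
where
  W: Reader,
  W::Memtable: Memtable,
  <W::Memtable as BaseTable>::Key: Type + Ord,
  <W::Memtable as BaseTable>::Value: Type,
  for<'a> <<W::Memtable as BaseTable>::Key as Type>::Ref<'a>:
    KeyRef<'a, <W::Memtable as BaseTable>::Key>,
  for<'a> <W::Memtable as BaseTable>::Item<'a>: MemtableEntry<'a>,
{
  // Space accounting is answered by the allocator-backed options.
  println!(
    "{} entries, {} of {} bytes still free",
    wal.len(),
    wal.remaining(),
    wal.capacity()
  );

  // `first`/`last` surface the minimum and maximum keys currently stored;
  // the full ordered scan is available through `iter()`.
  if let (Some(min), Some(max)) = (wal.first(), wal.last()) {
    let _ = (min, max);
  }
}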
+ #[inline] + fn contains_key<'a, Q>(&'a self, key: &Q) -> bool + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self.as_wal().contains_key(Query::<_, Q>::ref_cast(key)) + } + + /// Returns `true` if the key exists in the WAL. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn contains_key_by_bytes(&self, key: &[u8]) -> bool + where + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self + .as_wal() + .contains_key(Slice::<::Key>::ref_cast(key)) + } + + /// Gets the value associated with the key. + #[inline] + fn get<'a, Q>(&'a self, key: &Q) -> Option::Item<'a>>> + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self + .as_wal() + .get(Query::<_, Q>::ref_cast(key)) + .map(Entry::new) + } + + /// Gets the value associated with the key. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn get_by_bytes( + &self, + key: &[u8], + ) -> Option::Item<'_>>> + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + ::Key: Ord + Type, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + { + self + .as_wal() + .get(Slice::<::Key>::ref_cast(key)) + .map(Entry::new) + } + + /// Returns a value associated to the highest element whose key is below the given bound. + /// If no such element is found then `None` is returned. + #[inline] + fn upper_bound<'a, Q>( + &'a self, + bound: Bound<&Q>, + ) -> Option::Item<'a>>> + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self + .as_wal() + .upper_bound(bound.map(Query::<_, Q>::ref_cast)) + .map(Entry::new) + } + + /// Returns a value associated to the highest element whose key is below the given bound. + /// If no such element is found then `None` is returned. + /// + /// ## Safety + /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn upper_bound_by_bytes( + &self, + bound: Bound<&[u8]>, + ) -> Option::Item<'_>>> + where + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, + ::Key: Ord + Type, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + { + self + .as_wal() + .upper_bound(bound.map(Slice::<::Key>::ref_cast)) + .map(Entry::new) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. 
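// Editorial sketch, not part of the patch: point lookups via the methods
// above. `Q` is fixed to `str` here, which works whenever the key type's
// `Type::Ref` is comparable with `str` (e.g. `String` keys); the bounds are
// copied from `get` and `upper_bound`.
fn lookup<'a, W>(wal: &'a W, key: &str)
where
  W: Reader,
  W::Memtable: Memtable,
  <W::Memtable as BaseTable>::Key: Type + Ord,
  <W::Memtable as BaseTable>::Value: Type,
  str: Comparable<<<W::Memtable as BaseTable>::Key as Type>::Ref<'a>>,
  for<'b> <<W::Memtable as BaseTable>::Key as Type>::Ref<'b>:
    KeyRef<'b, <W::Memtable as BaseTable>::Key>,
  for<'b> <W::Memtable as BaseTable>::Item<'b>: MemtableEntry<'b>,
{
  if wal.contains_key(key) {
    // `Entry` borrows from the WAL; no copy of key or value is made.
    let _entry = wal.get(key);
  }

  // Largest entry strictly below `key`, mirroring `upper_bound` above.
  let _below = wal.upper_bound(Bound::Excluded(key));
}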
+ #[inline] + fn lower_bound<'a, Q>( + &'a self, + bound: Bound<&Q>, + ) -> Option::Item<'a>>> + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self + .as_wal() + .lower_bound(bound.map(Query::<::Key, Q>::ref_cast)) + .map(Entry::new) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. + /// + /// ## Safety + /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn lower_bound_by_bytes( + &self, + bound: Bound<&[u8]>, + ) -> Option::Item<'_>>> + where + Self::Memtable: Memtable, + for<'b> ::Item<'b>: MemtableEntry<'b>, + ::Key: Ord + Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + ::Value: Type, + { + self + .as_wal() + .lower_bound(bound.map(Slice::<::Key>::ref_cast)) + .map(Entry::new) + } +} + +impl Reader for T +where + T: Constructable, + T::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, +{ +} + +/// An abstract layer for the write-ahead log. +pub trait Writer: Reader +where + Self::Reader: Reader, + Self::Memtable: Memtable, + for<'a> ::Item<'a>: MemtableEntry<'a>, +{ + /// Returns `true` if this WAL instance is read-only. + #[inline] + fn read_only(&self) -> bool { + self.as_wal().read_only() + } + + /// Returns the mutable reference to the reserved slice. + /// + /// ## Safety + /// - The caller must ensure that there are no others accessing the reserved slice for either read or write. + /// - This method is not thread-safe, so be careful when using it. + #[inline] + unsafe fn reserved_slice_mut<'a>(&'a mut self) -> &'a mut [u8] + where + Self::Allocator: 'a, + { + self.as_wal().reserved_slice_mut() + } + + /// Flushes the WAL to disk. + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + fn flush(&self) -> Result<(), Error> { + self.as_wal().flush() + } + + /// Asynchronously flushes the WAL to disk. + #[inline] + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + fn flush_async(&self) -> Result<(), Error> { + self.as_wal().flush_async() + } + + /// Returns the read-only view for the WAL. + fn reader(&self) -> Self::Reader; + + /// Inserts a key-value pair into the WAL. This method + /// allows the caller to build the key in place. + /// + /// See also [`insert_with_value_builder`](Writer::insert_with_value_builder) and [`insert_with_builders`](Writer::insert_with_builders). + #[inline] + fn insert_with_key_builder<'a, E>( + &'a mut self, + kb: KeyBuilder) -> Result>, + value: impl Into::Value>>, + ) -> Result< + (), + Among::Value as Type>::Error, Error>, + > + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert(None, kb, value.into()) + } + + /// Inserts a key-value pair into the WAL. This method + /// allows the caller to build the value in place. + /// + /// See also [`insert_with_key_builder`](Writer::insert_with_key_builder) and [`insert_with_builders`](Writer::insert_with_builders).
+ #[inline] + fn insert_with_value_builder<'a, E>( + &'a mut self, + key: impl Into::Key>>, + vb: ValueBuilder) -> Result>, + ) -> Result< + (), + Among<<::Key as Type>::Error, E, Error>, + > + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert(None, key.into(), vb) + } + + /// Inserts a key-value pair into the WAL. This method + /// allows the caller to build the key and value in place. + #[inline] + fn insert_with_builders<'a, KE, VE>( + &'a mut self, + kb: KeyBuilder) -> Result>, + vb: ValueBuilder) -> Result>, + ) -> Result<(), Among>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert(None, kb, vb) + } + + /// Inserts a key-value pair into the WAL. + #[inline] + fn insert<'a>( + &'a mut self, + key: impl Into::Key>>, + value: impl Into::Value>>, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + <::Value as Type>::Error, + Error, + >, + > + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert(None, key.into(), value.into()) + } + + /// Removes a key-value pair from the WAL. This method + /// allows the caller to build the key in place. + #[inline] + fn remove_with_builder<'a, KE>( + &'a mut self, + kb: KeyBuilder) -> Result>, + ) -> Result<(), Either>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().remove(None, kb) + } + + /// Removes a key-value pair from the WAL. + #[inline] + fn remove<'a>( + &'a mut self, + key: impl Into::Key>>, + ) -> Result<(), Either<<::Key as Type>::Error, Error>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().remove(None, key.into()) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + <::Value as Type>::Error, + Error, + >, + > + where + B: Batch< + Self::Memtable, + Key = MaybeStructured<'a, ::Key>, + Value = MaybeStructured<'a, ::Value>, + >, + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch_with_key_builder<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + ::Error, + <::Value as Type>::Error, + Error, + >, + > + where + B: Batch::Value>>, + B::Key: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. 
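// Editorial sketch, not part of the patch: the plain write path. `insert` and
// `remove` accept anything convertible into `MaybeStructured`, so borrowed
// `&String` arguments work directly under the assumption (suggested by the
// `impl Into<MaybeStructured<..>>` parameters above) that dbutils provides a
// `From<&T>` conversion and a `KeyRef` impl for `String`'s `Type::Ref`.
// Errors are flattened to a message for brevity.
fn write_path<W>(wal: &mut W, key: &String, value: &String) -> Result<(), &'static str>
where
  W: Writer,
  W::Reader: Reader,
  W::Checksumer: BuildChecksumer,
  W::Memtable: Memtable + BaseTable<Key = String, Value = String>,
  for<'a> <W::Memtable as BaseTable>::Item<'a>: MemtableEntry<'a>,
{
  if wal.read_only() {
    return Err("opened read-only");
  }

  // The value is encoded in place into the WAL's buffer; no intermediate
  // allocation is needed for borrowed keys and values.
  wal.insert(key, value).map_err(|_| "insert failed")?;
  wal.remove(key).map_err(|_| "remove failed")
}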
+ #[inline] + fn insert_batch_with_value_builder<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + ::Error, + Error, + >, + > + where + B: Batch::Key>>, + B::Value: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch_with_builders<'a, KB, VB, B>( + &'a mut self, + batch: &mut B, + ) -> Result<(), Among>> + where + B: Batch, + KB: BufWriter, + VB: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: BaseTable, + ::Key: Type + Ord + 'static, + <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type + 'static, + { + self.as_wal().insert_batch::(batch) + } +} diff --git a/src/wal/base/iter.rs b/src/wal/base/iter.rs new file mode 100644 index 0000000..1547e77 --- /dev/null +++ b/src/wal/base/iter.rs @@ -0,0 +1,476 @@ +use core::{iter::FusedIterator, marker::PhantomData, ops::RangeBounds}; + +use crate::{ + memtable::{BaseEntry, Memtable, MemtableEntry}, + types::base::{Entry, Key, Value}, + wal::{KeyPointer, ValuePointer}, +}; + +use dbutils::{equivalent::Comparable, types::Type}; + +use super::{Query, QueryRange}; + +/// Iterator over the entries in the WAL. +pub struct BaseIter<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + iter: I, + head: Option<(KeyPointer, ValuePointer)>, + tail: Option<(KeyPointer, ValuePointer)>, + _m: PhantomData<&'a ()>, +} + +impl BaseIter<'_, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: I) -> Self { + Self { + iter, + head: None, + tail: None, + _m: PhantomData, + } + } +} + +impl<'a, I, M> Iterator for BaseIter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: Iterator>, +{ + type Item = M::Item<'a>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().inspect(|ent| { + self.head = Some((ent.key(), ent.value())); + }) + } +} + +impl<'a, I, M> DoubleEndedIterator for BaseIter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().inspect(|ent| { + self.tail = Some((ent.key(), ent.value())); + }) + } +} + +impl<'a, I, M> FusedIterator for BaseIter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the entries in the WAL. 
+pub struct Iter<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, +} + +impl<'a, I, M> Iter<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { iter } + } +} + +impl<'a, I, M> Iterator for Iter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + M::Key: Type + Ord, + M::Value: Type, + I: Iterator>, +{ + type Item = Entry<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Entry::new) + } +} + +impl<'a, I, M> DoubleEndedIterator for Iter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + M::Key: Type + Ord, + M::Value: Type, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Entry::new) + } +} + +impl<'a, I, M> FusedIterator for Iter<'a, I, M> +where + M: Memtable + 'a, + for<'b> M::Item<'b>: MemtableEntry<'b>, + M::Key: Type + Ord, + M::Value: Type, + I: FusedIterator>, +{ +} + +/// Iterator over the keys in the WAL. +pub struct Keys<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, +} + +impl<'a, I, M> Keys<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { iter } + } +} + +impl<'a, I, M> Iterator for Keys<'a, I, M> +where + M: Memtable + 'a, + M::Key: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: Iterator>, +{ + type Item = Key<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Key::new) + } +} + +impl<'a, I, M> DoubleEndedIterator for Keys<'a, I, M> +where + M: Memtable + 'a, + M::Key: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Key::new) + } +} + +impl<'a, I, M> FusedIterator for Keys<'a, I, M> +where + M: Memtable + 'a, + M::Key: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the values in the WAL. +pub struct Values<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, +} + +impl<'a, I, M> Values<'a, I, M> +where + M: Memtable, + for<'b> M::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { iter } + } +} + +impl<'a, I, M> Iterator for Values<'a, I, M> +where + M: Memtable + 'a, + M::Value: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: Iterator>, +{ + type Item = Value<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Value::new) + } +} + +impl<'a, I, M> DoubleEndedIterator for Values<'a, I, M> +where + M: Memtable + 'a, + M::Value: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Value::new) + } +} + +impl<'a, I, M> FusedIterator for Values<'a, I, M> +where + M: Memtable + 'a, + M::Value: Type, + for<'b> M::Item<'b>: MemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// An iterator over a subset of the entries in the WAL. 
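// Editorial note, not part of the patch: every adapter in this module is
// double-ended and fused, so one iterator can answer min/max-style queries
// from both ends without a second scan. A sketch against `super::Reader`,
// using the same bounds as `Reader::keys`:
fn min_and_max_keys<M, W>(wal: &W)
where
  W: super::Reader<Memtable = M>,
  M: Memtable,
  M::Key: Type + Ord,
  M::Value: Type,
  for<'a> M::Item<'a>: MemtableEntry<'a>,
{
  let mut keys = wal.keys();
  let smallest = keys.next(); // front of the ordered key space
  let largest = keys.next_back(); // back of the very same iterator
  let _ = (smallest, largest);
  // After exhaustion, `FusedIterator` guarantees `next()` stays `None`.
}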
+pub struct Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, +} + +impl<'a, R, Q, B> Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { iter } + } +} + +impl<'a, R, Q, B> Iterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + type Item = Entry<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Entry::new) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Entry::new) + } +} + +impl<'a, R, Q, B> FusedIterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ +} + +/// An iterator over the keys in a subset of the entries in the WAL. 
+pub struct RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, +} + +impl<'a, R, Q, B> RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { iter } + } +} + +impl<'a, R, Q, B> Iterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + type Item = Key<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Key::new) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Key::new) + } +} + +impl<'a, R, Q, B> FusedIterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ +} + +/// An iterator over the values in a subset of the entries in the WAL. 
+pub struct RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, +} + +impl<'a, R, Q, B> RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { iter } + } +} + +impl<'a, R, Q, B> Iterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + type Item = Value<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(Value::new) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Value::new) + } +} + +impl<'a, R, Q, B> FusedIterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: Memtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: MemtableEntry<'b>, +{ +} diff --git a/src/wal/batch.rs b/src/wal/batch.rs deleted file mode 100644 index c06df3e..0000000 --- a/src/wal/batch.rs +++ /dev/null @@ -1,219 +0,0 @@ -use core::borrow::Borrow; - -use dbutils::{buffer::VacantBuffer, Comparator}; - -use super::entry::{ - Entry, EntryWithBuilders, EntryWithKeyBuilder, EntryWithValueBuilder, GenericEntry, -}; - -/// A batch of keys and values that can be inserted into the [`Wal`](super::Wal). -pub trait Batch { - /// The key type. - type Key: Borrow<[u8]>; - - /// The value type. - type Value: Borrow<[u8]>; - - /// The [`Comparator`] type. - type Comparator: Comparator; - - /// The iterator type. - type IterMut<'a>: Iterator> - where - Self: 'a; - - /// Returns an iterator over the keys and values. - fn iter_mut(&mut self) -> Self::IterMut<'_>; -} - -impl Batch for T -where - K: Borrow<[u8]>, - V: Borrow<[u8]>, - C: Comparator, - for<'a> &'a mut T: IntoIterator>, -{ - type Key = K; - type Value = V; - type Comparator = C; - - type IterMut<'a> - = <&'a mut T as IntoIterator>::IntoIter - where - Self: 'a; - - fn iter_mut(&mut self) -> Self::IterMut<'_> { - IntoIterator::into_iter(self) - } -} - -/// A batch of keys and values that can be inserted into the [`Wal`](super::Wal). -/// Comparing to [`Batch`], this trait is used to build -/// the key in place. -pub trait BatchWithKeyBuilder { - /// The key builder type. - type KeyBuilder: Fn(&mut VacantBuffer<'_>) -> Result<(), Self::Error>; - - /// The error for the key builder. - type Error; - - /// The value type. - type Value; - - /// The iterator type. - type IterMut<'a>: Iterator> - where - Self: 'a; - - /// Returns an iterator over the keys and values. 
- fn iter_mut(&mut self) -> Self::IterMut<'_>; -} - -impl BatchWithKeyBuilder<P>
for T -where - KB: Fn(&mut VacantBuffer<'_>) -> Result<(), E>, - for<'a> &'a mut T: IntoIterator>, - P: 'static, -{ - type KeyBuilder = KB; - type Error = E; - type Value = V; - - type IterMut<'a> - = <&'a mut T as IntoIterator>::IntoIter - where - Self: 'a; - - fn iter_mut(&mut self) -> Self::IterMut<'_> { - IntoIterator::into_iter(self) - } -} - -/// A batch of keys and values that can be inserted into the [`Wal`](super::Wal). -/// Comparing to [`Batch`], this trait is used to build -/// the value in place. -pub trait BatchWithValueBuilder { - /// The value builder type. - type ValueBuilder: Fn(&mut VacantBuffer<'_>) -> Result<(), Self::Error>; - - /// The error for the value builder. - type Error; - - /// The key type. - type Key; - - /// The iterator type. - type IterMut<'a>: Iterator> - where - Self: 'a; - - /// Returns an iterator over the keys and values. - fn iter_mut(&mut self) -> Self::IterMut<'_>; -} - -impl BatchWithValueBuilder
<P>
for T -where - VB: Fn(&mut VacantBuffer<'_>) -> Result<(), E>, - for<'a> &'a mut T: IntoIterator>, - P: 'static, -{ - type Key = K; - type Error = E; - type ValueBuilder = VB; - - type IterMut<'a> - = <&'a mut T as IntoIterator>::IntoIter - where - Self: 'a; - - fn iter_mut(&mut self) -> Self::IterMut<'_> { - IntoIterator::into_iter(self) - } -} - -/// A batch of keys and values that can be inserted into the [`Wal`](super::Wal). -/// Comparing to [`Batch`], this trait is used to build -/// the key and value in place. -pub trait BatchWithBuilders { - /// The value builder type. - type ValueBuilder: Fn(&mut VacantBuffer<'_>) -> Result<(), Self::ValueError>; - - /// The error for the value builder. - type ValueError; - - /// The value builder type. - type KeyBuilder: Fn(&mut VacantBuffer<'_>) -> Result<(), Self::KeyError>; - - /// The error for the value builder. - type KeyError; - - /// The iterator type. - type IterMut<'a>: Iterator< - Item = &'a mut EntryWithBuilders, - > - where - Self: 'a; - - /// Returns an iterator over the keys and values. - fn iter_mut(&mut self) -> Self::IterMut<'_>; -} - -impl BatchWithBuilders
<P>
for T -where - VB: Fn(&mut VacantBuffer<'_>) -> Result<(), VE>, - KB: Fn(&mut VacantBuffer<'_>) -> Result<(), KE>, - for<'a> &'a mut T: IntoIterator>, - P: 'static, -{ - type KeyBuilder = KB; - type KeyError = KE; - type ValueBuilder = VB; - type ValueError = VE; - - type IterMut<'a> - = <&'a mut T as IntoIterator>::IntoIter - where - Self: 'a; - - fn iter_mut(&mut self) -> Self::IterMut<'_> { - IntoIterator::into_iter(self) - } -} - -/// The container for entries in the [`GenericBatch`]. -pub trait GenericBatch<'e> { - /// The key type. - type Key: 'e; - - /// The value type. - type Value: 'e; - - /// The mutable iterator type. - type IterMut<'a>: Iterator> - where - Self: 'e, - 'e: 'a; - - /// Returns an mutable iterator over the keys and values. - fn iter_mut(&'e mut self) -> Self::IterMut<'e>; -} - -impl<'e, K, V, T> GenericBatch<'e> for T -where - K: 'e, - V: 'e, - for<'a> &'a mut T: IntoIterator>, -{ - type Key = K; - type Value = V; - - type IterMut<'a> - = <&'a mut T as IntoIterator>::IntoIter - where - Self: 'e, - 'e: 'a; - - fn iter_mut(&'e mut self) -> Self::IterMut<'e> { - IntoIterator::into_iter(self) - } -} diff --git a/src/wal/iter.rs b/src/wal/iter.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/src/wal/iter.rs @@ -0,0 +1 @@ + diff --git a/src/wal/multiple_version.rs b/src/wal/multiple_version.rs new file mode 100644 index 0000000..111dd06 --- /dev/null +++ b/src/wal/multiple_version.rs @@ -0,0 +1,1134 @@ +use core::ops::{Bound, RangeBounds}; + +use among::Among; +use dbutils::{ + buffer::VacantBuffer, + checksum::BuildChecksumer, + equivalent::Comparable, + types::{KeyRef, MaybeStructured, Type}, +}; +#[cfg(all(feature = "memmap", not(target_family = "wasm")))] +use rarena_allocator::Allocator; +use ref_cast::RefCast; +use skl::{either::Either, KeySize}; + +use crate::{ + batch::Batch, + error::Error, + memtable::{BaseTable, MultipleVersionMemtable, VersionedMemtableEntry}, + sealed::{Constructable, MultipleVersionWalReader, Wal}, + types::{ + multiple_version::{Entry, MultipleVersionEntry}, + BufWriter, KeyBuilder, ValueBuilder, + }, + Options, +}; + +use super::{Query, QueryRange, Slice}; + +mod iter; +pub use iter::*; + +/// An abstract layer for the immutable write-ahead log. +pub trait Reader: Constructable { + /// Returns the reserved space in the WAL. + /// + /// ## Safety + /// - The writer must ensure that the returned slice is not modified. + /// - This method is not thread-safe, so be careful when using it. + #[inline] + unsafe fn reserved_slice(&self) -> &[u8] { + self.as_wal().reserved_slice() + } + + /// Returns the path of the WAL if it is backed by a file. + #[cfg(all(feature = "memmap", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))] + #[inline] + fn path(&self) -> Option<&<::Allocator as Allocator>::Path> { + self.as_wal().path() + } + + /// Returns the maximum key size allowed in the WAL. + #[inline] + fn maximum_key_size(&self) -> KeySize { + self.as_wal().maximum_key_size() + } + + /// Returns the maximum value size allowed in the WAL. + #[inline] + fn maximum_value_size(&self) -> u32 { + self.as_wal().maximum_value_size() + } + + /// Returns the maximum version in the WAL. + #[inline] + fn maximum_version(&self) -> u64 + where + Self::Memtable: MultipleVersionMemtable + 'static, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + Wal::memtable(self.as_wal()).maximum_version() + } + + /// Returns the minimum version in the WAL. 
+ #[inline] + fn minimum_version(&self) -> u64 + where + Self::Memtable: MultipleVersionMemtable + 'static, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + Wal::memtable(self.as_wal()).minimum_version() + } + + /// Returns `true` if the WAL may contain an entry whose version is less than or equal to the given version. + #[inline] + fn may_contain_version(&self, version: u64) -> bool + where + Self::Memtable: MultipleVersionMemtable + 'static, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + Wal::memtable(self.as_wal()).may_contain_version(version) + } + + /// Returns the remaining capacity of the WAL. + #[inline] + fn remaining(&self) -> u32 { + self.as_wal().remaining() + } + + /// Returns the capacity of the WAL. + #[inline] + fn capacity(&self) -> u32 { + self.as_wal().capacity() + } + + /// Returns the options used to create this WAL instance. + #[inline] + fn options(&self) -> &Options { + self.as_wal().options() + } + + /// Returns an iterator over the entries in the WAL. + #[inline] + fn iter( + &self, + version: u64, + ) -> Iter< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: MultipleVersionMemtable + 'static, + ::Key: Type + Ord, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + { + Iter::new(BaseIter::new(version, self.as_wal().iter(version))) + } + + /// Returns an iterator over the entries (all versions) in the WAL. + #[inline] + fn iter_all_versions( + &self, + version: u64, + ) -> MultipleVersionIter< + '_, + <>::Memtable as MultipleVersionMemtable>::IterAll<'_>, + Self::Memtable, + > + where + Self::Memtable: MultipleVersionMemtable + 'static, + ::Key: Type + Ord, + for<'a> <::Key as Type>::Ref<'a>: + KeyRef<'a, ::Key>, + ::Value: Type, + for<'a> ::Item<'a>: VersionedMemtableEntry<'a>, + { + MultipleVersionIter::new(MultipleVersionBaseIter::new( + version, + self.as_wal().iter_all_versions(version), + )) + } + + /// Returns an iterator over a subset of entries in the WAL. + #[inline] + fn range<'a, Q, R>( + &'a self, + version: u64, + range: R, + ) -> Range<'a, R, Q, >::Memtable> + where + R: RangeBounds, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + Range::new(BaseIter::new( + version, + self.as_wal().range(version, QueryRange::new(range)), + )) + } + + /// Returns an iterator over a subset of entries (all versions) in the WAL. + #[inline] + fn range_all_versions<'a, Q, R>( + &'a self, + version: u64, + range: R, + ) -> MultipleVersionRange<'a, R, Q, >::Memtable> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + MultipleVersionRange::new(MultipleVersionBaseIter::new( + version, + self + .as_wal() + .range_all_versions(version, QueryRange::new(range)), + )) + } + + /// Returns an iterator over the keys in the WAL.
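// Editorial sketch, not part of the patch: an MVCC snapshot scan with the
// reader methods above. The query version caps visibility: `iter` yields only
// the newest visible, live entry per key, while `iter_all_versions` (not
// called here) would also surface older versions and tombstones. Bounds are
// copied from `may_contain_version` and `iter`.
fn snapshot_scan<W>(wal: &W, version: u64)
where
  W: Reader,
  W::Memtable: MultipleVersionMemtable + 'static,
  <W::Memtable as BaseTable>::Key: Type + Ord,
  <W::Memtable as BaseTable>::Value: Type,
  for<'a> <<W::Memtable as BaseTable>::Key as Type>::Ref<'a>:
    KeyRef<'a, <W::Memtable as BaseTable>::Key>,
  for<'a> <W::Memtable as BaseTable>::Item<'a>: VersionedMemtableEntry<'a>,
{
  // Cheap pre-check before walking the memtable at all.
  if !wal.may_contain_version(version) {
    return;
  }

  for entry in wal.iter(version) {
    let _ = entry; // newest visible value for each key at `version`
  }
}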
+ #[inline] + fn keys( + &self, + version: u64, + ) -> Keys< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + Keys::new(BaseIter::new(version, self.as_wal().iter(version))) + } + + /// Returns an iterator over a subset of keys in the WAL. + #[inline] + fn range_keys<'a, Q, R>( + &'a self, + version: u64, + range: R, + ) -> RangeKeys<'a, R, Q, >::Memtable> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + RangeKeys::new(BaseIter::new( + version, + self.as_wal().range(version, QueryRange::new(range)), + )) + } + + /// Returns an iterator over the values in the WAL. + #[inline] + fn values( + &self, + version: u64, + ) -> Values< + '_, + <>::Memtable as BaseTable>::Iterator<'_>, + Self::Memtable, + > + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + Values::new(BaseIter::new(version, self.as_wal().iter(version))) + } + + /// Returns an iterator over a subset of values in the WAL. + #[inline] + fn range_values<'a, Q, R>( + &'a self, + version: u64, + range: R, + ) -> RangeValues<'a, R, Q, >::Memtable> + where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + RangeValues::new(BaseIter::new( + version, + self.as_wal().range(version, QueryRange::new(range)), + )) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + #[inline] + fn first(&self, version: u64) -> Option::Item<'_>>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .first(version) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Returns the first key-value pair in the map. The key in this pair is the minimum key in the wal. + /// + /// Compared to [`first`](Reader::first), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. 
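// Editorial sketch, not part of the patch: the `_versioned` accessors differ
// from their plain counterparts only in tombstone visibility. `first` yields
// only a live entry, while `first_versioned` can also return a record marked
// as removed, so the deletion itself is observable. Bounds are copied from
// `first`/`first_versioned` above.
fn head_of_log<W>(wal: &W, version: u64)
where
  W: Reader,
  W::Memtable: MultipleVersionMemtable,
  <W::Memtable as BaseTable>::Key: Type + Ord,
  <W::Memtable as BaseTable>::Value: Type,
  for<'a> <<W::Memtable as BaseTable>::Key as Type>::Ref<'a>:
    KeyRef<'a, <W::Memtable as BaseTable>::Key>,
  for<'a> <W::Memtable as BaseTable>::Item<'a>: VersionedMemtableEntry<'a>,
  for<'a> <W::Memtable as MultipleVersionMemtable>::VersionedItem<'a>:
    VersionedMemtableEntry<'a>,
{
  let live = wal.first(version); // first live entry; tombstones never show up
  let raw = wal.first_versioned(version); // may be a tombstone record
  // If any live entry exists, some raw record certainly exists too.
  assert!(live.is_none() || raw.is_some());
}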
+ #[inline] + fn first_versioned( + &self, + version: u64, + ) -> Option< + MultipleVersionEntry<'_, ::VersionedItem<'_>>, + > + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .first_versioned(version) + .map(|ent| MultipleVersionEntry::with_version(ent, version)) + } + + /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. + #[inline] + fn last(&self, version: u64) -> Option::Item<'_>>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + MultipleVersionWalReader::last(self.as_wal(), version) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the wal. + /// + /// Compared to [`last`](Reader::last), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + #[inline] + fn last_versioned( + &self, + version: u64, + ) -> Option< + MultipleVersionEntry<'_, ::VersionedItem<'_>>, + > + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .last_versioned(version) + .map(|ent| MultipleVersionEntry::with_version(ent, version)) + } + + /// Returns `true` if the key exists in the WAL. + #[inline] + fn contains_key<'a, Q>(&'a self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .contains_key(version, Query::<_, Q>::ref_cast(key)) + } + + /// Returns `true` if the key exists in the WAL. + /// + /// Compared to [`contains_key`](Reader::contains_key), this method returns `true` even if the latest is marked as removed. + #[inline] + fn contains_key_versioned<'a, Q>(&'a self, version: u64, key: &Q) -> bool + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .contains_key_versioned(version, Query::<_, Q>::ref_cast(key)) + } + + /// Returns `true` if the key exists in the WAL. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. 
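+  ///
+  /// For example, assuming a key type whose encoded form is exactly its raw bytes:
+  ///
+  /// ```ignore
+  /// // Sound only because b"foo" is a complete, valid encoding of the key type.
+  /// let exists = unsafe { wal.contains_key_by_bytes(version, b"foo") };
+  /// ```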
+ #[inline] + unsafe fn contains_key_by_bytes(&self, version: u64, key: &[u8]) -> bool + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().contains_key(version, Slice::ref_cast(key)) + } + + /// Returns `true` if the key exists in the WAL. + /// + /// Compared to [`contains_key_by_bytes`](Reader::contains_key_by_bytes), this method returns `true` even if the latest is marked as removed. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn contains_key_versioned_by_bytes(&self, version: u64, key: &[u8]) -> bool + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .contains_key_versioned(version, Slice::ref_cast(key)) + } + + /// Gets the value associated with the key. + #[inline] + fn get<'a, Q>( + &'a self, + version: u64, + key: &Q, + ) -> Option::Item<'a>>> + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .get(version, Query::<_, Q>::ref_cast(key)) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Gets the value associated with the key. + /// + /// Compared to [`get`](Reader::get), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + #[inline] + fn get_versioned<'a, Q>( + &'a self, + version: u64, + key: &Q, + ) -> Option< + MultipleVersionEntry<'a, ::VersionedItem<'a>>, + > + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .get_versioned(version, Query::<_, Q>::ref_cast(key)) + .map(|ent| MultipleVersionEntry::with_version(ent, version)) + } + + /// Gets the value associated with the key. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn get_by_bytes( + &self, + version: u64, + key: &[u8], + ) -> Option::Item<'_>>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .get(version, Slice::ref_cast(key)) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Gets the value associated with the key. + /// + /// Compared to [`get_by_bytes`](Reader::get_by_bytes), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + /// + /// ## Safety + /// - The given `key` must be valid to construct to `K::Ref` without remaining. 
+  #[inline]
+  unsafe fn get_versioned_by_bytes(
+    &self,
+    version: u64,
+    key: &[u8],
+  ) -> Option<
+    MultipleVersionEntry<'_, <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'_>>,
+  >
+  where
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord,
+    <Self::Memtable as BaseTable>::Value: Type,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self
+      .as_wal()
+      .get_versioned(version, Slice::ref_cast(key))
+      .map(|ent| MultipleVersionEntry::with_version(ent, version))
+  }
+
+  /// Returns the value associated with the highest element whose key is below the given bound.
+  /// If no such element is found then `None` is returned.
+  #[inline]
+  fn upper_bound<'a, Q>(
+    &'a self,
+    version: u64,
+    bound: Bound<&Q>,
+  ) -> Option<Entry<'a, <Self::Memtable as BaseTable>::Item<'a>>>
+  where
+    Q: ?Sized + Comparable<<<Self::Memtable as BaseTable>::Key as Type>::Ref<'a>>,
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord,
+    <Self::Memtable as BaseTable>::Value: Type,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self
+      .as_wal()
+      .upper_bound(version, bound.map(Query::ref_cast))
+      .map(|ent| Entry::with_version(ent, version))
+  }
+
+  /// Returns the value associated with the highest element whose key is below the given bound.
+  ///
+  /// Compared to [`upper_bound`](Reader::upper_bound), this method returns a versioned item, which means that the returned item
+  /// may already be marked as removed.
+  #[inline]
+  fn upper_bound_versioned<'a, Q>(
+    &'a self,
+    version: u64,
+    bound: Bound<&Q>,
+  ) -> Option<
+    MultipleVersionEntry<'a, <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'a>>,
+  >
+  where
+    Q: ?Sized + Comparable<<<Self::Memtable as BaseTable>::Key as Type>::Ref<'a>>,
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord,
+    <Self::Memtable as BaseTable>::Value: Type,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self
+      .as_wal()
+      .upper_bound_versioned(version, bound.map(Query::ref_cast))
+      .map(|ent| MultipleVersionEntry::with_version(ent, version))
+  }
+
+  /// Returns the value associated with the highest element whose key is below the given bound.
+  /// If no such element is found then `None` is returned.
+  ///
+  /// ## Safety
+  /// - The given `key` in `Bound` must be valid to construct a `K::Ref` from, with no bytes remaining.
+  #[inline]
+  unsafe fn upper_bound_by_bytes(
+    &self,
+    version: u64,
+    bound: Bound<&[u8]>,
+  ) -> Option<Entry<'_, <Self::Memtable as BaseTable>::Item<'_>>>
+  where
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord,
+    <Self::Memtable as BaseTable>::Value: Type,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self
+      .as_wal()
+      .upper_bound(version, bound.map(Slice::ref_cast))
+      .map(|ent| Entry::with_version(ent, version))
+  }
+
+  /// Returns the value associated with the highest element whose key is below the given bound.
+  /// If no such element is found then `None` is returned.
+  ///
+  /// Compared to [`upper_bound_by_bytes`](Reader::upper_bound_by_bytes), this method returns a versioned item, which means that the returned item
+  /// may already be marked as removed.
+  ///
+  /// ## Safety
+  /// - The given `key` in `Bound` must be valid to construct a `K::Ref` from, with no bytes remaining.
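+  ///
+  /// A sketch (the raw key bytes are illustrative):
+  ///
+  /// ```ignore
+  /// use core::ops::Bound;
+  ///
+  /// // Greatest entry below the raw key b"zzz", tombstones included.
+  /// let ent = unsafe { wal.upper_bound_versioned_by_bytes(version, Bound::Excluded(&b"zzz"[..])) };
+  /// ```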
+ #[inline] + unsafe fn upper_bound_versioned_by_bytes( + &self, + version: u64, + bound: Bound<&[u8]>, + ) -> Option< + MultipleVersionEntry<'_, ::VersionedItem<'_>>, + > + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .upper_bound_versioned(version, bound.map(Slice::ref_cast)) + .map(|ent| MultipleVersionEntry::with_version(ent, version)) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. + #[inline] + fn lower_bound<'a, Q>( + &'a self, + version: u64, + bound: Bound<&Q>, + ) -> Option::Item<'a>>> + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .lower_bound(version, bound.map(Query::ref_cast)) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. + /// + /// Compared to [`lower_bound`](Reader::lower_bound), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + #[inline] + fn lower_bound_versioned<'a, Q>( + &'a self, + version: u64, + bound: Bound<&Q>, + ) -> Option< + MultipleVersionEntry<'a, ::VersionedItem<'a>>, + > + where + Q: ?Sized + Comparable<<::Key as Type>::Ref<'a>>, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .lower_bound_versioned(version, bound.map(Query::ref_cast)) + .map(|ent| MultipleVersionEntry::with_version(ent, version)) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. + /// + /// ## Safety + /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. + #[inline] + unsafe fn lower_bound_by_bytes( + &self, + version: u64, + bound: Bound<&[u8]>, + ) -> Option::Item<'_>>> + where + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord, + ::Value: Type, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .lower_bound(version, bound.map(Slice::ref_cast)) + .map(|ent| Entry::with_version(ent, version)) + } + + /// Returns a value associated to the lowest element whose key is above the given bound. + /// If no such element is found then `None` is returned. + /// + /// Compared to [`lower_bound_by_bytes`](Reader::lower_bound_by_bytes), this method returns a versioned item, which means that the returned item + /// may already be marked as removed. + /// + /// ## Safety + /// - The given `key` in `Bound` must be valid to construct to `K::Ref` without remaining. 
+  #[inline]
+  unsafe fn lower_bound_versioned_by_bytes(
+    &self,
+    version: u64,
+    bound: Bound<&[u8]>,
+  ) -> Option<
+    MultipleVersionEntry<'_, <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'_>>,
+  >
+  where
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord,
+    <Self::Memtable as BaseTable>::Value: Type,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self
+      .as_wal()
+      .lower_bound_versioned(
+        version,
+        bound.map(Slice::<<Self::Memtable as BaseTable>::Key>::ref_cast),
+      )
+      .map(|ent| MultipleVersionEntry::with_version(ent, version))
+  }
+}
+
+impl<T> Reader for T
+where
+  T: Constructable,
+  T::Memtable: MultipleVersionMemtable,
+  for<'a> <T::Memtable as BaseTable>::Item<'a>: VersionedMemtableEntry<'a>,
+  for<'a> <T::Memtable as MultipleVersionMemtable>::VersionedItem<'a>: VersionedMemtableEntry<'a>,
+{
+}
+
+/// An abstract layer for the write-ahead log.
+pub trait Writer: Reader
+where
+  Self::Reader: Reader<Memtable = Self::Memtable>,
+{
+  /// Returns `true` if this WAL instance is read-only.
+  #[inline]
+  fn read_only(&self) -> bool {
+    self.as_wal().read_only()
+  }
+
+  /// Returns a mutable reference to the reserved slice.
+  ///
+  /// ## Safety
+  /// - The caller must ensure that no one else is accessing the reserved slice, for either read or write.
+  /// - This method is not thread-safe, so be careful when using it.
+  #[inline]
+  unsafe fn reserved_slice_mut<'a>(&'a mut self) -> &'a mut [u8]
+  where
+    Self::Allocator: 'a,
+  {
+    self.as_wal().reserved_slice_mut()
+  }
+
+  /// Flushes the WAL to disk.
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  #[inline]
+  fn flush(&self) -> Result<(), Error> {
+    self.as_wal().flush()
+  }
+
+  /// Asynchronously flushes the WAL to disk.
+  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
+  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
+  #[inline]
+  fn flush_async(&self) -> Result<(), Error> {
+    self.as_wal().flush_async()
+  }
+
+  /// Returns the read-only view for the WAL.
+  fn reader(&self) -> Self::Reader;
+
+  /// Inserts a key-value pair into the WAL. This method
+  /// allows the caller to build the key in place.
+  ///
+  /// See also [`insert_with_value_builder`](Writer::insert_with_value_builder) and [`insert_with_builders`](Writer::insert_with_builders).
+  #[inline]
+  fn insert_with_key_builder<'a, E>(
+    &'a mut self,
+    version: u64,
+    kb: KeyBuilder<impl FnOnce(&mut VacantBuffer<'_>) -> Result<usize, E>>,
+    value: impl Into<MaybeStructured<'a, <Self::Memtable as BaseTable>::Value>>,
+  ) -> Result<
+    (),
+    Among<E, <<Self::Memtable as BaseTable>::Value as Type>::Error, Error>,
+  >
+  where
+    Self::Checksumer: BuildChecksumer,
+    Self::Memtable: MultipleVersionMemtable,
+    <Self::Memtable as BaseTable>::Key: Type + Ord + 'static,
+    <Self::Memtable as BaseTable>::Value: Type + 'static,
+    for<'b> <<Self::Memtable as BaseTable>::Key as Type>::Ref<'b>:
+      KeyRef<'b, <Self::Memtable as BaseTable>::Key>,
+    for<'b> <Self::Memtable as BaseTable>::Item<'b>: VersionedMemtableEntry<'b>,
+    for<'b> <Self::Memtable as MultipleVersionMemtable>::VersionedItem<'b>:
+      VersionedMemtableEntry<'b>,
+  {
+    self.as_wal().insert(Some(version), kb, value.into())
+  }
+
+  /// Inserts a key-value pair into the WAL. This method
+  /// allows the caller to build the value in place.
+  ///
+  /// See also [`insert_with_key_builder`](Writer::insert_with_key_builder) and [`insert_with_builders`](Writer::insert_with_builders).
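+  ///
+  /// A sketch, assuming a `ValueBuilder::new(length, closure)`-style constructor
+  /// (the key, length, and payload are illustrative):
+  ///
+  /// ```ignore
+  /// // Serialize the value directly into the WAL's buffer, skipping an
+  /// // intermediate allocation.
+  /// wal.insert_with_value_builder(
+  ///   version,
+  ///   "my-key",
+  ///   ValueBuilder::new(5, |buf: &mut VacantBuffer<'_>| buf.put_slice(b"value").map(|_| 5)),
+  /// )?;
+  /// ```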
+ #[inline] + fn insert_with_value_builder<'a, E>( + &'a mut self, + version: u64, + key: impl Into::Key>>, + vb: ValueBuilder) -> Result>, + ) -> Result< + (), + Among<<::Key as Type>::Error, E, Error>, + > + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert(Some(version), key.into(), vb) + } + + /// Inserts a key-value pair into the WAL. This method + /// allows the caller to build the key and value in place. + #[inline] + fn insert_with_builders( + &mut self, + version: u64, + kb: KeyBuilder) -> Result>, + vb: ValueBuilder) -> Result>, + ) -> Result<(), Among>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert(Some(version), kb, vb) + } + + /// Inserts a key-value pair into the WAL. + #[inline] + fn insert<'a>( + &'a mut self, + version: u64, + key: impl Into::Key>>, + value: impl Into::Value>>, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + <::Value as Type>::Error, + Error, + >, + > + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self + .as_wal() + .insert(Some(version), key.into(), value.into()) + } + + /// Removes a key-value pair from the WAL. This method + /// allows the caller to build the key in place. + #[inline] + fn remove_with_builder( + &mut self, + version: u64, + kb: KeyBuilder) -> Result>, + ) -> Result<(), Either>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().remove(Some(version), kb) + } + + /// Removes a key-value pair from the WAL. + #[inline] + fn remove<'a>( + &'a mut self, + version: u64, + key: impl Into::Key>>, + ) -> Result<(), Either<<::Key as Type>::Error, Error>> + where + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().remove(Some(version), key.into()) + } + + /// Inserts a batch of key-value pairs into the WAL. 
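+  ///
+  /// A sketch, assuming `Batch` is implemented for a `Vec` of entries and an
+  /// entry constructor along these lines (both are illustrative):
+  ///
+  /// ```ignore
+  /// // Batching amortizes the header and checksum overhead across entries and
+  /// // commits them with a single flag write.
+  /// let mut batch = vec![
+  ///   Entry::with_version(1, "k1", "v1"),
+  ///   Entry::with_version(1, "k2", "v2"),
+  /// ];
+  /// wal.insert_batch(&mut batch)?;
+  /// ```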
+ #[inline] + fn insert_batch<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + <::Value as Type>::Error, + Error, + >, + > + where + B: Batch< + Self::Memtable, + Key = MaybeStructured<'a, ::Key>, + Value = MaybeStructured<'a, ::Value>, + >, + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch_with_key_builder<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + ::Error, + <::Value as Type>::Error, + Error, + >, + > + where + B: Batch::Value>>, + B::Key: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch_with_value_builder<'a, B>( + &'a mut self, + batch: &mut B, + ) -> Result< + (), + Among< + <::Key as Type>::Error, + ::Error, + Error, + >, + > + where + B: Batch::Key>>, + B::Value: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert_batch::(batch) + } + + /// Inserts a batch of key-value pairs into the WAL. + #[inline] + fn insert_batch_with_builders( + &mut self, + batch: &mut B, + ) -> Result<(), Among>> + where + B: Batch, + KB: BufWriter, + VB: BufWriter, + Self::Checksumer: BuildChecksumer, + Self::Memtable: MultipleVersionMemtable, + ::Key: Type + Ord + 'static, + ::Value: Type + 'static, + for<'b> <::Key as Type>::Ref<'b>: + KeyRef<'b, ::Key>, + for<'b> ::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> ::VersionedItem<'b>: + VersionedMemtableEntry<'b>, + { + self.as_wal().insert_batch::(batch) + } +} diff --git a/src/wal/multiple_version/iter.rs b/src/wal/multiple_version/iter.rs new file mode 100644 index 0000000..82262c6 --- /dev/null +++ b/src/wal/multiple_version/iter.rs @@ -0,0 +1,881 @@ +use core::{iter::FusedIterator, marker::PhantomData, ops::RangeBounds}; + +use dbutils::{equivalent::Comparable, types::Type}; + +use crate::{ + memtable::{BaseEntry, MultipleVersionMemtable, VersionedMemtableEntry}, + types::multiple_version::{Entry, Key, MultipleVersionEntry, Value}, + wal::{KeyPointer, ValuePointer}, +}; + +use super::{Query, QueryRange}; + +/// Iterator over the entries in the WAL. 
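+///
+/// This is the internal adaptor shared by the public iterators in this module:
+/// it carries the query version and remembers the pointers of the most recently
+/// yielded entries at both ends.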
+pub struct BaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: I, + version: u64, + head: Option<(KeyPointer, ValuePointer)>, + tail: Option<(KeyPointer, ValuePointer)>, + _m: PhantomData<&'a ()>, +} + +impl<'a, I, M> BaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(version: u64, iter: I) -> Self { + Self { + version, + iter, + head: None, + tail: None, + _m: PhantomData, + } + } + + /// Returns the query version of the iterator. + #[inline] + pub(super) const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for BaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: Iterator>, +{ + type Item = M::Item<'a>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().inspect(|ent| { + self.head = Some((ent.key(), ent.value().unwrap())); + }) + } +} + +impl<'a, I, M> DoubleEndedIterator for BaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().inspect(|ent| { + self.tail = Some((ent.key(), ent.value().unwrap())); + }) + } +} + +impl<'a, I, M> FusedIterator for BaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the entries in the WAL. +pub struct MultipleVersionBaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: I, + version: u64, + head: Option<(KeyPointer, Option>)>, + tail: Option<(KeyPointer, Option>)>, + _m: PhantomData<&'a ()>, +} + +impl<'a, I, M> MultipleVersionBaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(version: u64, iter: I) -> Self { + Self { + version, + iter, + head: None, + tail: None, + _m: PhantomData, + } + } + + /// Returns the query version of the iterator. 
+ #[inline] + pub(super) const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for MultipleVersionBaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: Iterator>, +{ + type Item = M::VersionedItem<'a>; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().inspect(|ent| { + self.head = Some((ent.key(), ent.value())); + }) + } +} + +impl<'a, I, M> DoubleEndedIterator for MultipleVersionBaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().inspect(|ent| { + self.tail = Some((ent.key(), ent.value())); + }) + } +} + +impl<'a, I, M> FusedIterator for MultipleVersionBaseIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the entries in the WAL. +pub struct Iter<'a, I, M> +where + M: MultipleVersionMemtable, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, + version: u64, +} + +impl<'a, I, M> Iter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the entries in the iterator. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for Iter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + M::Key: Type + Ord, + M::Value: Type, + I: Iterator>, +{ + type Item = Entry<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Entry::with_version(ent, self.version)) + } +} + +impl<'a, I, M> DoubleEndedIterator for Iter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + M::Key: Type + Ord, + M::Value: Type, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Entry::with_version(ent, self.version)) + } +} + +impl<'a, I, M> FusedIterator for Iter<'a, I, M> +where + M::Key: Type + Ord, + M::Value: Type, + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the keys in the WAL. 
+pub struct Keys<'a, I, M> +where + M: MultipleVersionMemtable, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, + version: u64, +} + +impl<'a, I, M> Keys<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the keys in the iterator. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for Keys<'a, I, M> +where + M::Key: Type, + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: Iterator>, +{ + type Item = Key<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Key::with_version(ent, self.version)) + } +} + +impl<'a, I, M> DoubleEndedIterator for Keys<'a, I, M> +where + M::Key: Type, + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Key::with_version(ent, self.version)) + } +} + +impl<'a, I, M> FusedIterator for Keys<'a, I, M> +where + M::Key: Type, + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// Iterator over the values in the WAL. +pub struct Values<'a, I, M> +where + M: MultipleVersionMemtable, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, I, M>, + version: u64, +} + +impl<'a, I, M> Values<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: BaseIter<'a, I, M>) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the values in the iterator. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for Values<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + M::Value: Type, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: Iterator>, +{ + type Item = Value<'a, M::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Value::with_version(ent, self.version)) + } +} + +impl<'a, I, M> DoubleEndedIterator for Values<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + M::Value: Type, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Value::with_version(ent, self.version)) + } +} + +impl<'a, I, M> FusedIterator for Values<'a, I, M> +where + M::Value: Type, + M: MultipleVersionMemtable + 'a, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// An iterator over a subset of the entries in the WAL. 
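+///
+/// Constructed by [`Reader::range`]; a usage sketch (the bounds are illustrative):
+///
+/// ```ignore
+/// for ent in wal.range(version, "a".."m") {
+///   // Each yielded `Entry` is pinned to the query version.
+///   let _ = (ent.key(), ent.value());
+/// }
+/// ```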
+pub struct Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + version: u64, +} + +impl<'a, R, Q, B> Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the entries in the iterator. + #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, R, Q, B> Iterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + type Item = Entry<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Entry::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Entry::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> FusedIterator for Range<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ +} + +/// An iterator over the keys in a subset of the entries in the WAL. +pub struct RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + version: u64, +} + +impl<'a, R, Q, B> RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the keys in the iterator. 
+ #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, R, Q, B> Iterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + type Item = Key<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Key::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Key::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> FusedIterator for RangeKeys<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ +} + +/// An iterator over the values in a subset of the entries in the WAL. +pub struct RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + version: u64, +} + +impl<'a, R, Q, B> RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: BaseIter<'a, B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, B>, + ) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the iterator. 
+ #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, R, Q, B> Iterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: Iterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + type Item = Value<'a, B::Item<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| Value::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| Value::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> FusedIterator for RangeValues<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::Range<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ +} + +/// Iterator over the entries in the WAL. +pub struct MultipleVersionIter<'a, I, M> +where + M: MultipleVersionMemtable, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: MultipleVersionBaseIter<'a, I, M>, + version: u64, +} + +impl<'a, I, M> MultipleVersionIter<'a, I, M> +where + M: MultipleVersionMemtable, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new(iter: MultipleVersionBaseIter<'a, I, M>) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the entries in the iterator. 
+ #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, I, M> Iterator for MultipleVersionIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + M::Key: Type + Ord, + M::Value: Type, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: Iterator>, +{ + type Item = MultipleVersionEntry<'a, M::VersionedItem<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| MultipleVersionEntry::with_version(ent, self.version)) + } +} + +impl<'a, I, M> DoubleEndedIterator for MultipleVersionIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + M::Key: Type + Ord, + M::Value: Type, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: DoubleEndedIterator>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| MultipleVersionEntry::with_version(ent, self.version)) + } +} + +impl<'a, I, M> FusedIterator for MultipleVersionIter<'a, I, M> +where + M: MultipleVersionMemtable + 'a, + M::Key: Type + Ord, + M::Value: Type, + for<'b> M::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> M::VersionedItem<'b>: VersionedMemtableEntry<'b>, + I: FusedIterator>, +{ +} + +/// An iterator over a subset of the entries in the WAL. +pub struct MultipleVersionRange<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + iter: MultipleVersionBaseIter< + 'a, + B::RangeAll<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, + B, + >, + version: u64, +} + +impl<'a, R, Q, B> MultipleVersionRange<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + pub(super) fn new( + iter: MultipleVersionBaseIter< + 'a, + B::RangeAll<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>, + B, + >, + ) -> Self { + Self { + version: iter.version(), + iter, + } + } + + /// Returns the query version of the entries in the iterator. 
+ #[inline] + pub const fn version(&self) -> u64 { + self.version + } +} + +impl<'a, R, Q, B> Iterator for MultipleVersionRange<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::RangeAll<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + Iterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + type Item = MultipleVersionEntry<'a, B::VersionedItem<'a>>; + + #[inline] + fn next(&mut self) -> Option { + self + .iter + .next() + .map(|ent| MultipleVersionEntry::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> DoubleEndedIterator for MultipleVersionRange<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::RangeAll<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + DoubleEndedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ + #[inline] + fn next_back(&mut self) -> Option { + self + .iter + .next_back() + .map(|ent| MultipleVersionEntry::with_version(ent, self.version)) + } +} + +impl<'a, R, Q, B> FusedIterator for MultipleVersionRange<'a, R, Q, B> +where + R: RangeBounds + 'a, + Q: ?Sized + Comparable<::Ref<'a>>, + B: MultipleVersionMemtable + 'a, + B::Key: Type + Ord, + B::Value: Type, + B::RangeAll<'a, Query<'a, B::Key, Q>, QueryRange<'a, B::Key, Q, R>>: + FusedIterator>, + for<'b> B::Item<'b>: VersionedMemtableEntry<'b>, + for<'b> B::VersionedItem<'b>: VersionedMemtableEntry<'b>, +{ +} diff --git a/src/wal/pointer.rs b/src/wal/pointer.rs new file mode 100644 index 0000000..2b79257 --- /dev/null +++ b/src/wal/pointer.rs @@ -0,0 +1,249 @@ +use core::{cmp, marker::PhantomData, mem, slice}; + +use dbutils::{ + buffer::VacantBuffer, + equivalent::Comparable, + types::{KeyRef, Type, TypeRef}, +}; + +use crate::types::EntryFlags; + +const PTR_SIZE: usize = mem::size_of::(); +const U32_SIZE: usize = mem::size_of::(); + +pub struct ValuePointer { + ptr: *const u8, + len: usize, + _m: PhantomData, +} + +unsafe impl Send for ValuePointer {} +unsafe impl Sync for ValuePointer {} + +impl core::fmt::Debug for ValuePointer { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ValuePointer") + .field("ptr", &self.ptr) + .field("value", &self.as_slice()) + .finish() + } +} + +impl Clone for ValuePointer { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Copy for ValuePointer {} + +impl ValuePointer { + #[inline] + pub(crate) fn new(len: usize, ptr: *const u8) -> Self { + Self { + ptr, + len, + _m: PhantomData, + } + } + + #[inline] + pub(crate) fn as_slice<'a>(&self) -> &'a [u8] { + if self.len == 0 { + return &[]; + } + + // SAFETY: `ptr` is a valid pointer to `len` bytes. + unsafe { slice::from_raw_parts(self.ptr, self.len) } + } +} + +impl Type for ValuePointer +where + V: ?Sized, +{ + type Ref<'a> = Self; + + type Error = (); + + #[inline] + fn encoded_len(&self) -> usize { + const SIZE: usize = PTR_SIZE + U32_SIZE; + SIZE + } + + #[inline] + fn encode_to_buffer(&self, buf: &mut VacantBuffer<'_>) -> Result { + // Safe to cast to u32 here, because the key and value length are guaranteed to be less than or equal to u32::MAX. 
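+    // Encoded layout: `[ptr as usize (LE bytes) | len as u32 (LE bytes)]`. Only the
+    // address is stored, not the pointed-to bytes, so the encoding is only
+    // meaningful within the process and mapping that produced it.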
+ let val_len = self.len as u32; + let ptr = self.ptr as usize; + + buf.set_len(self.encoded_len()); + buf[0..PTR_SIZE].copy_from_slice(&ptr.to_le_bytes()); + + buf[PTR_SIZE..PTR_SIZE + U32_SIZE].copy_from_slice(&val_len.to_le_bytes()); + + Ok(PTR_SIZE + U32_SIZE) + } +} + +impl<'a, V: ?Sized> TypeRef<'a> for ValuePointer { + unsafe fn from_slice(src: &'a [u8]) -> Self { + let ptr = usize_to_addr(usize::from_le_bytes((&src[..PTR_SIZE]).try_into().unwrap())); + let len = + u32::from_le_bytes((&src[PTR_SIZE..PTR_SIZE + U32_SIZE]).try_into().unwrap()) as usize; + + Self::new(len, ptr) + } +} + +#[doc(hidden)] +pub struct KeyPointer { + flag: EntryFlags, + ptr: *const u8, + len: usize, + _m: PhantomData, +} + +unsafe impl Send for KeyPointer {} +unsafe impl Sync for KeyPointer {} + +impl core::fmt::Debug for KeyPointer { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("KeyPointer") + .field("ptr", &self.ptr) + .field("flag", &self.flag) + .field("key", &self.as_slice()) + .finish() + } +} + +impl Clone for KeyPointer { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Copy for KeyPointer {} + +impl KeyPointer { + #[inline] + pub(crate) fn new(flag: EntryFlags, len: usize, ptr: *const u8) -> Self { + Self { + ptr, + flag, + len, + _m: PhantomData, + } + } + + #[inline] + pub(crate) fn as_slice<'a>(&self) -> &'a [u8] { + if self.len == 0 { + return &[]; + } + + // SAFETY: `ptr` is a valid pointer to `len` bytes. + unsafe { slice::from_raw_parts(self.ptr, self.len) } + } +} + +impl PartialEq for KeyPointer { + fn eq(&self, other: &Self) -> bool { + self.as_slice() == other.as_slice() + } +} + +impl Eq for KeyPointer {} + +impl<'a, K> PartialOrd for KeyPointer +where + K: Type + Ord + ?Sized, + K::Ref<'a>: KeyRef<'a, K>, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl<'a, K> Ord for KeyPointer +where + K: Type + Ord + ?Sized, + K::Ref<'a>: KeyRef<'a, K>, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + // SAFETY: WALs guarantee that the self and other must be the same as the result returned by `::encode`. + unsafe { as KeyRef>::compare_binary(self.as_slice(), other.as_slice()) } + } +} + +impl Type for KeyPointer +where + K: ?Sized, +{ + type Ref<'a> = Self; + + type Error = (); + + #[inline] + fn encoded_len(&self) -> usize { + const SIZE: usize = PTR_SIZE + U32_SIZE + mem::size_of::(); + SIZE + } + + #[inline] + fn encode_to_buffer(&self, buf: &mut VacantBuffer<'_>) -> Result { + // Safe to cast to u32 here, because the key and value length are guaranteed to be less than or equal to u32::MAX. 
+ let key_len = self.len as u32; + let ptr = self.ptr as usize; + + buf.set_len(self.encoded_len()); + buf[0..PTR_SIZE].copy_from_slice(&ptr.to_le_bytes()); + + let mut offset = PTR_SIZE; + buf[offset] = self.flag.bits(); + offset += 1; + buf[offset..offset + U32_SIZE].copy_from_slice(&key_len.to_le_bytes()); + + Ok(offset + U32_SIZE) + } +} + +impl<'a, K: ?Sized> TypeRef<'a> for KeyPointer { + unsafe fn from_slice(src: &'a [u8]) -> Self { + let ptr = usize_to_addr(usize::from_le_bytes((&src[..PTR_SIZE]).try_into().unwrap())); + let mut offset = PTR_SIZE; + let flag = EntryFlags::from_bits_retain(src[offset]); + offset += 1; + let key_len = + u32::from_le_bytes((&src[offset..offset + U32_SIZE]).try_into().unwrap()) as usize; + + Self::new(flag, key_len, ptr) + } +} + +impl<'a, K> KeyRef<'a, Self> for KeyPointer +where + K: Type + Ord + ?Sized, + K::Ref<'a>: KeyRef<'a, K>, +{ + #[inline] + fn compare(&self, a: &Q) -> cmp::Ordering + where + Q: ?Sized + Ord + Comparable, + { + Comparable::compare(a, self).reverse() + } + + #[inline] + unsafe fn compare_binary(a: &[u8], b: &[u8]) -> cmp::Ordering { + as KeyRef>::compare_binary(a, b) + } +} + +#[inline] +const fn usize_to_addr(addr: usize) -> *const T { + addr as *const T +} diff --git a/src/wal/query.rs b/src/wal/query.rs new file mode 100644 index 0000000..51a68d8 --- /dev/null +++ b/src/wal/query.rs @@ -0,0 +1,107 @@ +use core::{ + cmp, + marker::PhantomData, + ops::{Bound, RangeBounds}, +}; + +use dbutils::{ + equivalent::{Comparable, Equivalent}, + types::{KeyRef, Type, TypeRef}, +}; +use ref_cast::RefCast; + +use super::KeyPointer; + +#[derive(ref_cast::RefCast)] +#[repr(transparent)] +pub struct Slice<'a, K: ?Sized> { + _k: PhantomData<&'a K>, + data: [u8], +} + +impl<'a, K> Equivalent> for Slice<'a, K> +where + K: Type + ?Sized, + K::Ref<'a>: KeyRef<'a, K>, +{ + fn equivalent(&self, key: &KeyPointer) -> bool { + self.data.eq(key.as_slice()) + } +} + +impl<'a, K> Comparable> for Slice<'a, K> +where + K: Type + ?Sized, + K::Ref<'a>: KeyRef<'a, K>, +{ + fn compare(&self, p: &KeyPointer) -> cmp::Ordering { + unsafe { as KeyRef>::compare_binary(&self.data, p.as_slice()) } + } +} + +pub struct QueryRange<'a, K: ?Sized, Q: ?Sized, R> +where + R: RangeBounds, +{ + r: R, + _q: PhantomData<(&'a Q, &'a K)>, +} + +impl QueryRange<'_, K, Q, R> +where + R: RangeBounds, +{ + #[inline] + pub(super) const fn new(r: R) -> Self { + Self { r, _q: PhantomData } + } +} + +impl<'a, K: ?Sized, Q: ?Sized, R> RangeBounds> for QueryRange<'a, K, Q, R> +where + R: RangeBounds, +{ + #[inline] + fn start_bound(&self) -> Bound<&Query<'a, K, Q>> { + self.r.start_bound().map(RefCast::ref_cast) + } + + fn end_bound(&self) -> Bound<&Query<'a, K, Q>> { + self.r.end_bound().map(RefCast::ref_cast) + } +} + +#[derive(ref_cast::RefCast)] +#[repr(transparent)] +pub struct Query<'a, K, Q> +where + K: ?Sized, + Q: ?Sized, +{ + _k: PhantomData<&'a K>, + key: Q, +} + +impl<'a, K, Q> Equivalent> for Query<'a, K, Q> +where + K: Type + ?Sized, + Q: ?Sized + Equivalent>, +{ + #[inline] + fn equivalent(&self, p: &KeyPointer) -> bool { + let kr = unsafe { as TypeRef<'_>>::from_slice(p.as_slice()) }; + Equivalent::equivalent(&self.key, &kr) + } +} + +impl<'a, K, Q> Comparable> for Query<'a, K, Q> +where + K: Type + ?Sized, + Q: ?Sized + Comparable>, +{ + #[inline] + fn compare(&self, p: &KeyPointer) -> cmp::Ordering { + let kr = unsafe { as TypeRef<'_>>::from_slice(p.as_slice()) }; + Comparable::compare(&self.key, &kr) + } +} diff --git a/src/wal/sealed.rs b/src/wal/sealed.rs deleted 
file mode 100644 index 3b29e2c..0000000 --- a/src/wal/sealed.rs +++ /dev/null @@ -1,601 +0,0 @@ -use core::ptr::NonNull; - -use checksum::{BuildChecksumer, Checksumer}; -use rarena_allocator::{ArenaPosition, BytesRefMut}; - -use super::*; - -pub trait Pointer: Sized { - type Comparator; - - fn new(klen: usize, vlen: usize, ptr: *const u8, cmp: Self::Comparator) -> Self; -} - -pub trait Base: Default { - type Pointer: Pointer; - - fn insert(&mut self, ele: Self::Pointer) - where - Self::Pointer: Ord + 'static; -} - -impl

<P> Base for SkipSet<P>
-where - P: Pointer + Send, -{ - type Pointer = P; - - fn insert(&mut self, ele: Self::Pointer) - where - P: Ord + 'static, - { - SkipSet::insert(self, ele); - } -} - -pub trait WalCore { - type Allocator: Allocator; - type Base: Base; - type Pointer: Pointer; - - fn construct(arena: Self::Allocator, base: Self::Base, opts: Options, cmp: C, cks: S) -> Self; -} - -macro_rules! preprocess_batch { - ($this:ident($batch:ident)) => {{ - $batch - .iter_mut() - .try_fold((0u32, 0u64), |(num_entries, size), ent| { - let klen = ent.key_len(); - let vlen = ent.value_len(); - $this.check_batch_entry(klen, vlen).map(|_| { - let merged_len = merge_lengths(klen as u32, vlen as u32); - let merged_len_size = encoded_u64_varint_len(merged_len); - let ent_size = klen as u64 + vlen as u64 + merged_len_size as u64; - ent.meta = BatchEncodedEntryMeta::new(klen, vlen, merged_len, merged_len_size); - (num_entries + 1, size + ent_size) - }) - }) - .and_then(|(num_entries, batch_encoded_size)| { - // safe to cast batch_encoded_size to u32 here, we already checked it's less than capacity (less than u32::MAX). - let batch_meta = merge_lengths(num_entries, batch_encoded_size as u32); - let batch_meta_size = encoded_u64_varint_len(batch_meta); - let allocator = $this.allocator(); - let remaining = allocator.remaining() as u64; - let total_size = - STATUS_SIZE as u64 + batch_meta_size as u64 + batch_encoded_size + CHECKSUM_SIZE as u64; - if total_size > remaining { - return Err(Error::insufficient_space(total_size, remaining as u32)); - } - - let mut buf = allocator - .alloc_bytes(total_size as u32) - .map_err(Error::from_insufficient_space)?; - - let flag = Flags::BATCHING; - - unsafe { - buf.put_u8_unchecked(flag.bits()); - buf.put_u64_varint_unchecked(batch_meta); - } - - Ok((1 + batch_meta_size, allocator, buf)) - }) - }}; -} - -pub trait Sealed: Constructor { - #[inline] - fn check( - &self, - klen: usize, - vlen: usize, - max_key_size: u32, - max_value_size: u32, - ro: bool, - ) -> Result<(), Error> { - crate::check(klen, vlen, max_key_size, max_value_size, ro) - } - - #[inline] - fn check_batch_entry(&self, klen: usize, vlen: usize) -> Result<(), Error> { - let opts = self.options(); - let max_key_size = opts.maximum_key_size(); - let max_value_size = opts.maximum_value_size(); - - crate::utils::check_batch_entry(klen, vlen, max_key_size, max_value_size) - } - - fn hasher(&self) -> &S; - - fn options(&self) -> &Options; - - fn comparator(&self) -> &C; - - fn insert_pointer(&self, ptr: Self::Pointer) - where - C: Comparator; - - fn insert_pointers(&self, ptrs: impl Iterator) - where - C: Comparator; - - fn insert_batch_with_key_builder_in( - &mut self, - batch: &mut B, - ) -> Result<(), Either> - where - B: BatchWithKeyBuilder>, - B::Value: Borrow<[u8]>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Either::Right)?; - - unsafe { - let cmp = self.comparator(); - - for ent in batch.iter_mut() { - let klen = ent.key_len(); - let vlen = ent.value_len(); - let merged_kv_len = ent.meta.kvlen; - let merged_kv_len_size = ent.meta.kvlen_size; - let remaining = buf.remaining(); - if remaining < merged_kv_len_size + klen + vlen { - return Err(Either::Right(Error::larger_batch_size( - buf.capacity() as u32 - ))); - } - - let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len); - let ptr = buf.as_mut_ptr().add(cursor + ent_len_size); - buf.set_len(cursor + ent_len_size + klen); - let f = ent.key_builder().builder(); - f(&mut 
VacantBuffer::new(klen, NonNull::new_unchecked(ptr))).map_err(Either::Left)?; - - cursor += ent_len_size + klen; - cursor += vlen; - buf.put_slice_unchecked(ent.value().borrow()); - ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone())); - } - - self - .insert_batch_helper(allocator, buf, cursor) - .map_err(Either::Right) - } - } - - fn insert_batch_with_value_builder_in( - &mut self, - batch: &mut B, - ) -> Result<(), Either> - where - B: BatchWithValueBuilder>, - B::Key: Borrow<[u8]>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Either::Right)?; - - unsafe { - let cmp = self.comparator(); - - for ent in batch.iter_mut() { - let klen = ent.key_len(); - let vlen = ent.value_len(); - let merged_kv_len = ent.meta.kvlen; - let merged_kv_len_size = ent.meta.kvlen_size; - let remaining = buf.remaining(); - if remaining < merged_kv_len_size + klen + vlen { - return Err(Either::Right(Error::larger_batch_size( - buf.capacity() as u32 - ))); - } - - let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len); - let ptr = buf.as_mut_ptr().add(cursor + ent_len_size); - cursor += klen + ent_len_size; - buf.put_slice_unchecked(ent.key().borrow()); - buf.set_len(cursor + vlen); - let f = ent.vb.builder(); - let mut vacant_buffer = VacantBuffer::new(klen, NonNull::new_unchecked(ptr.add(klen))); - f(&mut vacant_buffer).map_err(Either::Left)?; - - cursor += vlen; - ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone())); - } - - self - .insert_batch_helper(allocator, buf, cursor) - .map_err(Either::Right) - } - } - - fn insert_batch_with_builders_in( - &mut self, - batch: &mut B, - ) -> Result<(), Among> - where - B: BatchWithBuilders>, - C: Comparator + CheapClone, - S: BuildChecksumer, - { - let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Among::Right)?; - - unsafe { - let cmp = self.comparator(); - - for ent in batch.iter_mut() { - let klen = ent.key_len(); - let vlen = ent.value_len(); - let merged_kv_len = ent.meta.kvlen; - let merged_kv_len_size = ent.meta.kvlen_size; - - let remaining = buf.remaining(); - if remaining < merged_kv_len_size + klen + vlen { - return Err(Among::Right( - Error::larger_batch_size(buf.capacity() as u32), - )); - } - - let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len); - let ptr = buf.as_mut_ptr().add(cursor + ent_len_size); - buf.set_len(cursor + ent_len_size + klen); - let f = ent.key_builder().builder(); - f(&mut VacantBuffer::new(klen, NonNull::new_unchecked(ptr))).map_err(Among::Left)?; - cursor += ent_len_size + klen; - buf.set_len(cursor + vlen); - let f = ent.value_builder().builder(); - f(&mut VacantBuffer::new( - klen, - NonNull::new_unchecked(ptr.add(klen)), - )) - .map_err(Among::Middle)?; - cursor += vlen; - ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone())); - } - - self - .insert_batch_helper(allocator, buf, cursor) - .map_err(Among::Right) - } - } - - fn insert_batch_in>(&mut self, batch: &mut B) -> Result<(), Error> - where - C: Comparator + CheapClone, - S: BuildChecksumer, - { - let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch))?; - - unsafe { - let cmp = self.comparator(); - - for ent in batch.iter_mut() { - let klen = ent.key_len(); - let vlen = ent.value_len(); - let merged_kv_len = ent.meta.kvlen; - let merged_kv_len_size = ent.meta.kvlen_size; - - let remaining = buf.remaining(); - if remaining < merged_kv_len_size + klen + vlen { - return 
-pub trait Sealed<C, S>: Constructor<C, S> {
-  #[inline]
-  fn check(
-    &self,
-    klen: usize,
-    vlen: usize,
-    max_key_size: u32,
-    max_value_size: u32,
-    ro: bool,
-  ) -> Result<(), Error> {
-    crate::check(klen, vlen, max_key_size, max_value_size, ro)
-  }
-
-  #[inline]
-  fn check_batch_entry(&self, klen: usize, vlen: usize) -> Result<(), Error> {
-    let opts = self.options();
-    let max_key_size = opts.maximum_key_size();
-    let max_value_size = opts.maximum_value_size();
-
-    crate::utils::check_batch_entry(klen, vlen, max_key_size, max_value_size)
-  }
-
-  fn hasher(&self) -> &S;
-
-  fn options(&self) -> &Options;
-
-  fn comparator(&self) -> &C;
-
-  fn insert_pointer(&self, ptr: Self::Pointer)
-  where
-    C: Comparator;
-
-  fn insert_pointers(&self, ptrs: impl Iterator<Item = Self::Pointer>)
-  where
-    C: Comparator;
-
-  fn insert_batch_with_key_builder_in<B>(
-    &mut self,
-    batch: &mut B,
-  ) -> Result<(), Either<B::Error, Error>>
-  where
-    B: BatchWithKeyBuilder<Self::Pointer>,
-    B::Value: Borrow<[u8]>,
-    C: Comparator + CheapClone,
-    S: BuildChecksumer,
-  {
-    let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Either::Right)?;
-
-    unsafe {
-      let cmp = self.comparator();
-
-      for ent in batch.iter_mut() {
-        let klen = ent.key_len();
-        let vlen = ent.value_len();
-        let merged_kv_len = ent.meta.kvlen;
-        let merged_kv_len_size = ent.meta.kvlen_size;
-        let remaining = buf.remaining();
-        if remaining < merged_kv_len_size + klen + vlen {
-          return Err(Either::Right(Error::larger_batch_size(
-            buf.capacity() as u32,
-          )));
-        }
-
-        let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len);
-        let ptr = buf.as_mut_ptr().add(cursor + ent_len_size);
-        buf.set_len(cursor + ent_len_size + klen);
-        let f = ent.key_builder().builder();
-        f(&mut VacantBuffer::new(klen, NonNull::new_unchecked(ptr))).map_err(Either::Left)?;
-
-        cursor += ent_len_size + klen;
-        cursor += vlen;
-        buf.put_slice_unchecked(ent.value().borrow());
-        ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone()));
-      }
-
-      self
-        .insert_batch_helper(allocator, buf, cursor)
-        .map_err(Either::Right)
-    }
-  }
-
-  fn insert_batch_with_value_builder_in<B>(
-    &mut self,
-    batch: &mut B,
-  ) -> Result<(), Either<B::Error, Error>>
-  where
-    B: BatchWithValueBuilder<Self::Pointer>,
-    B::Key: Borrow<[u8]>,
-    C: Comparator + CheapClone,
-    S: BuildChecksumer,
-  {
-    let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Either::Right)?;
-
-    unsafe {
-      let cmp = self.comparator();
-
-      for ent in batch.iter_mut() {
-        let klen = ent.key_len();
-        let vlen = ent.value_len();
-        let merged_kv_len = ent.meta.kvlen;
-        let merged_kv_len_size = ent.meta.kvlen_size;
-        let remaining = buf.remaining();
-        if remaining < merged_kv_len_size + klen + vlen {
-          return Err(Either::Right(Error::larger_batch_size(
-            buf.capacity() as u32,
-          )));
-        }
-
-        let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len);
-        let ptr = buf.as_mut_ptr().add(cursor + ent_len_size);
-        cursor += klen + ent_len_size;
-        buf.put_slice_unchecked(ent.key().borrow());
-        buf.set_len(cursor + vlen);
-        let f = ent.vb.builder();
-        let mut vacant_buffer = VacantBuffer::new(vlen, NonNull::new_unchecked(ptr.add(klen)));
-        f(&mut vacant_buffer).map_err(Either::Left)?;
-
-        cursor += vlen;
-        ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone()));
-      }
-
-      self
-        .insert_batch_helper(allocator, buf, cursor)
-        .map_err(Either::Right)
-    }
-  }
-
-  fn insert_batch_with_builders_in<B>(
-    &mut self,
-    batch: &mut B,
-  ) -> Result<(), Among<B::KeyError, B::ValueError, Error>>
-  where
-    B: BatchWithBuilders<Self::Pointer>,
-    C: Comparator + CheapClone,
-    S: BuildChecksumer,
-  {
-    let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch)).map_err(Among::Right)?;
-
-    unsafe {
-      let cmp = self.comparator();
-
-      for ent in batch.iter_mut() {
-        let klen = ent.key_len();
-        let vlen = ent.value_len();
-        let merged_kv_len = ent.meta.kvlen;
-        let merged_kv_len_size = ent.meta.kvlen_size;
-
-        let remaining = buf.remaining();
-        if remaining < merged_kv_len_size + klen + vlen {
-          return Err(Among::Right(
-            Error::larger_batch_size(buf.capacity() as u32),
-          ));
-        }
-
-        let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len);
-        let ptr = buf.as_mut_ptr().add(cursor + ent_len_size);
-        buf.set_len(cursor + ent_len_size + klen);
-        let f = ent.key_builder().builder();
-        f(&mut VacantBuffer::new(klen, NonNull::new_unchecked(ptr))).map_err(Among::Left)?;
-        cursor += ent_len_size + klen;
-        buf.set_len(cursor + vlen);
-        let f = ent.value_builder().builder();
-        f(&mut VacantBuffer::new(
-          vlen,
-          NonNull::new_unchecked(ptr.add(klen)),
-        ))
-        .map_err(Among::Middle)?;
-        cursor += vlen;
-        ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone()));
-      }
-
-      self
-        .insert_batch_helper(allocator, buf, cursor)
-        .map_err(Among::Right)
-    }
-  }
-
-  fn insert_batch_in<B: Batch<Pointer = Self::Pointer>>(&mut self, batch: &mut B) -> Result<(), Error>
-  where
-    C: Comparator + CheapClone,
-    S: BuildChecksumer,
-  {
-    let (mut cursor, allocator, mut buf) = preprocess_batch!(self(batch))?;
-
-    unsafe {
-      let cmp = self.comparator();
-
-      for ent in batch.iter_mut() {
-        let klen = ent.key_len();
-        let vlen = ent.value_len();
-        let merged_kv_len = ent.meta.kvlen;
-        let merged_kv_len_size = ent.meta.kvlen_size;
-
-        let remaining = buf.remaining();
-        if remaining < merged_kv_len_size + klen + vlen {
-          return Err(Error::larger_batch_size(buf.capacity() as u32));
-        }
-
-        let ent_len_size = buf.put_u64_varint_unchecked(merged_kv_len);
-        let ptr = buf.as_mut_ptr().add(cursor + ent_len_size);
-        cursor += ent_len_size + klen;
-        buf.put_slice_unchecked(ent.key().borrow());
-        cursor += vlen;
-        buf.put_slice_unchecked(ent.value().borrow());
-        ent.pointer = Some(Pointer::new(klen, vlen, ptr, cmp.cheap_clone()));
-      }
-
-      self.insert_batch_helper(allocator, buf, cursor)
-    }
-  }
-
-  fn insert_with_in<KE, VE>(
-    &mut self,
-    kb: KeyBuilder<impl FnOnce(&mut VacantBuffer<'_>) -> Result<(), KE>>,
-    vb: ValueBuilder<impl FnOnce(&mut VacantBuffer<'_>) -> Result<(), VE>>,
-  ) -> Result<Self::Pointer, Among<KE, VE, Error>>
-  where
-    C: Comparator + CheapClone,
-    S: BuildChecksumer,
-  {
-    let (klen, kf) = kb.into_components();
-    let (vlen, vf) = vb.into_components();
-    let (len_size, kvlen, elen) = entry_size(klen, vlen);
-    let klen = klen as usize;
-    let vlen = vlen as usize;
-    let allocator = self.allocator();
-    let is_ondisk = allocator.is_ondisk();
-    let buf = allocator.alloc_bytes(elen);
-    let mut cks = self.hasher().build_checksumer();
-
-    match buf {
-      Err(e) => Err(Among::Right(Error::from_insufficient_space(e))),
-      Ok(mut buf) => {
-        unsafe {
-          // We allocate the buffer with the exact size, so it's safe to write to the buffer.
-          let flag = Flags::COMMITTED.bits();
-
-          cks.update(&[flag]);
-
-          buf.put_u8_unchecked(Flags::empty().bits());
-          let written = buf.put_u64_varint_unchecked(kvlen);
-          debug_assert_eq!(
-            written, len_size,
-            "the precalculated size should be equal to the written size"
-          );
-
-          let ko = STATUS_SIZE + written;
-          buf.set_len(ko + klen + vlen);
-
-          kf(&mut VacantBuffer::new(
-            klen,
-            NonNull::new_unchecked(buf.as_mut_ptr().add(ko)),
-          ))
-          .map_err(Among::Left)?;
-
-          let vo = ko + klen;
-          vf(&mut VacantBuffer::new(
-            vlen,
-            NonNull::new_unchecked(buf.as_mut_ptr().add(vo)),
-          ))
-          .map_err(Among::Middle)?;
-
-          let cks = {
-            cks.update(&buf[1..]);
-            cks.digest()
-          };
-          buf.put_u64_le_unchecked(cks);
-
-          // commit the entry
-          buf[0] |= Flags::COMMITTED.bits();
-
-          if self.options().sync() && is_ondisk {
-            allocator
-              .flush_header_and_range(buf.offset(), elen as usize)
-              .map_err(|e| Among::Right(e.into()))?;
-          }
-
-          buf.detach();
-          let cmp = self.comparator().cheap_clone();
-          let ptr = buf.as_ptr().add(ko);
-          Ok(Pointer::new(klen, vlen, ptr, cmp))
-        }
-      }
-    }
-  }
-}
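Putting `preprocess_batch!` and `insert_with_in` together, a single (non-batched) record on disk is laid out as `[status u8][varint(merge_lengths(klen, vlen))][key][value][checksum u64 LE]`. A standalone decoder sketch under that assumption; `decode_u64_varint` stands in for the same varint decoder `replay` uses below, and the `COMMITTED` bit value is hypothetical:

struct DecodedEntry<'a> {
  committed: bool,
  key: &'a [u8],
  value: &'a [u8],
  checksum: u64,
}

/// Decodes one record from `buf`, which must start at a record boundary.
/// `decode_u64_varint` is assumed to return (bytes_read, value).
fn decode_entry(buf: &[u8]) -> Option<DecodedEntry<'_>> {
  const COMMITTED: u8 = 0b1; // hypothetical bit; the crate defines Flags::COMMITTED
  let status = *buf.first()?;
  let (read, merged) = decode_u64_varint(&buf[1..]).ok()?;
  let (klen, vlen) = ((merged >> 32) as usize, (merged as u32) as usize);
  let ko = 1 + read; // key offset: status byte + length varint
  let cks_at = ko + klen + vlen;
  Some(DecodedEntry {
    committed: status & COMMITTED != 0,
    key: buf.get(ko..ko + klen)?,
    value: buf.get(ko + klen..cks_at)?,
    checksum: u64::from_le_bytes(buf.get(cks_at..cks_at + 8)?.try_into().ok()?),
  })
}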
-trait SealedExt<C, S>: Sealed<C, S> {
-  unsafe fn insert_batch_helper(
-    &self,
-    allocator: &Self::Allocator,
-    mut buf: BytesRefMut<'_, Self::Allocator>,
-    cursor: usize,
-  ) -> Result<(), Error>
-  where
-    S: BuildChecksumer,
-  {
-    let total_size = buf.capacity();
-    if cursor + CHECKSUM_SIZE != total_size {
-      return Err(Error::batch_size_mismatch(
-        total_size as u32 - CHECKSUM_SIZE as u32,
-        cursor as u32,
-      ));
-    }
-
-    let mut cks = self.hasher().build_checksumer();
-    let committed_flag = Flags::BATCHING | Flags::COMMITTED;
-    cks.update(&[committed_flag.bits()]);
-    cks.update(&buf[1..]);
-    let checksum = cks.digest();
-    buf.put_u64_le_unchecked(checksum);
-
-    // commit the entry
-    buf[0] = committed_flag.bits();
-    let buf_cap = buf.capacity();
-
-    if self.options().sync() && allocator.is_ondisk() {
-      allocator.flush_header_and_range(buf.offset(), buf_cap)?;
-    }
-    buf.detach();
-    Ok(())
-  }
-}
-
-impl<C, S, T> SealedExt<C, S> for T where T: Sealed<C, S> {}
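Worth noting in `insert_batch_helper` (and in `insert_with_in` above): the checksum is computed as though the COMMITTED bit were already set, the digest is appended, and only then is the status byte flipped, so the one-byte store is the commit point. `replay` below distinguishes exactly these outcomes; a condensed, illustrative classifier, where the bit value and the `checksum_one` closure are stand-ins for the crate's `Flags` and checksumer:

fn classify(entry: &[u8], checksum_one: impl Fn(&[u8]) -> u64) -> &'static str {
  const COMMITTED: u8 = 0b1; // hypothetical bit; see Flags::COMMITTED
  let n = entry.len();
  let stored = u64::from_le_bytes(entry[n - 8..].try_into().unwrap());
  if entry[0] & COMMITTED == 0 {
    // Torn before the commit byte was stored: discard by rewinding.
    "uncommitted"
  } else if checksum_one(&entry[..n - 8]) != stored {
    // Commit byte present but payload damaged: report corruption.
    "corrupted"
  } else {
    "committed"
  }
}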
-pub trait Constructor<C, S>: Sized {
-  type Allocator: Allocator;
-  type Core: WalCore<C, S, Allocator = Self::Allocator, Pointer = Self::Pointer>;
-  type Pointer: Pointer;
-
-  fn allocator(&self) -> &Self::Allocator;
-
-  fn new_in(arena: Self::Allocator, opts: Options, cmp: C, cks: S) -> Result<Self::Core, Error> {
-    unsafe {
-      let slice = arena.reserved_slice_mut();
-      slice[0..6].copy_from_slice(&MAGIC_TEXT);
-      slice[6..8].copy_from_slice(&opts.magic_version().to_le_bytes());
-    }
-
-    arena
-      .flush_range(0, HEADER_SIZE)
-      .map(|_| <Self::Core as WalCore<C, S>>::construct(arena, Default::default(), opts, cmp, cks))
-      .map_err(Into::into)
-  }
-
-  fn replay(
-    arena: Self::Allocator,
-    opts: Options,
-    ro: bool,
-    cmp: C,
-    checksumer: S,
-  ) -> Result<Self::Core, Error>
-  where
-    C: CheapClone,
-    S: BuildChecksumer,
-    Self::Pointer: Ord + 'static,
-  {
-    let slice = arena.reserved_slice();
-    let magic_text = &slice[0..6];
-    let magic_version = u16::from_le_bytes(slice[6..8].try_into().unwrap());
-
-    if magic_text != MAGIC_TEXT {
-      return Err(Error::magic_text_mismatch());
-    }
-
-    if magic_version != opts.magic_version() {
-      return Err(Error::magic_version_mismatch());
-    }
-
-    let mut set = <Self::Core as WalCore<C, S>>::Base::default();
-
-    let mut cursor = arena.data_offset();
-    let allocated = arena.allocated();
-
-    loop {
-      unsafe {
-        // We reached the end of the arena. If anything remains, there are two possibilities:
-        // 1. The remainder is a partial entry that was never persisted to disk, so following
-        //    the write-ahead log principle, we discard it.
-        // 2. The file may be corrupted, so we discard the remainder as well.
-        if cursor + STATUS_SIZE > allocated {
-          if !ro && cursor < allocated {
-            arena.rewind(ArenaPosition::Start(cursor as u32));
-            arena.flush()?;
-          }
-          break;
-        }
-
-        let header = arena.get_u8(cursor).unwrap();
-        let flag = Flags::from_bits_retain(header);
-
-        if !flag.contains(Flags::BATCHING) {
-          let (readed, encoded_len) = arena.get_u64_varint(cursor + STATUS_SIZE).map_err(|e| {
-            #[cfg(feature = "tracing")]
-            tracing::error!(err=%e);
-
-            Error::corrupted(e)
-          })?;
-          let (key_len, value_len) = split_lengths(encoded_len);
-          let key_len = key_len as usize;
-          let value_len = value_len as usize;
-          // Same as above: if we reached the end of the arena, we discard the remainder.
-          let cks_offset = STATUS_SIZE + readed + key_len + value_len;
-          if cks_offset + CHECKSUM_SIZE > allocated {
-            // If the entry is committed, the file must have been truncated, so report corruption.
-            if flag.contains(Flags::COMMITTED) {
-              return Err(Error::corrupted("file is truncated"));
-            }
-
-            if !ro {
-              arena.rewind(ArenaPosition::Start(cursor as u32));
-              arena.flush()?;
-            }
-
-            break;
-          }
-
-          let cks = arena.get_u64_le(cursor + cks_offset).unwrap();
-
-          if cks != checksumer.checksum_one(arena.get_bytes(cursor, cks_offset)) {
-            return Err(Error::corrupted("checksum mismatch"));
-          }
-
-          // If the entry is not committed, discard it by rewinding (when writable) and stop replaying.
-          if !flag.contains(Flags::COMMITTED) {
-            if !ro {
-              arena.rewind(ArenaPosition::Start(cursor as u32));
-              arena.flush()?;
-            }
-
-            break;
-          }
-
-          set.insert(Pointer::new(
-            key_len,
-            value_len,
-            arena.get_pointer(cursor + STATUS_SIZE + readed),
-            cmp.cheap_clone(),
-          ));
-          cursor += cks_offset + CHECKSUM_SIZE;
-        } else {
-          let (readed, encoded_len) = arena.get_u64_varint(cursor + STATUS_SIZE).map_err(|e| {
-            #[cfg(feature = "tracing")]
-            tracing::error!(err=%e);
-
-            Error::corrupted(e)
-          })?;
-
-          let (num_entries, encoded_data_len) = split_lengths(encoded_len);
-
-          // Same as above: if we reached the end of the arena, we discard the remainder.
-          let cks_offset = STATUS_SIZE + readed + encoded_data_len as usize;
-          if cks_offset + CHECKSUM_SIZE > allocated {
-            // If the entry is committed, the file must have been truncated, so report corruption.
-            if flag.contains(Flags::COMMITTED) {
-              return Err(Error::corrupted("file is truncated"));
-            }
-
-            if !ro {
-              arena.rewind(ArenaPosition::Start(cursor as u32));
-              arena.flush()?;
-            }
-
-            break;
-          }
-
-          let cks = arena.get_u64_le(cursor + cks_offset).unwrap();
-          let mut batch_data_buf = arena.get_bytes(cursor, cks_offset);
-          if cks != checksumer.checksum_one(batch_data_buf) {
-            return Err(Error::corrupted("checksum mismatch"));
-          }
-
-          let mut sub_cursor = 0;
-          batch_data_buf = &batch_data_buf[1 + readed..];
-          for _ in 0..num_entries {
-            let (kvlen, ent_len) = decode_u64_varint(batch_data_buf).map_err(|e| {
-              #[cfg(feature = "tracing")]
-              tracing::error!(err=%e);
-
-              Error::corrupted(e)
-            })?;
-
-            let (klen, vlen) = split_lengths(ent_len);
-            let klen = klen as usize;
-            let vlen = vlen as usize;
-
-            let ptr = Pointer::new(
-              klen,
-              vlen,
-              arena.get_pointer(cursor + STATUS_SIZE + readed + sub_cursor + kvlen),
-              cmp.cheap_clone(),
-            );
-            set.insert(ptr);
-            let ent_len = kvlen + klen + vlen;
-            sub_cursor += ent_len;
-            batch_data_buf = &batch_data_buf[ent_len..];
-          }
-
-          debug_assert_eq!(
-            sub_cursor, encoded_data_len as usize,
-            "encoded batch data size does not match the actual size"
-          );
-
-          cursor += cks_offset + CHECKSUM_SIZE;
-        }
-      }
-    }
-
-    Ok(<Self::Core as WalCore<C, S>>::construct(
-      arena, set, opts, cmp, checksumer,
-    ))
-  }
-
-  fn from_core(core: Self::Core) -> Self;
-}
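`new_in` and `replay` agree on the reserved header that precedes the log data: six bytes of magic text followed by the configurable `magic_version` as a little-endian u16, eight bytes in total, matching `HEADER_SIZE`. A minimal validation sketch under those assumptions; the magic constant shown is a placeholder, since the crate defines its own `MAGIC_TEXT`:

const MAGIC_TEXT: [u8; 6] = *b"XXXXXX"; // placeholder; not the crate's real magic

fn check_header(reserved: &[u8; 8], expected_version: u16) -> Result<(), &'static str> {
  if reserved[0..6] != MAGIC_TEXT {
    return Err("magic text mismatch");
  }
  let version = u16::from_le_bytes([reserved[6], reserved[7]]);
  if version != expected_version {
    return Err("magic version mismatch");
  }
  Ok(())
}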