diff --git a/ReadMe.adoc b/ReadMe.adoc
index 1321c28ff..c1a32d637 100644
--- a/ReadMe.adoc
+++ b/ReadMe.adoc
@@ -3,7 +3,8 @@ Chronicle Software
:css-signature: demo
:toc: macro
:toclevels: 2
-:icons: font
+:lang: en-GB
+:source-highlighter: rouge
image:https://maven-badges.herokuapp.com/maven-central/net.openhft/chronicle-map/badge.svg[caption="",link=https://maven-badges.herokuapp.com/maven-central/net.openhft/chronicle-map]
image:https://javadoc.io/badge2/net.openhft/chronicle-map/javadoc.svg[link="https://www.javadoc.io/doc/net.openhft/chronicle-map/latest/index.html"]
diff --git a/benchmark/README.adoc b/benchmark/README.adoc
index 87f39efd4..27d84f16e 100644
--- a/benchmark/README.adoc
+++ b/benchmark/README.adoc
@@ -1,7 +1,9 @@
= Benchmark results
Peter Lawrey
+:lang: en-GB
+:source-highlighter: rouge
-Summary of results on a Ryzen 9 5950X with a Corsair MP600 driver
+Summary of results on a Ryzen 9 5950X with a Corsair MP600 drive
== 1 million reads and 1 million writes each second of 500 byte messages
@@ -98,4 +100,4 @@ worst: 8896.51 9191.42 9191.42 588251.14 8232.96
------------------------------------------------------------------------------------------
```
-NOTE: Running this dataset on a 64 GB machine had lower outliers when run without graphical interface to save some memory and contention.
\ No newline at end of file
+NOTE: Running this dataset on a 64 GB machine had lower outliers when run without graphical interface to save some memory and contention.
diff --git a/checkstyle-suppressions.xml b/checkstyle-suppressions.xml
index e1badbb7f..aed5c7a4d 100644
--- a/checkstyle-suppressions.xml
+++ b/checkstyle-suppressions.xml
@@ -15,4 +15,57 @@
-
\ No newline at end of file
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/CM_Compatibility_and_Versioning.adoc b/docs/CM_Compatibility_and_Versioning.adoc
index 2cdefa6ae..b1fc20f31 100644
--- a/docs/CM_Compatibility_and_Versioning.adoc
+++ b/docs/CM_Compatibility_and_Versioning.adoc
@@ -5,6 +5,8 @@ Neil Clifford
:css-signature: demo
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
toc::[]
diff --git a/docs/CM_Download.adoc b/docs/CM_Download.adoc
index 5d9cd796b..7ff28c18a 100644
--- a/docs/CM_Download.adoc
+++ b/docs/CM_Download.adoc
@@ -5,6 +5,8 @@ Neil Clifford
:css-signature: demo
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
toc::[]
diff --git a/docs/CM_FAQs.adoc b/docs/CM_FAQs.adoc
index 8cdbc3f5f..9ab5e1a2a 100644
--- a/docs/CM_FAQs.adoc
+++ b/docs/CM_FAQs.adoc
@@ -5,6 +5,8 @@ Neil Clifford, Peter Lawrey, Rob Austin, Jerry Shea
:css-signature: demo
:toc-placement: preamble
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
This document provides information for some common tasks within Chronicle Map.
@@ -91,12 +93,12 @@ This is because Chronicle uses the page cache and RAM is in effect a cache to th
There are two cases where having a high-speed disk will give you a real benefit:
-==== 1. Data rate
+==== Data rate
If the rate of data that you are writing exceeds the disk write speed.
In most applications this is unlikely to occur.
-==== 2. Page cache misses
+==== Page cache misses
When you get a page cache miss.
For Chronicle queues which write and read messages lineally across memory, we mitigate this situation with the use of the Chronicle pre-toucher.
diff --git a/docs/CM_Features.adoc b/docs/CM_Features.adoc
index 62e15d6df..22289ade2 100644
--- a/docs/CM_Features.adoc
+++ b/docs/CM_Features.adoc
@@ -5,6 +5,8 @@ Neil Clifford
:css-signature: demo
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
toc::[]
@@ -12,7 +14,7 @@ Chronicle Map is an in-memory, key-value store, designed for low-latency, and/or
== Features
-- **Ultra low latency**: Chronicle Map targets median latency of both read and write queries of less than 1 microsecond in https://github.com/OpenHFT/Chronicle-Map/search?l=java&q=perf&type=Code[certain tests].
+- **Ultra low latency**: Chronicle Map targets median latency of both read and write queries of less than 1 µs in https://github.com/OpenHFT/Chronicle-Map/search?l=java&q=perf&type=Code[certain tests].
- **High concurrency**: Write queries scale well up to the number of hardware execution threads in the server.
Read queries never block each other.
@@ -23,7 +25,7 @@ Read queries never block each other.
- Multiple processes can access a Chronicle Map concurrently.
At the same time, the data store is *in-process* for each of the accessing processes.
-Out-of-process approach to IPC is simply incompatible with Chronicle Map's median latency target of < 1 μs.
+Out-of-process approach to IPC is simply incompatible with Chronicle Map's median latency target of < 1 µs.
- Replication
diff --git a/docs/CM_Replication.adoc b/docs/CM_Replication.adoc
index 82c58bc3d..a5e78d6bc 100644
--- a/docs/CM_Replication.adoc
+++ b/docs/CM_Replication.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
== Enterprise Edition
diff --git a/docs/CM_Tutorial.adoc b/docs/CM_Tutorial.adoc
index 22cd29044..cbda1ebcf 100644
--- a/docs/CM_Tutorial.adoc
+++ b/docs/CM_Tutorial.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
This document describes the Chronicle Map tutorial supplied with the project.
diff --git a/docs/CM_Tutorial_Behaviour.adoc b/docs/CM_Tutorial_Behaviour.adoc
index d8a1f445a..a3a33f0b5 100644
--- a/docs/CM_Tutorial_Behaviour.adoc
+++ b/docs/CM_Tutorial_Behaviour.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
== Customization
@@ -333,4 +336,3 @@ System.out.println(map2.get("1"));
'''
<>
-
diff --git a/docs/CM_Tutorial_Bytes.adoc b/docs/CM_Tutorial_Bytes.adoc
index 93b09d042..b3cefb260 100644
--- a/docs/CM_Tutorial_Bytes.adoc
+++ b/docs/CM_Tutorial_Bytes.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
This pair of interfaces is configured using `ChronicleMapBuilder.keyMarshallers()` or
diff --git a/docs/CM_Tutorial_DataAccess.adoc b/docs/CM_Tutorial_DataAccess.adoc
index 2a1cb4918..aa44f1d3f 100644
--- a/docs/CM_Tutorial_DataAccess.adoc
+++ b/docs/CM_Tutorial_DataAccess.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
This pair of interfaces is configured using `ChronicleMapBuilder.keyReaderAndDataAccess()` or
@@ -166,4 +169,4 @@ NOTE: If you configure `byte[]` key, or value type, then this pair of serializer
'''
-<>
\ No newline at end of file
+<>
diff --git a/docs/CM_Tutorial_Sized.adoc b/docs/CM_Tutorial_Sized.adoc
index 7317719ea..37af36d6e 100644
--- a/docs/CM_Tutorial_Sized.adoc
+++ b/docs/CM_Tutorial_Sized.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
This pair of interfaces is configured using `ChronicleMapBuilder.keyMarshallers()` or
diff --git a/docs/CM_Tutorial_Understanding.adoc b/docs/CM_Tutorial_Understanding.adoc
index c3e40948a..8ee4c4553 100644
--- a/docs/CM_Tutorial_Understanding.adoc
+++ b/docs/CM_Tutorial_Understanding.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
== Problems
diff --git a/docs/CM_Updates.adoc b/docs/CM_Updates.adoc
index b475d69df..c6e0bf427 100644
--- a/docs/CM_Updates.adoc
+++ b/docs/CM_Updates.adoc
@@ -6,6 +6,9 @@ Neil Clifford
:toc-placement: macro
:icons: font
+:lang: en-GB
+:source-highlighter: rouge
+
toc::[]
Changes between Chronicle Map 3 and the previous Chronicle Map version are detailed below:
diff --git a/docs/systemProperties.adoc b/docs/systemProperties.adoc
new file mode 100644
index 000000000..daabd2159
--- /dev/null
+++ b/docs/systemProperties.adoc
@@ -0,0 +1,47 @@
+= Chronicle Map System Properties
+:toc:
+:lang: en-GB
+:source-highlighter: rouge
+
+== System Properties
+
+This document lists system properties that influence Chronicle Map behaviour in production and test runs.
+Set each property on the Java command line using `-Dkey=value`.
+
+NOTE: Unless otherwise stated, boolean properties are parsed via Chronicle Core's `Jvm.getBoolean` helper.
+They are treated as enabled if the key is present with no value, `true` or `yes`.
+
+.System properties
+[cols="2a,1,3a,2a",options="header"]
+|===
+| Property Key
+| Default Value
+| Description
+| Java Variable Name (Type)
+
+| `chronicle.map.creation.debug`
+| `false`
+| Enables verbose logging during Chronicle Map creation and recovery, including map name, file paths and configuration details. Useful when diagnosing startup issues and misconfigured builders.
+| `MAP_CREATION_DEBUG` (`boolean`)
+
+| `chronicle.map.file.lock.timeout.secs`
+| `10`
+| Number of seconds Chronicle Map waits when acquiring an internal file lock before giving up and throwing a lock related exception.
+| `FILE_LOCK_TIMEOUT` (`int`)
+
+| `chronicle.map.sparseFile`
+| `false`
+| When `true`, Chronicle Map uses sparse files for persisted data. This can reduce disk usage on some file systems at the cost of more fragmented allocation patterns.
+| `sparseFile` (`boolean`)
+
+| `chronicle.map.disable.locking`
+| `false`
+| Disables Chronicle Map file locking. When `true`, shared and exclusive locks are not acquired, which may be required on platforms without reliable file locking but removes protection against concurrent writers.
+| `USE_EXCLUSIVE_LOCKING`, `USE_SHARED_LOCKING` (derived `boolean` flags)
+
+| `net.openhft.chronicle.map.lockTimeoutSeconds`
+| `60`
+| Overrides the global timeout, in seconds, used when waiting for segment level locks in multi-process use. If this timeout is exceeded an `InterProcessDeadLockException` is thrown with guidance on possible causes.
+| `LOCK_TIMEOUT_SECONDS` (`int`)
+|===
+
diff --git a/pom.xml b/pom.xml
index 38ec124e0..b2cd75824 100644
--- a/pom.xml
+++ b/pom.xml
@@ -33,7 +33,7 @@
net.openhft
third-party-bom
- 3.27ea5
+ 3.27ea7
pom
import
@@ -330,7 +330,6 @@
**/Compiled*.java
false
- -Xdoclint:none
diff --git a/spec/3_2-lock-structure.md b/spec/3_2-lock-structure.md
index 83ed46d79..f14800994 100644
--- a/spec/3_2-lock-structure.md
+++ b/spec/3_2-lock-structure.md
@@ -106,10 +106,6 @@ procedure and call one depending on the context.
> The reference Java implementation uses only the version of this procedure without the first two
> steps.
-
-
-
-
## Release write lock, or write to update lock downgrade, or write to read lock downgrade
Perform a CAS operation on the count word of the lock state, comparing 0x80000000 (i. e. a count
@@ -141,9 +137,6 @@ procedure.
read with a wait word with the wait count decremented. If the CAS operation fails, begin the
deregister wait procedure from the start. If the CAS operation succeeds, the procedure succeeds.
-
-
-
## Time-limited write lock acquisition or update to write upgrade
1. Perform the corresponding *try acquire* procedure ([write lock](#try-acquire-write-lock) or
diff --git a/spec/4-hashing-algorithms.md b/spec/4-hashing-algorithms.md
index 70d3f9a5f..34a98ae43 100644
--- a/spec/4-hashing-algorithms.md
+++ b/spec/4-hashing-algorithms.md
@@ -6,9 +6,9 @@ the primary key hash code. Then the [`hashSplitting`](3_1-header-fields.md#hashs
is applied, to determine the segment in which the key should be stored, and the part of the key hash
code to be stored in a segment tier's hash lookup.
-> The reference Java implementation: [`XxHash_r39`](
-> https://github.com/OpenHFT/Chronicle-Algorithms/blob/chronicle-algorithms-1.1.6/src/main/java/net/openhft/chronicle/algo/hashing/XxHash_r39.java).
-> Although the Java implementation class has `_r39` suffix, the xxHash algorithm is stable since r3
+> The reference Java implementation: [`XxHashR39`](
+> https://github.com/OpenHFT/Chronicle-Algorithms/blob/chronicle-algorithms-1.1.6/src/main/java/net/openhft/chronicle/algo/hashing/XxHashR39.java).
+> Although the algorithm originated from the r39 release, the xxHash algorithm is stable since r3
> and [won't change in the future](
> https://github.com/Cyan4973/xxHash/issues/34#issuecomment-169176338). A different version of
> the algorithm could have a different name.
@@ -62,4 +62,4 @@ checksum](#primary-checksum). The entry checksum is stored in the 6th field of t
structure](3-memory-layout.md#stored-entry-structure).
> The reference Java implementation: [`HashEntryChecksumStrategy`](
-> ../src/main/java/net/openhft/chronicle/hash/impl/stage/entry/HashEntryChecksumStrategy.java).
\ No newline at end of file
+> ../src/main/java/net/openhft/chronicle/hash/impl/stage/entry/HashEntryChecksumStrategy.java).
diff --git a/src/main/docs/architecture-overview.adoc b/src/main/docs/architecture-overview.adoc
new file mode 100644
index 000000000..41c9fc771
--- /dev/null
+++ b/src/main/docs/architecture-overview.adoc
@@ -0,0 +1,49 @@
+= Chronicle Map Architecture Overview
+:toc:
+:sectnums:
+:lang: en-GB
+:source-highlighter: rouge
+
+This document provides a high-level overview of Chronicle Map, its role in the Chronicle stack, and the main design choices that underpin its behaviour.
+It complements the existing user guides in `docs/CM_*.adoc` and will be extended as further ISO 9001 and 27001 work is performed.
+
+== Context and Role
+
+Chronicle Map is an off-heap, persisted key–value store designed for low-latency access and inter-process data sharing.
+It sits in Layer 1 of the Chronicle stack alongside Chronicle Queue and Chronicle Wire and is typically used by higher-level services to cache, share and replicate state.
+
+Chronicle Map depends on:
+
+* Chronicle Bytes for off-heap and memory-mapped storage primitives.
+* Chronicle Core for utilities, lifecycle handling and configuration helpers.
+* Chronicle Threads for optional background maintenance and monitoring tasks.
+* Chronicle Wire for serialising complex keys and values when needed.
+
+== Core Abstractions
+
+The primary runtime abstractions are:
+
+* `ChronicleMap` – the main key–value container interface, usually created via `ChronicleMapBuilder`.
+* `ChronicleMapBuilder` – a fluent builder that configures key and value types, expected entry count, persistence location and replication parameters.
+* Query contexts – `ExternalMapQueryContext` and related types that provide scoped, allocation-aware access to entries.
+* Event listeners – hooks such as `MapEventListener` used to observe updates and replication events.
+
+Maps may be in-heap or persisted; persisted instances back their entries with memory-mapped files so that multiple JVMs or processes can observe the same data structures.
+
+== Storage and Persistence Model
+
+Chronicle Map uses fixed-size segments and off-heap memory to store entries.
+When persistence is enabled, segments are backed by memory-mapped files so that data survives process restarts and can be shared between JVMs.
+
+Key aspects include:
+
+* off-heap storage avoids GC pauses on large maps;
+* segments can be accessed concurrently by multiple threads, subject to the configured locking strategy;
+* file-backed maps require careful coordination around file-system permissions and lifecycle, as described in the existing `CM_*.adoc` guides.
+
+Further details on wire formats, replication and compatibility are available in:
+
+* `docs/CM_Compatibility_and_Versioning.adoc`
+* `docs/CM_Replication.adoc`
+* `docs/CM_Tutorial_*.adoc`
+
diff --git a/src/main/docs/decision-log.adoc b/src/main/docs/decision-log.adoc
new file mode 100644
index 000000000..262d887d3
--- /dev/null
+++ b/src/main/docs/decision-log.adoc
@@ -0,0 +1,56 @@
+= Chronicle Map Decision Log
+:toc:
+:sectnums:
+:lang: en-GB
+:source-highlighter: rouge
+
+This file captures component-specific architectural and operational decisions for Chronicle Map.
+Identifiers follow the `--NNN` pattern with scope `MAP` (Chronicle Map) and Nine-Box tags where practical.
+
+== Decision Index
+
+* link:#MAP-FN-101[MAP-FN-101 Off-heap Segmented Hash Map Design]
+* link:#MAP-NF-O-201[MAP-NF-O-201 Sizing and Capacity Configuration Via Builder]
+
+[[MAP-FN-101]]
+== MAP-FN-101 Off-heap Segmented Hash Map Design
+
+Date:: 2025-11-18
+Context::
+* Chronicle Map must support very large maps that may not fit in on-heap memory.
+* Many users require persistence and inter-process sharing via memory-mapped files.
+Decision Statement::
+* Chronicle Map uses an off-heap segmented hash map design backed by memory-mapped files, allowing entries to be stored outside the Java heap while remaining addressable via the `ChronicleMap` API.
+Alternatives Considered::
+* Pure on-heap `ConcurrentHashMap`-style implementation – simple but unable to scale to tens of gigabytes of data or share state between processes.
+* Flat file or database-backed key–value store – would introduce higher latency and require different APIs from existing Chronicle components.
+Rationale for Decision::
+* Off-heap segments and memory-mapped backing allow large maps with predictable latency while retaining a familiar Java API.
+* The design aligns with Chronicle's focus on low-latency, garbage-lean data structures.
+Impact & Consequences::
+* Users must size maps explicitly and consider off-heap memory and file-system limits.
+* Requirements such as `MAP-FN-001` and `MAP-FN-002` and the sizing guidance in `project-requirements.adoc` follow from this design.
+Notes/Links::
+* link:project-requirements.adoc[Project Requirements]
+
+[[MAP-NF-O-201]]
+== MAP-NF-O-201 Sizing and Capacity Configuration Via Builder
+
+Date:: 2025-11-18
+Context::
+* Incorrect sizing of Chronicle Map (for example too few entries or segments) can lead to performance degradation or capacity errors in production.
+* Operators need a clear and supported way to configure sizing without relying on defaults.
+Decision Statement::
+* Chronicle Map exposes sizing and capacity configuration via the fluent `ChronicleMapBuilder`, including expected entry count and average key/value sizes; operators are expected to use these builder settings rather than relying on implicit defaults.
+Alternatives Considered::
+* Hidden heuristics that infer sizing from early usage – convenient but unpredictable and difficult to document.
+* Hard-coded sizes – simple but unsuitable for the range of map sizes and workloads Chronicle Map targets.
+Rationale for Decision::
+* Builder-based sizing makes sizing decisions explicit in code and documentation.
+* It enables operational guidance and testing (for example `EntryCountMapTest`, `AutoResizeTest`, `KeySegmentDistributionTest`) to focus on a small set of well-defined parameters.
+Impact & Consequences::
+* Application code and tutorials must show correct builder usage.
+* Operational run-books and examples in `docs/CM_Tutorial_*.adoc` can give concrete sizing recipes.
+Notes/Links::
+* link:project-requirements.adoc[Project Requirements]
+
diff --git a/src/main/docs/project-requirements.adoc b/src/main/docs/project-requirements.adoc
new file mode 100644
index 000000000..f8e9e40ea
--- /dev/null
+++ b/src/main/docs/project-requirements.adoc
@@ -0,0 +1,81 @@
+= Chronicle Map Project Requirements
+:toc:
+:sectnums:
+:lang: en-GB
+:source-highlighter: rouge
+
+== Scope
+
+Chronicle Map provides an off-heap, optionally persisted key–value store with support for inter-process sharing and replication.
+The requirements in this document summarise existing behaviour described in the `docs/CM_*.adoc` guides and will be refined in future ISO-alignment passes.
+
+== Functional Requirements (Initial Skeleton)
+
+[cols=\"1,5,4\",options=\"header\"]
+|===
+|ID |Requirement |Verification
+|MAP-FN-001 |Provide an off-heap key–value store addressable via `ChronicleMap` that supports put/get/remove operations with predictable latency. |Core map behaviour is exercised by tests such as `ChronicleMapImportExportTest`, `SerializableTest`, `EntryCountMapTest`, `KeySizesTest` and `HugeSparseMapTest`, together with tutorial code under `docs/CM_Tutorial_*.adoc`.
+|MAP-FN-002 |Support persisted maps backed by memory-mapped files so that data survives JVM restarts and can be shared between processes. |Persistence and restart scenarios are covered by tests such as `ChronicleMapImportExportTest`, `BasicReplicationTest`, `AutoResizeTest` and long-running examples such as `LargeMapMain`.
+|MAP-FN-003 |Expose a fluent `ChronicleMapBuilder` to configure key/value types, expected entry count and persistence settings. |Builder usage examples in `docs/CM_Tutorial_DataAccess.adoc` and tests such as `EntryCountMapTest`, `BytesMarshallableValueTest` and `ChronicleSetBuilderTest` (for the related Chronicle Set builder).
+|===
+
+== Non-Functional Requirements - Performance
+
+[cols=\"1,5,4\",options=\"header\"]
+|===
+|ID |Requirement |Verification
+|MAP-NF-P-001 |Put and get operations on typical maps (with configured average key and value sizes) should maintain predictable latency across supported entry-count ranges. |Micro-benchmarks and tests such as `CHMLatencyTestMain` and `PageLatencyMain` measure latency characteristics under different map sizes and access patterns; results are used to detect regressions.
+|MAP-NF-P-002 |Replicated Chronicle Maps should maintain acceptable end-to-end latency for update propagation across typical cluster topologies. |Replication-oriented tests such as `BasicReplicationTest` exercise update propagation between maps; additional benchmarks may be added to characterise latency across nodes.
+|===
+
+== Non-Functional Requirements - Operability
+
+[cols=\"1,5,4\",options=\"header\"]
+|===
+|ID |Requirement |Verification
+|MAP-NF-O-001 |Chronicle Map should provide configuration options and documented patterns for sizing (entry count and segment count) so operators can avoid unexpected resizing or capacity errors in production. |Examples and guidance in `docs/CM_Tutorial_DataAccess.adoc` and related guides, together with tests such as `EntryCountMapTest`, `AutoResizeTest` and `KeySegmentDistributionTest`, illustrate how sizing parameters affect behaviour.
+|===
+
+== Non-Functional Requirements - Security
+
+[cols=\"1,5,4\",options=\"header\"]
+|===
+|ID |Requirement |Verification
+|MAP-NF-S-001 |Chronicle Map must rely on Chronicle Bytes and related core libraries for bounds-checked off-heap access and avoid unchecked direct memory writes that could corrupt map metadata or entries; security-sensitive use (for example maps storing secrets) is expected to layer encryption and access control externally. |Code reviews and static analysis focus on memory-access paths in Chronicle Map, ensuring they pass through Chronicle Bytes safety checks; higher-level security guidance is provided by the Chronicle Bytes and Chronicle Queue security reviews and by deployment documentation in the wider stack.
+|===
+
+== Traceability and Future Work
+
+The tables above provide an initial mapping from `MAP-*` requirements to representative tests and examples.
+They focus on the most widely used Chronicle Map behaviours; additional requirements and associated tests can be added as further behaviours are formalised.
+
+[cols=\"1,5,4\",options=\"header\"]
+|===
+|Requirement ID |Representative tests |Notes
+
+|MAP-FN-001
+|`ChronicleMapImportExportTest`, `SerializableTest`, `EntryCountMapTest`, `KeySizesTest`, `HugeSparseMapTest`
+|Exercise basic off-heap key–value operations, serialisation, sizing and distribution across segments.
+
+|MAP-FN-002
+|`ChronicleMapImportExportTest`, `BasicReplicationTest`, `AutoResizeTest`, `LargeMapMain`
+|Cover persisted maps backed by memory-mapped files, restart scenarios, replication between processes and resizing behaviour.
+
+|MAP-FN-003
+|`EntryCountMapTest`, `BytesMarshallableValueTest`, `ChronicleSetBuilderTest`
+|Use `ChronicleMapBuilder` (and related builder patterns) to configure entry counts, key/value types and persistence, verifying that builder options are honoured.
+
+|MAP-NF-P-001, MAP-NF-P-002
+|`CHMLatencyTestMain`, `PageLatencyMain`, `BasicReplicationTest`
+|Provide latency measurements for local and replicated maps; used to monitor performance regressions.
+
+|MAP-NF-O-001
+|`EntryCountMapTest`, `AutoResizeTest`, `KeySegmentDistributionTest`
+|Demonstrate the impact of sizing and segment configuration on behaviour, helping operators tune maps for production.
+|===
+
+Future work should:
+
+* expand the requirements catalogue as additional behaviours are formalised;
+* align existing `CM_*` documentation with the `MAP-*` identifiers above and add further `MAP-NF-S-*` and `MAP-NF-O-*` entries for security and operability;
+* keep the mapping between `MAP-*` requirements and representative tests in sync with the evolving test suite.
diff --git a/src/main/docs/testing-strategy.adoc b/src/main/docs/testing-strategy.adoc
new file mode 100644
index 000000000..278767b87
--- /dev/null
+++ b/src/main/docs/testing-strategy.adoc
@@ -0,0 +1,48 @@
+= Chronicle Map Testing Strategy
+:toc:
+:sectnums:
+:lang: en-GB
+:source-highlighter: rouge
+
+This document outlines how Chronicle Map is tested.
+It complements the `MAP-*` requirements in `project-requirements.adoc` and helps reviewers understand how key behaviours are validated.
+
+== Objectives
+
+Testing for Chronicle Map aims to:
+
+* verify the functional requirements (`MAP-FN-*`) for core map operations, persistence and builder configuration;
+* exercise non-functional requirements (`MAP-NF-P-*`, `MAP-NF-O-*`) around latency, sizing and operability;
+* ensure that off-heap and persisted maps behave correctly across JVM restarts and, where applicable, in replicated deployments.
+
+== Unit and Integration Tests
+
+Representative tests include:
+
+* core operations and serialisation: `ChronicleMapImportExportTest`, `SerializableTest`, `BytesMarshallableValueTest`;
+* sizing and distribution: `EntryCountMapTest`, `KeySizesTest`, `HugeSparseMapTest`, `KeySegmentDistributionTest`;
+* persistence and restart: `ChronicleMapImportExportTest`, `AutoResizeTest`, examples such as `LargeMapMain`;
+* replication: `BasicReplicationTest` and related classes under `net.openhft.chronicle.map`.
+
+These tests are organised under `src/test/java` and are run via the standard Maven test lifecycle.
+
+== Performance and Latency Tests
+
+Latency characteristics for Chronicle Map are explored using tests and harnesses such as:
+
+* `CHMLatencyTestMain` – exercises map operations under load and reports latency metrics;
+* `PageLatencyMain` – explores page and memory-mapped access patterns.
+
+These programmes are typically run manually or in dedicated performance environments rather than on every CI build, but they inform the performance expectations captured in `MAP-NF-P-*` requirements.
+
+== Traceability
+
+`project-requirements.adoc` documents the `MAP-*` requirements and includes a traceability table mapping each requirement to representative tests.
+When new behaviours are added or existing ones change, keep the following aligned:
+
+* add or update `MAP-*` entries in `project-requirements.adoc`;
+* extend the traceability table with the relevant test classes;
+* add or update unit, integration or performance tests under `src/test/java`.
+
+All tests should pass under the shared quality profiles on supported JDKs before releasing new versions of Chronicle Map.
+
diff --git a/src/main/java/net/openhft/chronicle/hash/impl/stage/data/bytes/InputKeyBytesData.java b/src/main/java/net/openhft/chronicle/hash/impl/stage/data/bytes/InputKeyBytesData.java
index 1f996a83e..889de8925 100644
--- a/src/main/java/net/openhft/chronicle/hash/impl/stage/data/bytes/InputKeyBytesData.java
+++ b/src/main/java/net/openhft/chronicle/hash/impl/stage/data/bytes/InputKeyBytesData.java
@@ -18,7 +18,6 @@
*/
package net.openhft.chronicle.hash.impl.stage.data.bytes;
-import net.openhft.chronicle.bytes.Bytes;
import net.openhft.chronicle.bytes.BytesStore;
import net.openhft.chronicle.bytes.RandomDataInput;
import net.openhft.chronicle.bytes.VanillaBytes;
diff --git a/src/main/java/net/openhft/chronicle/hash/impl/util/FileIOUtils.java b/src/main/java/net/openhft/chronicle/hash/impl/util/FileIOUtils.java
index 909e0a33d..3f30fc531 100644
--- a/src/main/java/net/openhft/chronicle/hash/impl/util/FileIOUtils.java
+++ b/src/main/java/net/openhft/chronicle/hash/impl/util/FileIOUtils.java
@@ -3,6 +3,8 @@
*/
package net.openhft.chronicle.hash.impl.util;
+import net.openhft.chronicle.core.Jvm;
+
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
@@ -17,10 +19,21 @@ public static void readFully(FileChannel fileChannel, long filePosition, ByteBuf
int startBufferPosition = buffer.position();
while (buffer.remaining() > 0 &&
buffer.position() < fileChannel.size()) {
- int bytesRead = fileChannel.read(buffer,
- filePosition + buffer.position() - startBufferPosition);
- if (bytesRead == -1)
- break;
+ long position = filePosition + buffer.position() - startBufferPosition;
+ try {
+ int bytesRead = fileChannel.read(buffer, position);
+ if (bytesRead == -1)
+ break;
+ } catch (IOException e) {
+ Jvm.warn().on(FileIOUtils.class, "IOException in readFully: " +
+ "fileChannel: " + fileChannel +
+ ", position= " + position +
+ ", filePosition=" + filePosition +
+ ", buffer.position()=" + buffer.position() +
+ ", buffer.remaining()=" + buffer.remaining() +
+ ", startBufferPosition=" + startBufferPosition, e);
+ throw e;
+ }
}
}
@@ -28,7 +41,19 @@ public static void writeFully(FileChannel fileChannel, long filePosition, ByteBu
throws IOException {
int startBufferPosition = buffer.position();
while (buffer.remaining() > 0) {
- fileChannel.write(buffer, filePosition + buffer.position() - startBufferPosition);
+ long position = filePosition + buffer.position() - startBufferPosition;
+ try {
+ fileChannel.write(buffer, position);
+ } catch (IOException ioe) {
+ Jvm.warn().on(FileIOUtils.class, "IOException in writeFully: " +
+ "fileChannel: " + fileChannel +
+ ", position= " + position +
+ ", filePosition=" + filePosition +
+ ", buffer.position()=" + buffer.position() +
+ ", buffer.remaining()=" + buffer.remaining() +
+ ", startBufferPosition=" + startBufferPosition, ioe);
+ throw ioe;
+ }
}
}
}
diff --git a/src/main/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccess.java b/src/main/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccess.java
index 57c76ab76..477747495 100644
--- a/src/main/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccess.java
+++ b/src/main/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccess.java
@@ -3,7 +3,6 @@
*/
package net.openhft.chronicle.hash.serialization.impl;
-import net.openhft.chronicle.bytes.Bytes;
import net.openhft.chronicle.bytes.BytesStore;
import net.openhft.chronicle.bytes.RandomDataInput;
import net.openhft.chronicle.bytes.VanillaBytes;
diff --git a/src/main/java/net/openhft/chronicle/map/AbstractChronicleMap.java b/src/main/java/net/openhft/chronicle/map/AbstractChronicleMap.java
index e5b40912f..94db5fc6a 100644
--- a/src/main/java/net/openhft/chronicle/map/AbstractChronicleMap.java
+++ b/src/main/java/net/openhft/chronicle/map/AbstractChronicleMap.java
@@ -82,7 +82,7 @@ default Collection values() {
@Override
public Iterator iterator() {
return new Iterator() {
- private Iterator> i = entrySet().iterator();
+ private final Iterator> i = entrySet().iterator();
@Override
public boolean hasNext() {
diff --git a/src/main/java/net/openhft/chronicle/map/ChronicleMap.java b/src/main/java/net/openhft/chronicle/map/ChronicleMap.java
index fd6684c4b..01297b150 100644
--- a/src/main/java/net/openhft/chronicle/map/ChronicleMap.java
+++ b/src/main/java/net/openhft/chronicle/map/ChronicleMap.java
@@ -164,8 +164,8 @@ static ChronicleMapBuilder of(Class keyClass, Class valueClas
*
* @param key the key whose associated value is to be returned
* @param usingValue the object to read value data in, if present. Can be null
- * @see #acquireUsing(Object, Object)
* @return Lock control object that releases the update lock on close.
+ * @see #acquireUsing(Object, Object)
*/
@NotNull
net.openhft.chronicle.core.io.Closeable acquireContext(@NotNull K key, @NotNull V usingValue);
@@ -237,6 +237,7 @@ static ChronicleMapBuilder of(Class keyClass, Class valueClas
* @return the amount of free space in the map as a percentage. When the free space gets low ( around 5-25% ) the map will automatically expand. The
* number of times it can automatically expand is based on the {@code net.openhft.chronicle.map.ChronicleMapBuilder#maxBloatFactor}. If the map
* expands you will see an increase in the available free space. NOTE: It is not possible to expand the chronicle map manually.
+ *
* @see net.openhft.chronicle.map.ChronicleMap#remainingAutoResizes as these operations are related.
*/
default short percentageFreeSpace() {
@@ -245,6 +246,7 @@ default short percentageFreeSpace() {
/**
* WARNING: This is a detailed however expensive operation which can take milliseconds
+ *
* @return an array of how full each segment is
*/
default SegmentStats[] segmentStats() {
diff --git a/src/main/java/net/openhft/chronicle/map/ChronicleMapBuilder.java b/src/main/java/net/openhft/chronicle/map/ChronicleMapBuilder.java
index db0501e83..8db15f774 100644
--- a/src/main/java/net/openhft/chronicle/map/ChronicleMapBuilder.java
+++ b/src/main/java/net/openhft/chronicle/map/ChronicleMapBuilder.java
@@ -1763,7 +1763,7 @@ private ChronicleMap createWithFile(@NotNull final File file,
}
}
- private void prepareMapPublication(@NotNull final VanillaChronicleMap map) throws IOException {
+ private void prepareMapPublication(@NotNull final VanillaChronicleMap map) {
establishReplication(map);
map.setResourcesName();
map.registerCleaner();
@@ -1969,7 +1969,7 @@ private ChronicleMap createWithoutFile() {
}
@SuppressWarnings("unchecked")
- private VanillaChronicleMap newMap() throws IOException {
+ private VanillaChronicleMap newMap() {
preMapConstruction();
if (replicated) {
try {
diff --git a/src/main/java/net/openhft/chronicle/map/FindByName.java b/src/main/java/net/openhft/chronicle/map/FindByName.java
index dd09e697d..3f085acab 100644
--- a/src/main/java/net/openhft/chronicle/map/FindByName.java
+++ b/src/main/java/net/openhft/chronicle/map/FindByName.java
@@ -5,9 +5,6 @@
import net.openhft.chronicle.hash.ChronicleHash;
-import java.io.IOException;
-import java.util.concurrent.TimeoutException;
-
/**
* @author Rob Austin.
*/
@@ -19,10 +16,6 @@ interface FindByName {
* @param the type returned
* @return a chronicle map or set
* @throws IllegalArgumentException if a map with this name can not be found
- * @throws IOException if it not possible to create the map or set
- * @throws TimeoutException if the call times out
- * @throws InterruptedException if interrupted by another thread
*/
- T from(String name) throws IllegalArgumentException,
- IOException, TimeoutException, InterruptedException;
+ T from(String name) throws IllegalArgumentException;
}
diff --git a/src/main/java/net/openhft/chronicle/map/JsonSerializer.java b/src/main/java/net/openhft/chronicle/map/JsonSerializer.java
index 46e45db69..3a055051c 100644
--- a/src/main/java/net/openhft/chronicle/map/JsonSerializer.java
+++ b/src/main/java/net/openhft/chronicle/map/JsonSerializer.java
@@ -10,6 +10,7 @@
import net.openhft.xstream.converters.*;
import java.io.*;
+import java.nio.file.Files;
import java.util.List;
import java.util.Map;
import java.util.zip.GZIPInputStream;
@@ -59,16 +60,16 @@ static synchronized void putAll(final File fromFile,
private static InputStream createInputStream(final File toFile) throws IOException {
if (toFile.getName().toLowerCase().endsWith(".gz"))
- return new GZIPInputStream(new FileInputStream(toFile));
+ return new GZIPInputStream(Files.newInputStream(toFile.toPath()));
else
- return new FileInputStream(toFile);
+ return Files.newInputStream(toFile.toPath());
}
private static OutputStream createOutputStream(final File toFile) throws IOException {
if (toFile.getName().toLowerCase().endsWith(".gz"))
- return new GZIPOutputStream(new FileOutputStream(toFile));
+ return new GZIPOutputStream(Files.newOutputStream(toFile.toPath()));
else
- return new FileOutputStream(toFile);
+ return Files.newOutputStream(toFile.toPath());
}
private static XStream xStream(final Map map, final List> jsonConverters) {
diff --git a/src/main/java/net/openhft/chronicle/map/OldDeletedEntriesCleanupThread.java b/src/main/java/net/openhft/chronicle/map/OldDeletedEntriesCleanupThread.java
index 8db260682..ec7a06e1a 100644
--- a/src/main/java/net/openhft/chronicle/map/OldDeletedEntriesCleanupThread.java
+++ b/src/main/java/net/openhft/chronicle/map/OldDeletedEntriesCleanupThread.java
@@ -54,7 +54,7 @@ class OldDeletedEntriesCleanupThread extends Thread
private long prevSegment0ScanStart = -1;
private long removedCompletely;
- private long startTime = System.currentTimeMillis();
+ private final long startTime = System.currentTimeMillis();
OldDeletedEntriesCleanupThread(ReplicatedChronicleMap, ?, ?> map) {
super("Cleanup Thread for " + map.toIdentityString());
diff --git a/src/main/java/net/openhft/chronicle/map/ReplicatedChronicleMap.java b/src/main/java/net/openhft/chronicle/map/ReplicatedChronicleMap.java
index 32efec556..d4e7d17ad 100644
--- a/src/main/java/net/openhft/chronicle/map/ReplicatedChronicleMap.java
+++ b/src/main/java/net/openhft/chronicle/map/ReplicatedChronicleMap.java
@@ -26,7 +26,6 @@
import net.openhft.chronicle.wire.WireOut;
import org.jetbrains.annotations.NotNull;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
@@ -121,7 +120,7 @@ public class ReplicatedChronicleMap extends VanillaChronicleMap builder) throws IOException {
+ public ReplicatedChronicleMap(@NotNull final ChronicleMapBuilder builder) {
super(builder);
tierModIterBitSetSizeInBits = computeTierModIterBitSetSizeInBits();
tierModIterBitSetOuterSize = computeTierModIterBitSetOuterSize();
diff --git a/src/main/java/net/openhft/chronicle/map/VanillaChronicleMap.java b/src/main/java/net/openhft/chronicle/map/VanillaChronicleMap.java
index ab3d85b13..80a2902c4 100644
--- a/src/main/java/net/openhft/chronicle/map/VanillaChronicleMap.java
+++ b/src/main/java/net/openhft/chronicle/map/VanillaChronicleMap.java
@@ -80,7 +80,7 @@ public class VanillaChronicleMap
/////////////////////////////////////////////////
private transient String name;
/**
- * identityString is initialized lazily in {@link #toIdentityString()} rather than in {@link #initOwnTransients()} because it depends on {@link
+ * identityString is initialized lazily in {@link #toIdentityString()} rather than in because it depends on {@link
* #file()} which is set after initOwnTransients().
*/
private transient String identityString;
diff --git a/src/main/java/net/openhft/chronicle/map/impl/CompiledMapIterationContext.java b/src/main/java/net/openhft/chronicle/map/impl/CompiledMapIterationContext.java
index ed73a9982..e00138e7b 100644
--- a/src/main/java/net/openhft/chronicle/map/impl/CompiledMapIterationContext.java
+++ b/src/main/java/net/openhft/chronicle/map/impl/CompiledMapIterationContext.java
@@ -40,7 +40,7 @@
* Generated code
*/
@SuppressWarnings({"rawtypes", "unchecked", "this-escape"})
-public class CompiledMapIterationContext extends ChainingInterface implements AutoCloseable , ChecksumEntry , HashEntry , HashSegmentContext> , SegmentLock , Alloc , KeyHashCode , LocksInterface , MapContext , MapEntry , IterationContext , VanillaChronicleMapHolder , SetContext {
+public class CompiledMapIterationContext extends ChainingInterface implements AutoCloseable, ChecksumEntry, HashEntry, HashSegmentContext>, SegmentLock, Alloc, KeyHashCode, LocksInterface, MapContext, MapEntry, IterationContext, VanillaChronicleMapHolder, SetContext {
public boolean readZeroGuarded() {
if (!(this.locksInit()))
this.initLocks();
@@ -152,7 +152,7 @@ public void doCloseAllocatedChunks() {
public void doCloseDelayedUpdateChecksum() {
if (!(this.delayedUpdateChecksumInit()))
- return ;
+ return;
if (this.h().checksumEntries)
this.hashEntryChecksumStrategy.computeAndStoreChecksum();
@@ -202,7 +202,7 @@ public void doCloseKeySize() {
public void doCloseLocks() {
if (!(this.locksInit()))
- return ;
+ return;
if ((rootContextLockedOnThisSegment) == (this)) {
closeRootLocks();
@@ -228,7 +228,7 @@ public void doCloseSearchKey() {
public void doCloseSegment() {
if (!(this.segmentInit()))
- return ;
+ return;
entrySpaceOffset = 0;
}
@@ -247,7 +247,7 @@ public void doCloseSegmentTier() {
public void doCloseUsed() {
if (!(this.usedInit()))
- return ;
+ return;
used = false;
if (firstContextLockedInThisThread)
@@ -326,7 +326,7 @@ public void setLocalLockStateGuarded(LocalLockState newState) {
setLocalLockState(newState);
}
- public CompiledMapIterationContext(ChainingInterface rootContextInThisThread ,VanillaChronicleMap map) {
+ public CompiledMapIterationContext(ChainingInterface rootContextInThisThread, VanillaChronicleMap map) {
contextChain = rootContextInThisThread.getContextChain();
indexInContextChain = contextChain.size();
contextChain.add(this);
@@ -345,7 +345,7 @@ public CompiledMapIterationContext(ChainingInterface rootContextInThisThread ,Va
this.segmentBytes = CompiledMapIterationContext.unmonitoredVanillaBytes(segmentBS);
this.hashEntryChecksumStrategy = new HashEntryChecksumStrategy();
this.checksumStrategy = this.h().checksumEntries ? this.hashEntryChecksumStrategy : NoChecksumStrategy.INSTANCE;
- this.freeList = new ReusableBitSet(new SingleThreadedFlatBitSetFrame(MemoryUnit.LONGS.align(this.h().actualChunksPerSegmentTier, MemoryUnit.BITS)) , Access.nativeAccess() , null , 0);
+ this.freeList = new ReusableBitSet(new SingleThreadedFlatBitSetFrame(MemoryUnit.LONGS.align(this.h().actualChunksPerSegmentTier, MemoryUnit.BITS)), Access.nativeAccess(), null, 0);
this.innerUpdateLock = new UpdateLock();
}
@@ -368,7 +368,7 @@ public CompiledMapIterationContext(VanillaChronicleMap map) {
this.segmentBytes = CompiledMapIterationContext.unmonitoredVanillaBytes(segmentBS);
this.hashEntryChecksumStrategy = new HashEntryChecksumStrategy();
this.checksumStrategy = this.h().checksumEntries ? this.hashEntryChecksumStrategy : NoChecksumStrategy.INSTANCE;
- this.freeList = new ReusableBitSet(new SingleThreadedFlatBitSetFrame(MemoryUnit.LONGS.align(this.h().actualChunksPerSegmentTier, MemoryUnit.BITS)) , Access.nativeAccess() , null , 0);
+ this.freeList = new ReusableBitSet(new SingleThreadedFlatBitSetFrame(MemoryUnit.LONGS.align(this.h().actualChunksPerSegmentTier, MemoryUnit.BITS)), Access.nativeAccess(), null, 0);
this.innerUpdateLock = new UpdateLock();
}
@@ -470,7 +470,7 @@ public K getUsing(K using) {
public class EntryValueBytesData extends AbstractData {
public void doCloseCachedEntryValue() {
if (!(this.cachedEntryValueInit()))
- return ;
+ return;
cachedEntryValueRead = false;
}
@@ -507,7 +507,7 @@ public V getUsing(V using) {
return innerGetUsing(using);
}
- private V cachedEntryValue = (CompiledMapIterationContext.this.m().valueType()) == (CharSequence.class) ? ((V)(new StringBuilder())) : null;
+ private V cachedEntryValue = (CompiledMapIterationContext.this.m().valueType()) == (CharSequence.class) ? ((V) (new StringBuilder())) : null;
private boolean cachedEntryValueRead = false;
@@ -529,7 +529,7 @@ public V cachedEntryValue() {
public void closeCachedEntryValue() {
if (!(this.cachedEntryValueInit()))
- return ;
+ return;
cachedEntryValueRead = false;
}
@@ -566,7 +566,7 @@ public int computeChecksum() {
} else {
checksum = keyHashCode;
}
- return ((int)((checksum >>> 32) ^ checksum));
+ return ((int) ((checksum >>> 32) ^ checksum));
}
public void closeHashEntryChecksumStrategyComputeChecksumDependants() {
@@ -610,10 +610,10 @@ public void lockInterruptibly() throws InterruptedException {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.incrementReadGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.READ_LOCKED);
- }
+ }
}
@Override
@@ -626,10 +626,10 @@ public void lock() {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.incrementReadGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.READ_LOCKED);
- }
+ }
}
public void closeReadLockLockDependants() {
@@ -648,7 +648,7 @@ public void unlock() {
if ((CompiledMapIterationContext.this.localLockState()) != (LocalLockState.UNLOCKED)) {
CompiledMapIterationContext.this.closeHashLookupPos();
CompiledMapIterationContext.this.closeEntry();
- }
+ }
CompiledMapIterationContext.this.readUnlockAndDecrementCountGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.UNLOCKED);
}
@@ -715,7 +715,7 @@ public void lockInterruptibly() throws InterruptedException {
throw new InterruptedException();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if ((CompiledMapIterationContext.this.updateZeroGuarded()) && (CompiledMapIterationContext.this.writeZeroGuarded())) {
if (!(CompiledMapIterationContext.this.readZeroGuarded()))
@@ -726,14 +726,14 @@ public void lockInterruptibly() throws InterruptedException {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.incrementUpdateGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.UPDATE_LOCKED);
- return ;
- case READ_LOCKED :
+ return;
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
- case WRITE_LOCKED :
+ case UPDATE_LOCKED:
+ case WRITE_LOCKED:
}
}
@@ -747,16 +747,16 @@ public boolean isHeldByCurrentThread() {
public void unlock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
- case READ_LOCKED :
- return ;
- case UPDATE_LOCKED :
+ case UNLOCKED:
+ case READ_LOCKED:
+ return;
+ case UPDATE_LOCKED:
CompiledMapIterationContext.this.closeDelayedUpdateChecksum();
if (((CompiledMapIterationContext.this.decrementUpdateGuarded()) == 0) && (CompiledMapIterationContext.this.writeZeroGuarded())) {
CompiledMapIterationContext.this.segmentHeader().downgradeUpdateToReadLock(CompiledMapIterationContext.this.segmentHeaderAddress());
- }
+ }
break;
- case WRITE_LOCKED :
+ case WRITE_LOCKED:
CompiledMapIterationContext.this.closeDelayedUpdateChecksum();
if ((CompiledMapIterationContext.this.decrementWriteGuarded()) == 0) {
if (!(CompiledMapIterationContext.this.updateZeroGuarded())) {
@@ -764,7 +764,7 @@ public void unlock() {
} else {
CompiledMapIterationContext.this.segmentHeader().downgradeWriteToReadLock(CompiledMapIterationContext.this.segmentHeaderAddress());
}
- }
+ }
}
CompiledMapIterationContext.this.incrementReadGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.READ_LOCKED);
@@ -774,7 +774,7 @@ public void unlock() {
public void lock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if ((CompiledMapIterationContext.this.updateZeroGuarded()) && (CompiledMapIterationContext.this.writeZeroGuarded())) {
if (!(CompiledMapIterationContext.this.readZeroGuarded()))
@@ -785,14 +785,14 @@ public void lock() {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.incrementUpdateGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.UPDATE_LOCKED);
- return ;
- case READ_LOCKED :
+ return;
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
- case WRITE_LOCKED :
+ case UPDATE_LOCKED:
+ case WRITE_LOCKED:
}
}
@@ -800,7 +800,7 @@ public void lock() {
public boolean tryLock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if ((CompiledMapIterationContext.this.updateZeroGuarded()) && (CompiledMapIterationContext.this.writeZeroGuarded())) {
if (!(CompiledMapIterationContext.this.readZeroGuarded()))
@@ -818,12 +818,12 @@ public boolean tryLock() {
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.UPDATE_LOCKED);
return true;
}
- case READ_LOCKED :
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
- case WRITE_LOCKED :
+ case UPDATE_LOCKED:
+ case WRITE_LOCKED:
return true;
- default :
+ default:
throw new IllegalStateException((((CompiledMapIterationContext.this.h().toIdentityString()) + ": unexpected localLockState=") + (CompiledMapIterationContext.this.localLockState())));
}
}
@@ -836,7 +836,7 @@ public boolean tryLock(long time, @NotNull
throw new InterruptedException();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if ((CompiledMapIterationContext.this.updateZeroGuarded()) && (CompiledMapIterationContext.this.writeZeroGuarded())) {
if (!(CompiledMapIterationContext.this.readZeroGuarded()))
@@ -854,12 +854,12 @@ public boolean tryLock(long time, @NotNull
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.UPDATE_LOCKED);
return true;
}
- case READ_LOCKED :
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
- case WRITE_LOCKED :
+ case UPDATE_LOCKED:
+ case WRITE_LOCKED:
return true;
- default :
+ default:
throw new IllegalStateException((((CompiledMapIterationContext.this.h().toIdentityString()) + ": unexpected localLockState=") + (CompiledMapIterationContext.this.localLockState())));
}
}
@@ -879,13 +879,13 @@ public void doCloseCachedWrappedValue() {
public void doCloseNext() {
if (!(this.nextInit()))
- return ;
+ return;
}
public void doCloseWrappedValueBytes() {
if (!(this.wrappedValueBytesInit()))
- return ;
+ return;
wrappedValueBytes.bytesStore(BytesStore.empty(), 0, 0);
wrappedValueBytesUsed = false;
@@ -893,7 +893,7 @@ public void doCloseWrappedValueBytes() {
public void doCloseWrappedValueBytesStore() {
if (!(this.wrappedValueBytesStoreInit()))
- return ;
+ return;
wrappedValueBytesStore = null;
if ((next) != null)
@@ -935,7 +935,7 @@ public WrappedValueBytesData next() {
void closeNext() {
if (!(this.nextInit()))
- return ;
+ return;
this.closeNextDependants();
}
@@ -981,7 +981,7 @@ public long wrappedValueBytesOffset() {
void closeWrappedValueBytesStore() {
if (!(this.wrappedValueBytesStoreInit()))
- return ;
+ return;
this.closeWrappedValueBytesStoreDependants();
wrappedValueBytesStore = null;
@@ -1019,7 +1019,7 @@ public VanillaBytes wrappedValueBytes() {
void closeWrappedValueBytes() {
if (!(this.wrappedValueBytesInit()))
- return ;
+ return;
this.closeWrappedValueBytesDependants();
wrappedValueBytes.bytesStore(BytesStore.empty(), 0, 0);
@@ -1097,7 +1097,7 @@ public RandomDataInput bytes() {
public class WrappedValueInstanceDataHolder {
public void doCloseValue() {
if (!(this.valueInit()))
- return ;
+ return;
value = null;
if ((next) != null)
@@ -1107,13 +1107,13 @@ public void doCloseValue() {
public void doCloseNext() {
if (!(this.nextInit()))
- return ;
+ return;
}
public void doCloseWrappedData() {
if (!(this.wrappedDataInit()))
- return ;
+ return;
wrappedData = null;
wrappedValueDataAccess.uninit();
@@ -1157,7 +1157,7 @@ public WrappedValueInstanceDataHolder next() {
void closeNext() {
if (!(this.nextInit()))
- return ;
+ return;
this.closeNextDependants();
}
@@ -1188,7 +1188,7 @@ public V value() {
public void closeValue() {
if (!(this.valueInit()))
- return ;
+ return;
this.closeValueDependants();
value = null;
@@ -1220,7 +1220,7 @@ public Data wrappedData() {
private void closeWrappedData() {
if (!(this.wrappedDataInit()))
- return ;
+ return;
wrappedData = null;
wrappedValueDataAccess.uninit();
@@ -1242,7 +1242,7 @@ private IllegalStateException forbiddenWriteLockWhenOuterContextReadLocked() {
public boolean tryLock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
if (!(CompiledMapIterationContext.this.updateZeroGuarded())) {
@@ -1270,9 +1270,9 @@ public boolean tryLock() {
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
return true;
}
- case READ_LOCKED :
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
+ case UPDATE_LOCKED:
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
assert !(CompiledMapIterationContext.this.updateZeroGuarded());
if (CompiledMapIterationContext.this.segmentHeader().tryUpgradeUpdateToWriteLock(CompiledMapIterationContext.this.segmentHeaderAddress())) {
@@ -1289,9 +1289,9 @@ public boolean tryLock() {
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
return true;
}
- case WRITE_LOCKED :
+ case WRITE_LOCKED:
return true;
- default :
+ default:
throw new IllegalStateException((((CompiledMapIterationContext.this.h().toIdentityString()) + ": unexpected localLockState=") + (CompiledMapIterationContext.this.localLockState())));
}
}
@@ -1304,7 +1304,7 @@ public boolean tryLock(long time, @NotNull
throw new InterruptedException();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
if (!(CompiledMapIterationContext.this.updateZeroGuarded())) {
@@ -1332,9 +1332,9 @@ public boolean tryLock(long time, @NotNull
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
return true;
}
- case READ_LOCKED :
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
+ case UPDATE_LOCKED:
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
assert !(CompiledMapIterationContext.this.updateZeroGuarded());
if (CompiledMapIterationContext.this.segmentHeader().tryUpgradeUpdateToWriteLock(CompiledMapIterationContext.this.segmentHeaderAddress(), time, unit)) {
@@ -1351,9 +1351,9 @@ public boolean tryLock(long time, @NotNull
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
return true;
}
- case WRITE_LOCKED :
+ case WRITE_LOCKED:
return true;
- default :
+ default:
throw new IllegalStateException((((CompiledMapIterationContext.this.h().toIdentityString()) + ": unexpected localLockState=") + (CompiledMapIterationContext.this.localLockState())));
}
}
@@ -1365,7 +1365,7 @@ public void lockInterruptibly() throws InterruptedException {
throw new InterruptedException();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
if (!(CompiledMapIterationContext.this.updateZeroGuarded())) {
@@ -1380,13 +1380,13 @@ public void lockInterruptibly() throws InterruptedException {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
}
- }
+ }
CompiledMapIterationContext.this.incrementWriteGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
- return ;
- case READ_LOCKED :
+ return;
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
+ case UPDATE_LOCKED:
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
assert !(CompiledMapIterationContext.this.updateZeroGuarded());
try {
@@ -1394,12 +1394,12 @@ public void lockInterruptibly() throws InterruptedException {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.decrementUpdateGuarded();
CompiledMapIterationContext.this.incrementWriteGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
break;
- case WRITE_LOCKED :
+ case WRITE_LOCKED:
break;
}
}
@@ -1408,11 +1408,11 @@ public void lockInterruptibly() throws InterruptedException {
public void unlock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
- case READ_LOCKED :
- case UPDATE_LOCKED :
- return ;
- case WRITE_LOCKED :
+ case UNLOCKED:
+ case READ_LOCKED:
+ case UPDATE_LOCKED:
+ return;
+ case WRITE_LOCKED:
CompiledMapIterationContext.this.closeDelayedUpdateChecksum();
if ((CompiledMapIterationContext.this.decrementWriteGuarded()) == 0)
CompiledMapIterationContext.this.segmentHeader().downgradeWriteToUpdateLock(CompiledMapIterationContext.this.segmentHeaderAddress());
@@ -1426,7 +1426,7 @@ public void unlock() {
public void lock() {
CompiledMapIterationContext.this.checkOnEachLockOperation();
switch (CompiledMapIterationContext.this.localLockState()) {
- case UNLOCKED :
+ case UNLOCKED:
CompiledMapIterationContext.this.checkIterationContextNotLockedInThisThread();
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
if (!(CompiledMapIterationContext.this.updateZeroGuarded())) {
@@ -1441,13 +1441,13 @@ public void lock() {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
}
- }
+ }
CompiledMapIterationContext.this.incrementWriteGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
- return ;
- case READ_LOCKED :
+ return;
+ case READ_LOCKED:
throw forbiddenUpgrade();
- case UPDATE_LOCKED :
+ case UPDATE_LOCKED:
if (CompiledMapIterationContext.this.writeZeroGuarded()) {
assert !(CompiledMapIterationContext.this.updateZeroGuarded());
try {
@@ -1455,12 +1455,12 @@ public void lock() {
} catch (InterProcessDeadLockException e) {
throw CompiledMapIterationContext.this.debugContextsAndLocksGuarded(e);
}
- }
+ }
CompiledMapIterationContext.this.decrementUpdateGuarded();
CompiledMapIterationContext.this.incrementWriteGuarded();
CompiledMapIterationContext.this.setLocalLockStateGuarded(LocalLockState.WRITE_LOCKED);
break;
- case WRITE_LOCKED :
+ case WRITE_LOCKED:
break;
}
}
@@ -1515,7 +1515,8 @@ public int decrementWrite() {
}
public enum SearchState {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
private long _HashEntryStages_entryEnd() {
return keyEnd();
@@ -1525,7 +1526,7 @@ public long allocReturnCode(int chunks) {
VanillaChronicleHash h = this.h();
if (chunks > (h.maxChunksPerEntry)) {
throw new IllegalArgumentException(((((((this.h().toIdentityString()) + ": Entry is too large: requires ") + chunks) + " chunks, ") + (h.maxChunksPerEntry)) + " is maximum."));
- }
+ }
long lowestPossiblyFreeChunk = lowestPossiblyFreeChunk();
if ((lowestPossiblyFreeChunk + chunks) > (h.actualChunksPerSegmentTier))
return -1;
@@ -1539,13 +1540,13 @@ public long allocReturnCode(int chunks) {
if ((ret + chunks) > (h.actualChunksPerSegmentTier)) {
assert ret != (BitSetFrame.NOT_FOUND);
freeList.clearRange(ret, (ret + chunks));
- }
+ }
return -1;
} else {
tierEntries(((tierEntries()) + 1));
if ((chunks == 1) || (freeList.isSet(lowestPossiblyFreeChunk))) {
lowestPossiblyFreeChunk((ret + chunks));
- }
+ }
return ret;
}
}
@@ -1560,11 +1561,11 @@ private void _HashSegmentIteration_hookAfterEachIteration() {
private void _SegmentStages_checkNestedContextsQueryDifferentKeys(LocksInterface innermostContextOnThisSegment) {
if ((innermostContextOnThisSegment.getClass()) == (getClass())) {
- Data key = ((CompiledMapIterationContext)(innermostContextOnThisSegment)).inputKey();
- if (java.util.Objects.equals(key, ((CompiledMapIterationContext)((Object)(this))).inputKey())) {
+ Data key = ((CompiledMapIterationContext) (innermostContextOnThisSegment)).inputKey();
+ if (java.util.Objects.equals(key, ((CompiledMapIterationContext) ((Object) (this))).inputKey())) {
throw new IllegalStateException((((this.h().toIdentityString()) + ": Nested same-thread contexts cannot access the same key ") + key));
- }
- }
+ }
+ }
}
private void _SegmentStages_nextTier() {
@@ -1619,16 +1620,16 @@ public void incrementWrite() {
public void readUnlockAndDecrementCount() {
switch (localLockState) {
- case UNLOCKED :
- return ;
- case READ_LOCKED :
+ case UNLOCKED:
+ return;
+ case READ_LOCKED:
if ((decrementRead()) == 0) {
if ((updateZero()) && (writeZero()))
segmentHeader().readUnlock(segmentHeaderAddress());
- }
- return ;
- case UPDATE_LOCKED :
+ }
+ return;
+ case UPDATE_LOCKED:
if ((decrementUpdate()) == 0) {
if (writeZero()) {
if (readZero()) {
@@ -1636,10 +1637,10 @@ public void readUnlockAndDecrementCount() {
} else {
segmentHeader().downgradeUpdateToReadLock(segmentHeaderAddress());
}
- }
- }
- return ;
- case WRITE_LOCKED :
+ }
+ }
+ return;
+ case WRITE_LOCKED:
if ((decrementWrite()) == 0) {
if (!(updateZero())) {
segmentHeader().downgradeWriteToUpdateLock(segmentHeaderAddress());
@@ -1650,7 +1651,7 @@ public void readUnlockAndDecrementCount() {
segmentHeader().writeUnlock(segmentHeaderAddress());
}
}
- }
+ }
}
}
@@ -1667,7 +1668,7 @@ public void setLocalLockState(LocalLockState newState) {
} else if (goingToLock) {
registerIterationContextLockedInThisThread();
- }
+ }
localLockState = newState;
}
@@ -1709,15 +1710,15 @@ private void closeNestedLocks() {
private void closeRootLocks() {
verifyInnermostContext();
switch (localLockState) {
- case UNLOCKED :
- return ;
- case READ_LOCKED :
+ case UNLOCKED:
+ return;
+ case READ_LOCKED:
segmentHeader().readUnlock(segmentHeaderAddress());
- return ;
- case UPDATE_LOCKED :
+ return;
+ case UPDATE_LOCKED:
segmentHeader().updateUnlock(segmentHeaderAddress());
- return ;
- case WRITE_LOCKED :
+ return;
+ case WRITE_LOCKED:
segmentHeader().writeUnlock(segmentHeaderAddress());
}
}
@@ -1750,7 +1751,7 @@ private void unlinkFromSegmentContextsChain() {
private void verifyInnermostContext() {
if ((nextNode) != null) {
throw new IllegalStateException(((this.h().toIdentityString()) + ": Attempt to close contexts not structurally"));
- }
+ }
}
public boolean readZero() {
@@ -1801,15 +1802,15 @@ public UpdateLock innerUpdateLock() {
public RuntimeException debugContextsAndLocks(InterProcessDeadLockException e) {
String message = (this.h().toIdentityString()) + ":\n";
message += "Contexts locked on this segment:\n";
- for (LocksInterface cxt = rootContextLockedOnThisSegment ; cxt != null ; cxt = cxt.nextNode()) {
+ for (LocksInterface cxt = rootContextLockedOnThisSegment; cxt != null; cxt = cxt.nextNode()) {
message += (cxt.debugLocksState()) + "\n";
}
message += "Current thread contexts:\n";
- for (int i = 0, size = this.contextChain.size() ; i < size ; i++) {
+ for (int i = 0, size = this.contextChain.size(); i < size; i++) {
LocksInterface cxt = this.contextAtIndexInChain(i);
message += (cxt.debugLocksState()) + "\n";
}
- throw new InterProcessDeadLockException(message , e);
+ throw new InterProcessDeadLockException(message, e);
}
final WrappedValueBytesData wrappedValueBytesData;
@@ -1877,13 +1878,13 @@ public List contextChain() {
return this.contextChain;
}
- private static T initUsedAndReturn(VanillaChronicleMap map, ChainingInterface context) {
+ private static T initUsedAndReturn(VanillaChronicleMap map, ChainingInterface context) {
try {
context.initUsed(true, map);
- return ((T)(context));
+ return ((T) (context));
} catch (Throwable throwable) {
try {
- ((AutoCloseable)(context)).close();
+ ((AutoCloseable) (context)).close();
} catch (Throwable t) {
throwable.addSuppressed(t);
}
@@ -1913,15 +1914,15 @@ public void closeMapEntryStagesReadFoundEntryDependants() {
private void deregisterIterationContextLockedInThisThread() {
if ((this) instanceof IterationContext) {
this.rootContextInThisThread.iterationContextLockedInThisThread = false;
- }
+ }
}
public void closeIterationSegmentStagesDeregisterIterationContextLockedInThisThreadDependants() {
this.closeLocks();
}
- public T contextAtIndexInChain(int index) {
- return ((T)(contextChain.get(index)));
+ public T contextAtIndexInChain(int index) {
+ return ((T) (contextChain.get(index)));
}
public void closeVanillaChronicleMapHolderImplContextAtIndexInChainDependants() {
@@ -1938,17 +1939,17 @@ public Object entryForIteration() {
}
@Override
- public T getContext(Class extends T> contextClass, BiFunction createChaining, VanillaChronicleMap map) {
- for (int i = 0 ; i < (contextChain.size()) ; i++) {
+ public T getContext(Class extends T> contextClass, BiFunction createChaining, VanillaChronicleMap map) {
+ for (int i = 0; i < (contextChain.size()); i++) {
ChainingInterface context = contextChain.get(i);
if (((context.getClass()) == contextClass) && (!(context.usedInit()))) {
return CompiledMapIterationContext.initUsedAndReturn(map, context);
- }
+ }
}
int maxNestedContexts = 1 << 10;
if ((contextChain.size()) > maxNestedContexts) {
throw new IllegalStateException(((((((((map.toIdentityString()) + ": More than ") + maxNestedContexts) + " nested ChronicleHash contexts\n") + "are not supported. Very probable that you simply forgot to close context\n") + "somewhere (recommended to use try-with-resources statement).\n") + "Otherwise this is a bug, please report with this\n") + "stack trace on https://github.com/OpenHFT/Chronicle-Map/issues"));
- }
+ }
T context = createChaining.apply(this, map);
return CompiledMapIterationContext.initUsedAndReturn(map, context);
}
@@ -1977,7 +1978,7 @@ public long newEntrySize(Data newValue, long entryStartOffset, long newValueO
private void registerIterationContextLockedInThisThread() {
if ((this) instanceof IterationContext) {
this.rootContextInThisThread.iterationContextLockedInThisThread = true;
- }
+ }
}
public void closeIterationSegmentStagesRegisterIterationContextLockedInThisThreadDependants() {
@@ -2014,7 +2015,7 @@ public long keySize() {
public void closeKeySize() {
if (!(this.keySizeInit()))
- return ;
+ return;
this.closeKeySizeDependants();
this.keySize = -1;
@@ -2068,7 +2069,7 @@ public int segmentIndex() {
public void closeSegmentIndex() {
if (!(this.segmentIndexInit()))
- return ;
+ return;
this.closeSegmentIndexDependants();
this.segmentIndex = -1;
@@ -2114,7 +2115,7 @@ public Data inputKey() {
public void closeInputKey() {
if (!(this.inputKeyInit()))
- return ;
+ return;
this.closeInputKeyDependants();
this.inputKey = null;
@@ -2146,7 +2147,7 @@ public long pos() {
public void closePos() {
if (!(this.posInit()))
- return ;
+ return;
this.closePosDependants();
this.pos = -1;
@@ -2177,7 +2178,7 @@ public long keyOffset() {
public void closeKeyOffset() {
if (!(this.keyOffsetInit()))
- return ;
+ return;
this.closeKeyOffsetDependants();
this.keyOffset = -1;
@@ -2231,7 +2232,7 @@ public long valueSizeOffset() {
public void closeValueSizeOffset() {
if (!(this.valueSizeOffsetInit()))
- return ;
+ return;
this.closeValueSizeOffsetDependants();
this.valueSizeOffset = -1;
@@ -2281,7 +2282,7 @@ public VanillaChronicleMap m() {
public void closeMap() {
if (!(this.mapInit()))
- return ;
+ return;
this.closeMapDependants();
this.m = null;
@@ -2361,7 +2362,7 @@ public void closeIterationSegmentStagesCheckNestedContextsQueryDifferentKeysDepe
public void checkIterationContextNotLockedInThisThread() {
if (this.rootContextInThisThread.iterationContextLockedInThisThread) {
throw new IllegalStateException((((this.h().toIdentityString()) + ": Update or Write ") + "locking is forbidden in the context of locked iteration context"));
- }
+ }
}
public CompactOffHeapLinearHashTable hl() {
@@ -2403,7 +2404,7 @@ public long searchStartPos() {
public void closeSearchKey() {
if (!(this.searchKeyInit()))
- return ;
+ return;
this.closeSearchKeyDependants();
this.searchKey = CompactOffHeapLinearHashTable.UNSET_KEY;
@@ -2447,7 +2448,7 @@ public SegmentHeader segmentHeader() {
public void closeSegmentHeader() {
if (!(this.segmentHeaderInit()))
- return ;
+ return;
this.closeSegmentHeaderDependants();
this.segmentHeader = null;
@@ -2467,7 +2468,7 @@ private void resetSegmentLock(ChronicleHashCorruption.Listener corruptionListene
if (lockState != (this.segmentHeader().resetLockState())) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format("lock of segment {} is not clear: {}", this.segmentIndex(), this.segmentHeader().lockStateToString(lockState)));
this.segmentHeader().resetLock(this.segmentHeaderAddress());
- }
+ }
}
public int tier = -1;
@@ -2544,7 +2545,7 @@ public long tierIndex() {
public void closeSegmentTier() {
if (!(this.segmentTierInit()))
- return ;
+ return;
this.closeSegmentTierDependants();
this.tier = -1;
@@ -2581,15 +2582,15 @@ private void shiftHashLookupEntries() {
if ((hl.remove(hlAddr, hlPos)) != hlPos) {
hlPos = hl.stepBack(hlPos);
steps--;
- }
+ }
break;
- }
+ }
hlHolePos = hl.step(hlHolePos);
}
- }
+ }
hlPos = hl.step(hlPos);
steps++;
- } while ((hlPos != 0) || (steps == 0) );
+ } while ((hlPos != 0) || (steps == 0));
}
long keyHash = 0;
@@ -2617,7 +2618,7 @@ public long keyHash() {
public void closeKeyHash() {
if (!(this.keyHashInit()))
- return ;
+ return;
this.closeKeyHashDependants();
this.keyHash = 0;
@@ -2654,7 +2655,7 @@ public long prevTierIndex() {
public void prevTier() {
if ((tier()) == 0) {
throw new IllegalStateException(((this.h().toIdentityString()) + ": first tier doesn\'t have previous"));
- }
+ }
initSegmentTier(((tier()) - 1), prevTierIndex());
}
@@ -2693,16 +2694,16 @@ private void zeroOutFirstSegmentTierCountersArea(ChronicleHashCorruption.Listene
if ((this.prevTierIndex()) != 0) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format("stored prev tier index in first tier of segment {}: {}, should be 0", this.segmentIndex(), this.prevTierIndex()));
this.prevTierIndex(0);
- }
+ }
long tierCountersAreaAddr = this.tierCountersAreaAddr();
if ((TierCountersArea.segmentIndex(tierCountersAreaAddr)) != 0) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format("stored segment index in first tier of segment {}: {}, should be 0", this.segmentIndex(), TierCountersArea.segmentIndex(tierCountersAreaAddr)));
TierCountersArea.segmentIndex(tierCountersAreaAddr, 0);
- }
+ }
if ((TierCountersArea.tier(tierCountersAreaAddr)) != 0) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format("stored tier in first tier of segment {}: {}, should be 0", this.segmentIndex(), TierCountersArea.tier(tierCountersAreaAddr)));
TierCountersArea.tier(tierCountersAreaAddr, 0);
- }
+ }
}
public long tierEntries() {
@@ -2750,7 +2751,7 @@ private void recoverTierEntriesCounter(long entries, ChronicleHashCorruption.Lis
if ((this.tierEntries()) != entries) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format((("Wrong number of entries counter for tier with index {}, " + "stored: {}, should be: ") + (this.tierIndex())), this.tierEntries(), entries));
this.tierEntries(entries);
- }
+ }
}
private long addr() {
@@ -2831,7 +2832,7 @@ public PointerBytesStore segmentBS() {
void closeSegment() {
if (!(this.segmentInit()))
- return ;
+ return;
this.closeSegmentDependants();
entrySpaceOffset = 0;
@@ -2901,7 +2902,7 @@ public long valueSize() {
public void closeValueSize() {
if (!(this.valueSizeInit()))
- return ;
+ return;
this.closeValueSizeDependants();
this.valueSize = -1;
@@ -2972,7 +2973,7 @@ public long keySizeOffset() {
public void closeEntryOffset() {
if (!(this.entryOffsetInit()))
- return ;
+ return;
this.closeEntryOffsetDependants();
this.keySizeOffset = -1;
@@ -3061,7 +3062,7 @@ private void recoverLowestPossibleFreeChunkTiered(ChronicleHashCorruption.Listen
long finalLowestFreeChunk = lowestFreeChunk;
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format(("wrong lowest free chunk for tier with index {}, " + "stored: {}, should be: {}"), this.tierIndex(), this.lowestPossiblyFreeChunk(), finalLowestFreeChunk));
this.lowestPossiblyFreeChunk(lowestFreeChunk);
- }
+ }
}
public void nextTier() {
@@ -3122,13 +3123,13 @@ public void verifyTierCountersAreaData() {
int tierSegmentIndex = TierCountersArea.segmentIndex(tierCountersAreaAddr());
if (tierSegmentIndex != (segmentIndex())) {
throw new AssertionError(((((((((("segmentIndex: " + (segmentIndex())) + ", tier: ") + (tier())) + ", tierIndex: ") + (tierIndex())) + ", tierBaseAddr: ") + (tierBaseAddr())) + " reports it belongs to segmentIndex ") + tierSegmentIndex));
- }
+ }
if (hasNextTier()) {
long currentTierIndex = this.tierIndex();
nextTier();
if ((prevTierIndex()) != currentTierIndex) {
throw new AssertionError(((((((((((("segmentIndex: " + (segmentIndex())) + ", tier: ") + (tier())) + ", tierIndex: ") + (tierIndex())) + ", tierBaseAddr: ") + (tierBaseAddr())) + " reports the previous tierIndex is ") + (prevTierIndex())) + " while actually it is ") + currentTierIndex));
- }
+ }
} else {
break;
}
@@ -3138,7 +3139,7 @@ public void verifyTierCountersAreaData() {
public void checkAccessingFromOwnerThread() {
if ((owner) != (Thread.currentThread())) {
throw new ConcurrentModificationException(((this.h().toIdentityString()) + ": Context shouldn\'t be accessed from multiple threads"));
- }
+ }
}
public void closeOwnerThreadHolderCheckAccessingFromOwnerThreadDependants() {
@@ -3158,7 +3159,7 @@ public void checkEntryNotRemovedOnThisIteration() {
throwExceptionIfClosed();
if (entryRemovedOnThisIterationInit()) {
throw new IllegalStateException(((this.h().toIdentityString()) + ": Entry was already removed on this iteration"));
- }
+ }
}
public void closeMapSegmentIterationCheckEntryNotRemovedOnThisIterationDependants() {
@@ -3177,7 +3178,7 @@ public void closeIterationCheckOnEachPublicOperationCheckOnEachPublicOperationDe
@Override
public R replaceValue(@NotNull
- MapEntry entry, Data newValue) {
+ MapEntry entry, Data newValue) {
this.checkOnEachPublicOperation();
return this.m().entryOperations.replaceValue(entry, newValue);
}
@@ -3200,7 +3201,7 @@ public Data wrapValueAsData(V value) {
@Override
public Data defaultValue(@NotNull
- MapAbsentEntry absentEntry) {
+ MapAbsentEntry absentEntry) {
this.checkOnEachPublicOperation();
return this.m().defaultValueProvider.defaultValue(absentEntry);
}
@@ -3224,7 +3225,7 @@ public Data wrapValueBytesAsData(BytesStore, ?> bytesStore, long offset, lo
@Override
public R remove(@NotNull
- MapEntry entry) {
+ MapEntry entry) {
this.checkOnEachPublicOperation();
return this.m().entryOperations.remove(entry);
}
@@ -3241,7 +3242,7 @@ private int checkEntry(long searchKey, long entryPos, int segmentIndex, Chronicl
if ((entryPos < 0) || (entryPos >= (h.actualChunksPerSegmentTier))) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format("Entry pos is out of range: {}, should be 0-{}", entryPos, ((h.actualChunksPerSegmentTier) - 1)));
return -1;
- }
+ }
try {
this.readExistingEntry(entryPos);
} catch (Exception e) {
@@ -3251,25 +3252,25 @@ private int checkEntry(long searchKey, long entryPos, int segmentIndex, Chronicl
if ((this.keyEnd()) > (this.segmentBytes().capacity())) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("Wrong key size: " + (this.keySize()))));
return -1;
- }
+ }
long keyHashCode = this.keyHashCode();
int segmentIndexFromKey = h.hashSplitting.segmentIndex(keyHashCode);
if ((segmentIndexFromKey < 0) || (segmentIndexFromKey >= (h.actualSegments))) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("Segment index from the entry key hash code is out of range: {}, " + "should be 0-{}, entry key: {}"), segmentIndexFromKey, ((h.actualSegments) - 1), this.key()));
return -1;
- }
+ }
long segmentHashFromKey = h.hashSplitting.segmentHash(keyHashCode);
long searchKeyFromKey = h.hashLookup.maskUnsetKey(segmentHashFromKey);
if (searchKey != searchKeyFromKey) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("HashLookup searchKey: {}, HashLookup searchKey " + "from the entry key hash code: {}, entry key: {}, entry pos: {}"), searchKey, searchKeyFromKey, this.key(), entryPos));
return -1;
- }
+ }
try {
long entryAndChecksumEnd = (this.entryEnd()) + (this.checksumStrategy.extraEntryBytes());
if (entryAndChecksumEnd > (this.segmentBytes().capacity())) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("Wrong value size: {}, key: " + (this.valueSize())), this.key()));
return -1;
- }
+ }
} catch (Exception ex) {
ChronicleHashCorruptionImpl.reportException(corruptionListener, corruption, segmentIndex, () -> "Exception while reading entry value size, key: " + (this.key()), ex);
return -1;
@@ -3279,11 +3280,11 @@ private int checkEntry(long searchKey, long entryPos, int segmentIndex, Chronicl
if (storedChecksum != checksumFromEntry) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("Checksum doesn\'t match, stored: {}, should be from " + "the entry bytes: {}, key: {}, value: {}"), storedChecksum, checksumFromEntry, this.key(), this.value()));
return -1;
- }
+ }
if (!(this.freeList().isRangeClear(entryPos, (entryPos + (this.entrySizeInChunks()))))) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format("Overlapping entry: positions {}-{}, key: {}, value: {}", entryPos, ((entryPos + (this.entrySizeInChunks())) - 1), this.key(), this.value()));
return -1;
- }
+ }
if (segmentIndex < 0) {
return segmentIndexFromKey;
} else {
@@ -3307,7 +3308,8 @@ public void removeDuplicatesInSegment(ChronicleHashCorruption.Listener corruptio
long hlPos = startHlPos;
int steps = 0;
long entries = 0;
- tierIteration : do {
+ tierIteration:
+ do {
hlPos = hashLookup.step(hlPos);
steps++;
long entry = hashLookup.readEntry(currentTierBaseAddr, hlPos);
@@ -3316,28 +3318,28 @@ public void removeDuplicatesInSegment(ChronicleHashCorruption.Listener corruptio
Data key = this.key();
try (ExternalMapQueryContext, ?, ?> c = m.queryContext(key)) {
MapEntry, ?> entry2 = c.entry();
- Data> key2 = ((MapEntry)(c)).key();
+ Data> key2 = ((MapEntry) (c)).key();
long keyAddress = key.bytes().addressForRead(key.offset());
long key2Address = key2.bytes().addressForRead(key2.offset());
if (key2Address != keyAddress) {
- ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format(("entries with duplicate key {} in segment {}: " + "with values {} and {}, removing the latter"), key, c.segmentIndex(), (entry2 != null ? ((MapEntry)(c)).value() : ""), (!(this.entryDeleted()) ? this.value() : "")));
+ ChronicleHashCorruptionImpl.report(corruptionListener, corruption, this.segmentIndex(), () -> ChronicleHashCorruptionImpl.format(("entries with duplicate key {} in segment {}: " + "with values {} and {}, removing the latter"), key, c.segmentIndex(), (entry2 != null ? ((MapEntry) (c)).value() : ""), (!(this.entryDeleted()) ? this.value() : "")));
if ((hashLookup.remove(currentTierBaseAddr, hlPos)) != hlPos) {
hlPos = hashLookup.stepBack(hlPos);
steps--;
- }
+ }
continue tierIteration;
- }
+ }
}
entries++;
- }
- } while ((hlPos != startHlPos) || (steps == 0) );
+ }
+ } while ((hlPos != startHlPos) || (steps == 0));
recoverTierEntriesCounter(entries, corruptionListener, corruption);
recoverLowestPossibleFreeChunkTiered(corruptionListener, corruption);
}
private void removeDuplicatesInSegments(ChronicleHashCorruption.Listener corruptionListener, ChronicleHashCorruptionImpl corruption) {
VanillaChronicleHash h = this.h();
- for (int segmentIndex = 0 ; segmentIndex < (h.actualSegments) ; segmentIndex++) {
+ for (int segmentIndex = 0; segmentIndex < (h.actualSegments); segmentIndex++) {
this.initSegmentIndex(segmentIndex);
this.initSegmentTier();
this.goToLastTier();
@@ -3361,12 +3363,13 @@ public int recoverTier(int segmentIndex, ChronicleHashCorruption.Listener corrup
long hlPos = 0;
do {
long hlEntry = hl.readEntry(hlAddr, hlPos);
- nextHlPos : if (!(hl.empty(hlEntry))) {
+ nextHlPos:
+ if (!(hl.empty(hlEntry))) {
hl.clearEntry(hlAddr, hlPos);
if (validEntries >= (h.maxEntriesPerHashLookup)) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format("Too many entries in tier with index {}, max is {}", this.tierIndex(), h.maxEntriesPerHashLookup));
break nextHlPos;
- }
+ }
long searchKey = hl.key(hlEntry);
long entryPos = hl.value(hlEntry);
int si = checkEntry(searchKey, entryPos, segmentIndex, corruptionListener, corruption);
@@ -3384,16 +3387,17 @@ public int recoverTier(int segmentIndex, ChronicleHashCorruption.Listener corrup
hl.writeEntry(hlAddr, insertPos, hl.entry(searchKey, entryPos));
validEntries++;
break nextHlPos;
- }
+ }
if (insertPos == hlPos) {
throw new ChronicleHashRecoveryFailedException((("Concurrent modification of " + (h.toIdentityString())) + " while recovery procedure is in progress"));
- }
- checkDuplicateKeys : if ((hl.key(hlInsertEntry)) == searchKey) {
+ }
+ checkDuplicateKeys:
+ if ((hl.key(hlInsertEntry)) == searchKey) {
long anotherEntryPos = hl.value(hlInsertEntry);
if (anotherEntryPos == entryPos) {
validEntries++;
break nextHlPos;
- }
+ }
long currentKeyOffset = this.keyOffset();
long currentKeySize = this.keySize();
int currentEntrySizeInChunks = this.entrySizeInChunks();
@@ -3401,19 +3405,19 @@ public int recoverTier(int segmentIndex, ChronicleHashCorruption.Listener corrup
this.readExistingEntry(anotherEntryPos);
} else if ((checkEntry(searchKey, anotherEntryPos, segmentIndex, corruptionListener, corruption)) < 0) {
break checkDuplicateKeys;
- }
+ }
if (((this.keySize()) == currentKeySize) && (BytesUtil.bytesEqual(this.segmentBS(), currentKeyOffset, this.segmentBS(), this.keyOffset(), currentKeySize))) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("Entries with duplicate keys within a tier: " + "at pos {} and {} with key {}, first value is {}"), entryPos, anotherEntryPos, this.key(), this.value()));
this.freeList().clearRange(entryPos, (entryPos + currentEntrySizeInChunks));
break nextHlPos;
- }
- }
+ }
+ }
insertPos = hl.step(insertPos);
- } while (insertPos != startInsertPos );
+ } while (insertPos != startInsertPos);
throw new ChronicleHashRecoveryFailedException(((("HashLookup overflow should never occur. " + "It might also be concurrent access to ") + (h.toIdentityString())) + " while recovery procedure is in progress"));
- }
+ }
hlPos = hl.step(hlPos);
- } while (hlPos != 0 );
+ } while (hlPos != 0);
shiftHashLookupEntries();
return segmentIndex;
}
@@ -3422,7 +3426,7 @@ public int recoverTier(int segmentIndex, ChronicleHashCorruption.Listener corrup
public void recoverSegments(ChronicleHashCorruption.Listener corruptionListener, ChronicleHashCorruptionImpl corruption) {
throwExceptionIfClosed();
VanillaChronicleHash h = this.h();
- for (int segmentIndex = 0 ; segmentIndex < (h.actualSegments) ; segmentIndex++) {
+ for (int segmentIndex = 0; segmentIndex < (h.actualSegments); segmentIndex++) {
this.initSegmentIndex(segmentIndex);
resetSegmentLock(corruptionListener, corruption);
zeroOutFirstSegmentTierCountersArea(corruptionListener, corruption);
@@ -3434,7 +3438,7 @@ public void recoverSegments(ChronicleHashCorruption.Listener corruptionListener,
long expectedExtraTiersInUse = Math.max(0, Math.min(storedExtraTiersInUse, allocatedExtraTiers));
long actualExtraTiersInUse = 0;
long firstFreeExtraTierIndex = -1;
- for (long extraTierIndex = 0 ; extraTierIndex < expectedExtraTiersInUse ; extraTierIndex++) {
+ for (long extraTierIndex = 0; extraTierIndex < expectedExtraTiersInUse; extraTierIndex++) {
long tierIndex = h.extraTierIndexToTierIndex(extraTierIndex);
this.initSegmentTier(0, tierIndex);
int segmentIndex = this.recoverTier(-1, corruptionListener, corruption);
@@ -3444,7 +3448,7 @@ public void recoverSegments(ChronicleHashCorruption.Listener corruptionListener,
if (storedSegmentIndex != segmentIndex) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, segmentIndex, () -> ChronicleHashCorruptionImpl.format(("wrong segment index stored in tier counters area " + "of tier with index {}: {}, should be, based on entries: {}"), tierIndex, storedSegmentIndex, segmentIndex));
TierCountersArea.segmentIndex(tierCountersAreaAddr, segmentIndex);
- }
+ }
TierCountersArea.nextTierIndex(tierCountersAreaAddr, 0);
this.initSegmentIndex(segmentIndex);
this.goToLastTier();
@@ -3461,7 +3465,7 @@ public void recoverSegments(ChronicleHashCorruption.Listener corruptionListener,
long finalActualExtraTiersInUse = actualExtraTiersInUse;
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, -1, () -> ChronicleHashCorruptionImpl.format((("wrong number of actual tiers in use in global mutable state, stored: {}, " + "should be: ") + storedExtraTiersInUse), finalActualExtraTiersInUse));
globalMutableState.setExtraTiersInUse(actualExtraTiersInUse);
- }
+ }
long firstFreeTierIndex;
if (firstFreeExtraTierIndex == (-1)) {
if (allocatedExtraTiers > expectedExtraTiersInUse) {
@@ -3475,12 +3479,12 @@ public void recoverSegments(ChronicleHashCorruption.Listener corruptionListener,
if (firstFreeTierIndex > 0) {
long lastTierIndex = h.extraTierIndexToTierIndex((allocatedExtraTiers - 1));
h.linkAndZeroOutFreeTiers(firstFreeTierIndex, lastTierIndex);
- }
+ }
long storedFirstFreeTierIndex = globalMutableState.getFirstFreeTierIndex();
if (storedFirstFreeTierIndex != firstFreeTierIndex) {
ChronicleHashCorruptionImpl.report(corruptionListener, corruption, -1, () -> ChronicleHashCorruptionImpl.format((("wrong first free tier index in global mutable state, stored: {}, " + "should be: ") + storedFirstFreeTierIndex), firstFreeTierIndex));
globalMutableState.setFirstFreeTierIndex(firstFreeTierIndex);
- }
+ }
removeDuplicatesInSegments(corruptionListener, corruption);
}
@@ -3500,7 +3504,7 @@ public InterProcessLock updateLock() {
@Override
public R insert(@NotNull
- MapAbsentEntry absentEntry, Data value) {
+ MapAbsentEntry absentEntry, Data value) {
this.checkOnEachPublicOperation();
return this.m().entryOperations.insert(absentEntry, value);
}
@@ -3534,7 +3538,7 @@ public boolean used() {
@SuppressWarnings(value = "unused")
void closeUsed() {
if (!(this.usedInit()))
- return ;
+ return;
this.closeUsedDependants();
used = false;
@@ -3577,14 +3581,14 @@ void initLocks() {
localLockState = LocalLockState.UNLOCKED;
int indexOfThisContext = this.indexInContextChain;
- for (int i = indexOfThisContext - 1 ; i >= 0 ; i--) {
+ for (int i = indexOfThisContext - 1; i >= 0; i--) {
if (tryFindInitLocksOfThisSegment(i))
- return ;
+ return;
}
- for (int i = indexOfThisContext + 1, size = this.contextChain.size() ; i < size ; i++) {
+ for (int i = indexOfThisContext + 1, size = this.contextChain.size(); i < size; i++) {
if (tryFindInitLocksOfThisSegment(i))
- return ;
+ return;
}
rootContextLockedOnThisSegment = this;
@@ -3650,7 +3654,7 @@ public LocksInterface rootContextLockedOnThisSegment() {
void closeLocks() {
if (!(this.locksInit()))
- return ;
+ return;
this.closeLocksDependants();
if ((rootContextLockedOnThisSegment) == (this)) {
@@ -3703,7 +3707,7 @@ public long hashLookupPos() {
public void closeHashLookupPos() {
if (!(this.hashLookupPosInit()))
- return ;
+ return;
this.closeHashLookupPosDependants();
this.hashLookupPos = -1;
@@ -3722,7 +3726,7 @@ public long nextPos() {
if (hl.empty(entry)) {
this.setHashLookupPosGuarded(pos);
return -1L;
- }
+ }
pos = hl.step(pos);
if (pos == (searchStartPos()))
break;
@@ -3730,7 +3734,7 @@ public long nextPos() {
if ((hl.key(entry)) == (searchKey())) {
this.setHashLookupPosGuarded(pos);
return hl.value(entry);
- }
+ }
}
throw new IllegalStateException(((this.h().toIdentityString()) + ": HashLookup overflow should never occur"));
}
@@ -3754,7 +3758,7 @@ public boolean keySearchInit() {
}
public void initKeySearch() {
- for (long pos ; (pos = this.nextPos()) >= 0L ; ) {
+ for (long pos; (pos = this.nextPos()) >= 0L; ) {
if (inputKeyInit()) {
long keySizeOffset = (this.entrySpaceOffset()) + (pos * (this.m().chunkSize));
Bytes segmentBytes = this.segmentBytesForReadGuarded();
@@ -3767,8 +3771,8 @@ public void initKeySearch() {
this.found();
this.readFoundEntry(pos, keySizeOffset, keySize, keyOffset);
searchState = CompiledMapIterationContext.SearchState.PRESENT;
- return ;
- }
+ return;
+ }
}
searchState = CompiledMapIterationContext.SearchState.ABSENT;
}
@@ -3799,7 +3803,7 @@ public void putNewVolatile(long entryPos) {
if (keySearchReInit) {
this.readExistingEntry(entryPos);
- }
+ }
hl().checkValueForPut(entryPos);
hl().writeEntryVolatile(addr(), this.hashLookupPos(), searchKey(), entryPos);
}
@@ -3819,17 +3823,17 @@ public String debugLocksState() {
if (!(this.usedInit())) {
s += "unused";
return s;
- }
+ }
s += "used, ";
if (!(segmentIndexInit())) {
s += "segment uninitialized";
return s;
- }
+ }
s += ("segment " + (segmentIndex())) + ", ";
if (!(locksInit())) {
s += "locks uninitialized";
return s;
- }
+ }
s += ("local state: " + (localLockState())) + ", ";
s += ("read lock count: " + (rootContextLockedOnThisSegment().totalReadLockCount())) + ", ";
s += ("update lock count: " + (rootContextLockedOnThisSegment().totalUpdateLockCount())) + ", ";
@@ -3842,7 +3846,7 @@ public boolean checkSum() {
this.checkOnEachPublicOperation();
if (!(this.h().checksumEntries)) {
throw new UnsupportedOperationException(((this.h().toIdentityString()) + ": Checksum is not stored in this Chronicle Hash"));
- }
+ }
this.innerUpdateLock.lock();
return (delayedUpdateChecksumInit()) || (checksumStrategy.innerCheckSum());
}
@@ -3852,12 +3856,12 @@ public void updateChecksum() {
this.checkOnEachPublicOperation();
if (!(this.h().checksumEntries)) {
throw new UnsupportedOperationException(((this.h().toIdentityString()) + ": Checksum is not stored in this Chronicle Hash"));
- }
+ }
this.innerUpdateLock.lock();
initDelayedUpdateChecksum(true);
}
- public boolean forEachTierEntryWhile(Predicate super T> predicate, int currentTier, long currentTierBaseAddr, long tierIndex) {
+ public boolean forEachTierEntryWhile(Predicate super T> predicate, int currentTier, long currentTierBaseAddr, long tierIndex) {
long leftEntries = tierEntriesForIteration();
boolean interrupted = false;
long startPos = 0L;
@@ -3879,7 +3883,7 @@ public boolean forEachTierEntryWhile(Predicate super T> predicate, int curr
if (shouldTestEntry()) {
initEntryRemovedOnThisIteration(false);
try {
- if (!(predicate.test(((T)(entryForIteration()))))) {
+ if (!(predicate.test(((T) (entryForIteration()))))) {
interrupted = true;
break;
} else {
@@ -3894,20 +3898,20 @@ public boolean forEachTierEntryWhile(Predicate super T> predicate, int curr
currentHashLookupPos = hashLookup.stepBack(currentHashLookupPos);
steps--;
this.initHashLookupPos(currentHashLookupPos);
- }
+ }
this.innerWriteLock.unlock();
this.closeKeyOffset();
}
- }
- }
- } while ((currentHashLookupPos != startPos) || (steps == 0) );
+ }
+ }
+ } while ((currentHashLookupPos != startPos) || (steps == 0));
if ((!interrupted) && (leftEntries > 0)) {
throw new IllegalStateException((((((this.h().toIdentityString()) + ": We went through a tier without interruption, ") + "but according to tier counters there should be ") + leftEntries) + " more entries. Size diverged?"));
- }
+ }
return interrupted;
}
- public boolean innerForEachSegmentEntryWhile(Predicate super T> predicate) {
+ public boolean innerForEachSegmentEntryWhile(Predicate super T> predicate) {
try {
this.goToLastTier();
while (true) {
@@ -3955,7 +3959,7 @@ protected void relocation(Data newValue, long newEntrySize) {
if (!(this.searchStateAbsent()))
throw new AssertionError();
- }
+ }
initValue(newValue);
freeExtraAllocatedChunks();
CompactOffHeapLinearHashTable hl = this.h().hashLookup;
@@ -3978,18 +3982,19 @@ public void innerDefaultReplaceValue(Data newValue) {
long newValueOffset = VanillaChronicleMap.alignAddr((entryStartOffset + newSizeOfEverythingBeforeValue), this.m().alignment);
long newEntrySize = newEntrySize(newValue, entryStartOffset, newValueOffset);
int newSizeInChunks = m.inChunks(newEntrySize);
- newValueDoesNotFit : if (newSizeInChunks > (entrySizeInChunks())) {
+ newValueDoesNotFit:
+ if (newSizeInChunks > (entrySizeInChunks())) {
if (newSizeInChunks > (m.maxChunksPerEntry)) {
throw new IllegalArgumentException(((((((m.toIdentityString()) + ": Value too large: entry takes ") + newSizeInChunks) + " chunks, ") + (m.maxChunksPerEntry)) + " is maximum."));
- }
+ }
if (this.reallocGuarded(pos(), entrySizeInChunks(), newSizeInChunks)) {
break newValueDoesNotFit;
- }
+ }
relocation(newValue, newEntrySize);
- return ;
+ return;
} else if (newSizeInChunks < (entrySizeInChunks())) {
this.freeExtraGuarded(pos(), entrySizeInChunks(), newSizeInChunks);
- }
+ }
}
this.innerWriteLock.lock();
if (newValueSizeIsDifferent) {
@@ -4019,7 +4024,7 @@ public void iterationRemove() {
throwExceptionIfClosed();
if ((this.h().hashLookup.remove(this.tierBaseAddr(), this.hashLookupPos())) != (this.hashLookupPos())) {
this.setHashLookupPosGuarded(this.h().hashLookup.stepBack(this.hashLookupPos()));
- }
+ }
this.innerRemoveEntryExceptHashLookupUpdate();
}
@@ -4051,7 +4056,7 @@ public void initDelayedUpdateChecksum(boolean delayedUpdateChecksum) {
public void closeDelayedUpdateChecksum() {
if (!(this.delayedUpdateChecksumInit()))
- return ;
+ return;
if (this.h().checksumEntries)
this.hashEntryChecksumStrategy.computeAndStoreChecksum();
diff --git a/src/main/java/net/openhft/chronicle/map/impl/CompiledMapQueryContext.java b/src/main/java/net/openhft/chronicle/map/impl/CompiledMapQueryContext.java
index 1a6427a20..dcbd4e2ea 100644
--- a/src/main/java/net/openhft/chronicle/map/impl/CompiledMapQueryContext.java
+++ b/src/main/java/net/openhft/chronicle/map/impl/CompiledMapQueryContext.java
@@ -1788,10 +1788,12 @@ public int decrementWrite() {
}
public enum EntryPresence {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
public enum SearchState {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
private long _HashEntryStages_entryEnd() {
return keyEnd();
diff --git a/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapIterationContext.java b/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapIterationContext.java
index a635a73a5..e090ba4e6 100644
--- a/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapIterationContext.java
+++ b/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapIterationContext.java
@@ -344,7 +344,8 @@ public void setLocalLockStateGuarded(LocalLockState newState) {
}
enum EntriesToTest {
-PRESENT, ALL; }
+ PRESENT, ALL;
+ }
private long _MapEntryStages_countValueSizeOffset() {
return keyEnd();
@@ -1671,7 +1672,8 @@ public int decrementWrite() {
}
public enum SearchState {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
private long _HashEntryStages_entryEnd() {
return keyEnd();
diff --git a/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapQueryContext.java b/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapQueryContext.java
index efb636a17..7e117c6d3 100644
--- a/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapQueryContext.java
+++ b/src/main/java/net/openhft/chronicle/map/impl/CompiledReplicatedMapQueryContext.java
@@ -1848,10 +1848,12 @@ public int decrementWrite() {
}
public enum EntryPresence {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
public enum SearchState {
-PRESENT, ABSENT; }
+ PRESENT, ABSENT;
+ }
private long _HashEntryStages_entryEnd() {
return keyEnd();
diff --git a/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/WrappedValueBytesData.java b/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/WrappedValueBytesData.java
index 7665aead4..af316c51a 100644
--- a/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/WrappedValueBytesData.java
+++ b/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/WrappedValueBytesData.java
@@ -48,8 +48,6 @@ public class WrappedValueBytesData extends AbstractData {
private boolean wrappedValueBytesUsed = false;
@Stage("CachedWrappedValue")
private V cachedWrappedValue;
- @Stage("CachedWrappedValue")
- private boolean cachedWrappedValueRead = false;
boolean nextInit() {
return true;
@@ -102,7 +100,6 @@ void closeWrappedValueBytes() {
private void initCachedWrappedValue() {
cachedWrappedValue = innerGetUsing(cachedWrappedValue);
- cachedWrappedValueRead = true;
}
@Override
diff --git a/src/main/java/net/openhft/chronicle/map/locks/ChronicleStampedLock.java b/src/main/java/net/openhft/chronicle/map/locks/ChronicleStampedLock.java
index d5eb9d03c..87b68ec40 100644
--- a/src/main/java/net/openhft/chronicle/map/locks/ChronicleStampedLock.java
+++ b/src/main/java/net/openhft/chronicle/map/locks/ChronicleStampedLock.java
@@ -173,15 +173,13 @@ public long tryConvertToWriteLock(long stamp) {
@Override
public long tryWriteLock() {
- long l = 0L;
-
offHeapLock = chm.get("Stamp ");
lastWriterT = chm.get("LastWriterTime ");
writeLockHolderCount = chmW.get("WriterCount ");
- l = offHeapLock.getEntryLockState();
+ long lockState = offHeapLock.getEntryLockState();
- if (l != 0L)
+ if (lockState != 0L)
return 0L;
do {
@@ -202,7 +200,7 @@ public long tryWriteLock() {
","
);
offHeapLock = chm.get("Stamp ");
- l = offHeapLock.getEntryLockState();
+ lockState = offHeapLock.getEntryLockState();
try {
Thread.sleep((long) (1000 * Math.random()));
} catch (InterruptedException e) {
@@ -211,10 +209,8 @@ public long tryWriteLock() {
}
} while (
readLockHolderCount.getVolatileValue() > 0 ||
- (writeLockHolderCount =
- chmW.get("WriterCount "))
- .getVolatileValue() > 0
- );
+ (writeLockHolderCount = chmW.get("WriterCount "))
+ .getVolatileValue() > 0);
writeLockHolderCount.addAtomicValue(+1);
chmW.put("WriterCount ", writeLockHolderCount);
@@ -306,12 +302,11 @@ public long tryReadLock() {
@Override
public long writeLock() {
- long l = 0L;
-
offHeapLock = chm.get("Stamp ");
lastWriterT = chm.get("LastWriterTime ");
writeLockHolderCount = chmW.get("WriterCount ");
+ long lockState;
do {
Jvm.debug().on(getClass(),
" ,@t=" + System.currentTimeMillis() +
@@ -330,7 +325,7 @@ public long writeLock() {
","
);
offHeapLock = chm.get("Stamp ");
- l = offHeapLock.getEntryLockState();
+ lockState = offHeapLock.getEntryLockState();
try {
Thread.sleep((long) (1000 * Math.random()));
} catch (InterruptedException e) {
@@ -338,7 +333,7 @@ public long writeLock() {
Thread.currentThread().interrupt();
}
} while (
- l != 0L ||
+ lockState != 0L ||
readLockHolderCount.getVolatileValue() > 0 ||
(writeLockHolderCount = chmW.get("WriterCount ")).getVolatileValue() > 0
);
@@ -373,11 +368,10 @@ public long writeLock() {
@Override
public long readLock() {
- long l = 0L;
-
offHeapLock = chm.get("Stamp ");
readLockHolderCount = chmR.get("ReaderCount ");
+ long lockState;
do {
Jvm.debug().on(getClass(),
" ,@t=" + System.currentTimeMillis() +
@@ -391,14 +385,14 @@ public long readLock() {
"] " +
","
);
- l = (offHeapLock = chm.get("Stamp ")).getEntryLockState();
+ lockState = (offHeapLock = chm.get("Stamp ")).getEntryLockState();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
Thread.currentThread().interrupt();
}
- } while (l < 0L);
+ } while (lockState < 0L);
Jvm.debug().on(getClass(),
" ,@t=" + System.currentTimeMillis() +
@@ -437,7 +431,7 @@ public void unlock(long stamp) {
} else if (stamp > 0L) {
unlockRead(stamp);
} else {
- // lock available
+ return;
}
}
diff --git a/src/main/java/net/openhft/chronicle/set/ChronicleSetBuilder.java b/src/main/java/net/openhft/chronicle/set/ChronicleSetBuilder.java
index 2e56af71a..00bbe9e53 100644
--- a/src/main/java/net/openhft/chronicle/set/ChronicleSetBuilder.java
+++ b/src/main/java/net/openhft/chronicle/set/ChronicleSetBuilder.java
@@ -35,7 +35,6 @@ public final class ChronicleSetBuilder
corruption -> Jvm.error().on(ChronicleSetBuilder.class, corruption.message(), corruption.exception());
private ChronicleMapBuilder chronicleMapBuilder;
- private final ChronicleSetBuilderPrivateAPI privateAPI;
ChronicleSetBuilder(Class keyClass) {
chronicleMapBuilder = ChronicleMapBuilder.of(keyClass, DummyValue.class)
@@ -43,7 +42,7 @@ public final class ChronicleSetBuilder
DummyValueMarshaller.INSTANCE, DummyValueMarshaller.INSTANCE)
.valueSizeMarshaller(SizeMarshaller.constant(0));
//noinspection deprecation,unchecked
- privateAPI = new ChronicleSetBuilderPrivateAPI<>(
+ ChronicleSetBuilderPrivateAPI privateAPI = new ChronicleSetBuilderPrivateAPI<>(
(ChronicleHashBuilderPrivateAPI>)
Jvm.getValue(chronicleMapBuilder, "privateAPI"));
}
diff --git a/src/main/java/net/openhft/chronicle/set/SetFromMap.java b/src/main/java/net/openhft/chronicle/set/SetFromMap.java
index c116bb225..db7dcbb76 100644
--- a/src/main/java/net/openhft/chronicle/set/SetFromMap.java
+++ b/src/main/java/net/openhft/chronicle/set/SetFromMap.java
@@ -29,7 +29,7 @@
class SetFromMap extends AbstractSet implements ChronicleSet {
private final ChronicleMap m; // The backing map
- private transient Set s; // Its keySet
+ private final transient Set s; // Its keySet
SetFromMap(VanillaChronicleMap map) {
m = map;
diff --git a/src/main/java/net/openhft/xstream/converters/AbstractChronicleMapConverter.java b/src/main/java/net/openhft/xstream/converters/AbstractChronicleMapConverter.java
index 715178ca9..aed3371ff 100644
--- a/src/main/java/net/openhft/xstream/converters/AbstractChronicleMapConverter.java
+++ b/src/main/java/net/openhft/xstream/converters/AbstractChronicleMapConverter.java
@@ -46,7 +46,8 @@ private static E deserialize(@NotNull UnmarshallingContext unmarshallingCont
case "java.util.Collections$EmptyMap":
case "java.util.Collections.EmptyMap":
return (E) Collections.emptyMap();
-
+ default:
+ break;
}
return (E) unmarshallingContext.convertAnother(null, forName(reader.getNodeName()));
diff --git a/src/main/java/net/openhft/xstream/converters/ByteBufferConverter.java b/src/main/java/net/openhft/xstream/converters/ByteBufferConverter.java
index bb1279288..732433999 100644
--- a/src/main/java/net/openhft/xstream/converters/ByteBufferConverter.java
+++ b/src/main/java/net/openhft/xstream/converters/ByteBufferConverter.java
@@ -14,13 +14,15 @@
import java.nio.CharBuffer;
import java.nio.charset.*;
+import static java.nio.charset.StandardCharsets.*;
+
/**
* Created by Rob Austin
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public class ByteBufferConverter implements Converter {
- private final Charset charset = Charset.forName("ISO-8859-1");
+ private final Charset charset = ISO_8859_1;
private final CharsetDecoder decoder = charset.newDecoder();
@Override
@@ -47,7 +49,7 @@ public void marshal(Object o, HierarchicalStreamWriter writer, MarshallingContex
buffer.capacity();
int position = buffer.position();
- int limit = buffer.limit();
+ final int limit = buffer.limit();
buffer.clear();
@@ -78,7 +80,7 @@ public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext un
reader.moveUp();
reader.moveDown();
- int limit = (Integer) unmarshallingContext.convertAnother(null, int.class);
+ final int limit = (Integer) unmarshallingContext.convertAnother(null, int.class);
reader.moveUp();
reader.moveDown();
diff --git a/src/main/java/net/openhft/xstream/converters/ValueConverter.java b/src/main/java/net/openhft/xstream/converters/ValueConverter.java
index eddc75b64..2cfe96a75 100644
--- a/src/main/java/net/openhft/xstream/converters/ValueConverter.java
+++ b/src/main/java/net/openhft/xstream/converters/ValueConverter.java
@@ -84,7 +84,7 @@ public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext co
}
private void fillInObject(HierarchicalStreamReader reader, UnmarshallingContext context,
- ValueModel valueModel, Object using) throws ClassNotFoundException {
+ ValueModel valueModel, Object using) {
while (reader.hasMoreChildren()) {
reader.moveDown();
diff --git a/src/test/java/eg/BigData.java b/src/test/java/eg/BigData.java
index f0e2f81d9..a06381b5b 100644
--- a/src/test/java/eg/BigData.java
+++ b/src/test/java/eg/BigData.java
@@ -20,12 +20,12 @@
vm.dirty_writeback_centisecs = 3000
*/
public class BigData {
- final static long MAXSIZE = 1000 * 1000 * 1000L;
+ static final long MAXSIZE = 1000 * 1000 * 1000L;
static final ChronicleMapBuilder builder =
ChronicleMapBuilder.of(Long.class, BigDataStuff.class);
//run 1st test with no map, and Highwatermark set to 0
//then switch to Highwatermark set to MAXSIZE for subsequent test repeats
- static AtomicInteger Highwatermark = new AtomicInteger((int) MAXSIZE);
+ static final AtomicInteger highWatermark = new AtomicInteger((int) MAXSIZE);
static Map theMap;
// static AtomicInteger Highwatermark = new AtomicInteger(0);
@@ -47,37 +47,37 @@ public class BigData {
public static void main(String[] args) throws IOException, InterruptedException {
long start = System.currentTimeMillis();
initialbuild();
- System.out.println("Start highwatermark " + Highwatermark.get());
+ System.out.println("Start highwatermark " + highWatermark.get());
for (int i = 0; i < 10; i++) {
Thread t1 = new Thread("test 1") {
public void run() {
- _test();
+ runTest();
}
};
Thread t2 = new Thread("test 2") {
public void run() {
- _test();
+ runTest();
}
};
Thread t3 = new Thread("test 3") {
public void run() {
- _test();
+ runTest();
}
};
t1.start();
t2.start();
t3.start();
- _test();
+ runTest();
t1.join();
t2.join();
t3.join();
}
- System.out.println("End highwatermark " + Highwatermark.get());
+ System.out.println("End highwatermark " + highWatermark.get());
long time = System.currentTimeMillis() - start;
System.out.printf("End to end took %.1f%n", time / 1e3);
}
- public static void initialbuild() throws IOException, InterruptedException {
+ public static void initialbuild() throws InterruptedException {
System.out.println("building an empty map");
long start = System.currentTimeMillis();
Thread t1 = new Thread("test 1") {
@@ -123,26 +123,22 @@ public static void populate(int n) {
}
}
- public static void _test() {
+ public static void runTest() {
// improves logging of these threads.
Affinity.setThreadId();
- try {
- test();
- } catch (IOException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
+ runPerformanceIteration();
+
}
- public static void test() throws IOException {
+ public static void runPerformanceIteration() {
//do a sequence 1m of each of insert/read/update
//inserts
- long LOOPCOUNT = 100 * 1000L;
+ long loopCount = 100 * 1000L;
Random rand = new Random();
long start = System.currentTimeMillis();
BigDataStuff value = new BigDataStuff(0);
- for (long i = 0; i < LOOPCOUNT; i++) {
- long current = rand.nextInt(Highwatermark.get());
+ for (long i = 0; i < loopCount; i++) {
+ long current = rand.nextInt(highWatermark.get());
value.x = current;
value.y.setLength(0);
value.y.append(current);
@@ -153,8 +149,8 @@ public static void test() throws IOException {
int count = 0;
start = System.currentTimeMillis();
- for (long i = 0; i < LOOPCOUNT; i++) {
- long keyval = rand.nextInt(Highwatermark.get());
+ for (long i = 0; i < loopCount; i++) {
+ long keyval = rand.nextInt(highWatermark.get());
count++;
BigDataStuff stuff = theMap.get(keyval);
if (stuff == null) {
@@ -166,8 +162,8 @@ public static void test() throws IOException {
start = System.currentTimeMillis();
count = 0;
- for (long i = 0; i < LOOPCOUNT; i++) {
- long keyval = rand.nextInt(Highwatermark.get());
+ for (long i = 0; i < loopCount; i++) {
+ long keyval = rand.nextInt(highWatermark.get());
BigDataStuff stuff = theMap.get(keyval);
if (stuff == null) {
System.out.println("hit an empty at key " + keyval);
@@ -191,6 +187,9 @@ public BigDataStuff(long x) {
this.x = x;
}
+ public BigDataStuff() {
+ }
+
@Override
public void writeExternal(ObjectOutput out) throws IOException {
out.writeLong(x);
diff --git a/src/test/java/eg/WordCountTest.java b/src/test/java/eg/WordCountTest.java
index 3e018c4bf..b0c90f95a 100644
--- a/src/test/java/eg/WordCountTest.java
+++ b/src/test/java/eg/WordCountTest.java
@@ -26,14 +26,14 @@
public class WordCountTest {
- static String[] words;
- static Map expectedMap;
+ static final String[] words;
+ static final Map expectedMap;
static {
// english version of war and peace -> ascii
ClassLoader cl = Thread.currentThread().getContextClassLoader();
try (InputStream zippedIS = Objects.requireNonNull(cl.getResourceAsStream("war_and_peace.txt.gz"));
- GZIPInputStream binaryIS = new GZIPInputStream(zippedIS);) {
+ GZIPInputStream binaryIS = new GZIPInputStream(zippedIS)) {
String fullText =
new String(ByteStreams.toByteArray(binaryIS), UTF_8);
words = fullText.split("\\s+");
diff --git a/src/test/java/examples/portfolio/PortfolioAssetInterface.java b/src/test/java/examples/portfolio/PortfolioAssetInterface.java
index 441f2050c..219345cbe 100644
--- a/src/test/java/examples/portfolio/PortfolioAssetInterface.java
+++ b/src/test/java/examples/portfolio/PortfolioAssetInterface.java
@@ -4,15 +4,15 @@
package examples.portfolio;
public interface PortfolioAssetInterface {
- public long getAssetId();
+ long getAssetId();
- public void setAssetId(long assetId);
+ void setAssetId(long assetId);
- public int getShares();
+ int getShares();
- public void setShares(int shares);
+ void setShares(int shares);
- public double getPrice();
+ double getPrice();
- public void setPrice(double price);
+ void setPrice(double price);
}
diff --git a/src/test/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccessTest.java b/src/test/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccessTest.java
index 22ca8cb87..602134cf4 100644
--- a/src/test/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccessTest.java
+++ b/src/test/java/net/openhft/chronicle/hash/serialization/impl/ByteBufferDataAccessTest.java
@@ -24,8 +24,8 @@ public void getUsingTest() {
Data data1 = bbDataAccess.getData(bb1);
ByteBuffer bb2 = ByteBuffer.allocate(2);
data1.getUsing(bb2);
- assertEquals(bb2.get(0), 3);
- assertEquals(bb2.get(1), 4);
+ assertEquals(3, bb2.get(0));
+ assertEquals(4, bb2.get(1));
}
@Test
diff --git a/src/test/java/net/openhft/chronicle/map/AbstractMarshallableKeyValueTest.java b/src/test/java/net/openhft/chronicle/map/AbstractMarshallableKeyValueTest.java
index 5b7fbaf28..cda2c13eb 100644
--- a/src/test/java/net/openhft/chronicle/map/AbstractMarshallableKeyValueTest.java
+++ b/src/test/java/net/openhft/chronicle/map/AbstractMarshallableKeyValueTest.java
@@ -14,7 +14,7 @@
public final class AbstractMarshallableKeyValueTest {
@Test
- public void shouldAcceptAbstractMarshallableComponents() throws Exception {
+ public void shouldAcceptAbstractMarshallableComponents() {
final ChronicleMap map = ChronicleMapBuilder.of(Key.class, Value.class).entries(10).
averageKey(new Key()).averageValue(new Value()).create();
@@ -24,7 +24,7 @@ public void shouldAcceptAbstractMarshallableComponents() throws Exception {
}
@Test
- public void shouldAcceptAbstractMarshallableComponents2() throws Exception {
+ public void shouldAcceptAbstractMarshallableComponents2() {
final ChronicleMap map = ChronicleMapBuilder.of(Key.class, Marshallable.class).entries(10)
.averageKey(new Key()).averageValue(new Value())
.valueMarshaller(new TypedMarshallableReaderWriter<>(Marshallable.class))
@@ -37,10 +37,10 @@ public void shouldAcceptAbstractMarshallableComponents2() throws Exception {
}
private static final class Key extends SelfDescribingMarshallable {
- private String k = "key";
+ private final String k = "key";
}
private static final class Value extends SelfDescribingMarshallable {
- private Integer number = 17;
+ private final Integer number = 17;
}
}
diff --git a/src/test/java/net/openhft/chronicle/map/AcquireGetUsingMain.java b/src/test/java/net/openhft/chronicle/map/AcquireGetUsingMain.java
index f2918cc95..b2cf65b0b 100644
--- a/src/test/java/net/openhft/chronicle/map/AcquireGetUsingMain.java
+++ b/src/test/java/net/openhft/chronicle/map/AcquireGetUsingMain.java
@@ -71,7 +71,7 @@ public static void main(String[] args) throws IOException {
System.out.println("2 " + data.getTimeAt(1));
}
- public static interface Data {
+ public interface Data {
@Array(length = 8)
void setTimeAt(int index, long time);
diff --git a/src/test/java/net/openhft/chronicle/map/AutoResizeTest.java b/src/test/java/net/openhft/chronicle/map/AutoResizeTest.java
index 9eae1af02..af86b0039 100644
--- a/src/test/java/net/openhft/chronicle/map/AutoResizeTest.java
+++ b/src/test/java/net/openhft/chronicle/map/AutoResizeTest.java
@@ -70,6 +70,6 @@ public void testAutoResizeNotZeroUponRestart2() {
@Test(expected = IllegalArgumentException.class)
public void testNegativeReplication() {
- ChronicleMapBuilder.of(String.class, String.class).replication((byte) -1);
+ ChronicleMapBuilder.of(String.class, String.class).replication((byte) -1);
}
}
diff --git a/src/test/java/net/openhft/chronicle/map/BasicReplicationTest.java b/src/test/java/net/openhft/chronicle/map/BasicReplicationTest.java
index 00cfe531a..0a07aee02 100644
--- a/src/test/java/net/openhft/chronicle/map/BasicReplicationTest.java
+++ b/src/test/java/net/openhft/chronicle/map/BasicReplicationTest.java
@@ -150,6 +150,8 @@ void processPendingChangesLoop() {
for (IteratorAndDestinationMap iteratorAndDestinationMap : destinationMaps) {
while (iteratorAndDestinationMap.modificationIterator.nextEntry(
iteratorAndDestinationMap, sourceMap.identifier())) {
+ // drain all pending entries
+ continue;
}
}
diff --git a/src/test/java/net/openhft/chronicle/map/BooleanValuesTest.java b/src/test/java/net/openhft/chronicle/map/BooleanValuesTest.java
index fb8d79830..334bf83c1 100644
--- a/src/test/java/net/openhft/chronicle/map/BooleanValuesTest.java
+++ b/src/test/java/net/openhft/chronicle/map/BooleanValuesTest.java
@@ -6,8 +6,6 @@
import org.junit.Assert;
import org.junit.Test;
-import java.io.IOException;
-
/**
* @author Rob Austin.
*/
@@ -17,7 +15,7 @@ public class BooleanValuesTest {
* see issue here
*/
@Test
- public void testTestBooleanValues() throws IOException, InterruptedException {
+ public void testTestBooleanValues() {
try (ChronicleMap map = ChronicleMap.of(Integer.class, Boolean.class)
.entries(1).create()) {
map.put(7, true);
diff --git a/src/test/java/net/openhft/chronicle/map/BuildVersionTest.java b/src/test/java/net/openhft/chronicle/map/BuildVersionTest.java
index b1b39aa8b..2358fdc87 100644
--- a/src/test/java/net/openhft/chronicle/map/BuildVersionTest.java
+++ b/src/test/java/net/openhft/chronicle/map/BuildVersionTest.java
@@ -7,15 +7,13 @@
import org.junit.Assert;
import org.junit.Test;
-import java.io.IOException;
-
/**
* @author Rob Austin.
*/
public class BuildVersionTest {
@Test
- public void test() throws IOException, InterruptedException {
+ public void test() {
// checks that we always get a version
Assert.assertNotNull(BuildVersion.version());
}
@@ -25,7 +23,7 @@ public void test() throws IOException, InterruptedException {
*
*/
@Test
- public void testVersion() throws IOException, InterruptedException {
+ public void testVersion() {
try (ChronicleMap expected = ChronicleMap.of(Integer.class, Double.class)
.entries(1).create()) {
diff --git a/src/test/java/net/openhft/chronicle/map/BytesMarshallableValueTest.java b/src/test/java/net/openhft/chronicle/map/BytesMarshallableValueTest.java
index 7d6ae35ea..f72b95167 100644
--- a/src/test/java/net/openhft/chronicle/map/BytesMarshallableValueTest.java
+++ b/src/test/java/net/openhft/chronicle/map/BytesMarshallableValueTest.java
@@ -23,8 +23,8 @@ public void bytesMarshallableValueTest() {
}
public static class Value implements BytesMarshallable {
- int x;
- String foo;
+ final int x;
+ final String foo;
public Value(int x, String foo) {
this.x = x;
diff --git a/src/test/java/net/openhft/chronicle/map/CHMLatencyTestMain.java b/src/test/java/net/openhft/chronicle/map/CHMLatencyTestMain.java
index 4409f63da..5d09745c6 100644
--- a/src/test/java/net/openhft/chronicle/map/CHMLatencyTestMain.java
+++ b/src/test/java/net/openhft/chronicle/map/CHMLatencyTestMain.java
@@ -59,7 +59,7 @@ public class CHMLatencyTestMain {
public static void main(String... ignored) throws IOException {
AffinityLock lock = AffinityLock.acquireCore();
File file = File.createTempFile("testCHMLatency", "deleteme");
-// File file = new File("testCHMLatency.deleteme");
+ // File file = new File("testCHMLatency.deleteme");
file.delete();
ChronicleMap countersMap =
ChronicleMapBuilder.of(LongValue.class, LongValue.class)
@@ -75,7 +75,7 @@ public static void main(String... ignored) throws IOException {
value.setValue(0);
}
System.out.println("Keys created");
-// Monitor monitor = new Monitor();
+ // Monitor monitor = new Monitor();
LongValue value2 = Values.newNativeReference(LongValue.class);
for (int t = 0; t < 5; t++) {
for (int rate : new int[]{2 * 1000 * 1000, 1000 * 1000, 500 * 1000/*, 250 * 1000, 100 * 1000, 50 * 1000*/}) {
@@ -89,9 +89,9 @@ public static void main(String... ignored) throws IOException {
// the timed part
for (int i = 0; i < KEYS && u < RUN_TIME * rate; i += stride) {
// busy wait for next time.
- while (System.nanoTime() < next - 12) ;
-// monitor.sample = System.nanoTime();
- long start0 = next;
+ while (System.nanoTime() < next - 12)
+ Thread.yield();
+ final long start0 = next;
// start the update.
key.setValue(i);
@@ -105,22 +105,22 @@ public static void main(String... ignored) throws IOException {
times.sample(elapse);
next += delay;
}
-// monitor.sample = Long.MAX_VALUE;
+ // monitor.sample = Long.MAX_VALUE;
}
System.out.printf("run %d %,9d : ", t, rate);
times.printPercentiles(" micro-seconds.");
}
System.out.println();
}
-// monitor.running = false;
+ // monitor.running = false;
countersMap.close();
file.delete();
}
static class Monitor implements Runnable {
final Thread thread;
- volatile boolean running = true;
- volatile long sample;
+ final boolean running = true;
+ final long sample;
Monitor() {
this.thread = Thread.currentThread();
diff --git a/src/test/java/net/openhft/chronicle/map/CHMTestIterator1.java b/src/test/java/net/openhft/chronicle/map/CHMTestIterator1.java
index 6b3f1dc73..978492119 100644
--- a/src/test/java/net/openhft/chronicle/map/CHMTestIterator1.java
+++ b/src/test/java/net/openhft/chronicle/map/CHMTestIterator1.java
@@ -20,12 +20,12 @@ public static void main(String[] args) {
.entries(runs);
try (ChronicleMap chm = builder.create()) {
- /*chm.put("k1", alValue.incrementAndGet());
- chm.put("k2", alValue.incrementAndGet());
- chm.put("k3", alValue.incrementAndGet());
- chm.put("k4", alValue.incrementAndGet());
- chm.put("k5", alValue.incrementAndGet());*/
- //chm.keySet();
+ // chm.put("k1", alValue.incrementAndGet());
+ // chm.put("k2", alValue.incrementAndGet());
+ // chm.put("k3", alValue.incrementAndGet());
+ // chm.put("k4", alValue.incrementAndGet());
+ // chm.put("k5", alValue.incrementAndGet());
+ // chm.keySet();
for (int i = 0; i < runs; i++) {
chm.put("k" + alKey.incrementAndGet(), alValue.incrementAndGet());
diff --git a/src/test/java/net/openhft/chronicle/map/CHMUseCasesTest.java b/src/test/java/net/openhft/chronicle/map/CHMUseCasesTest.java
index ac310a341..c55c736a7 100644
--- a/src/test/java/net/openhft/chronicle/map/CHMUseCasesTest.java
+++ b/src/test/java/net/openhft/chronicle/map/CHMUseCasesTest.java
@@ -136,6 +136,7 @@ interface Inner {
}
}
+
/**
* This test enumerates common use cases for keys and values.
*/
@@ -219,7 +220,9 @@ else if (map1.valueClass() == byte[][].class) {
for (int i = 0; i < o1.length; i++) {
Assert.assertArrayEquals(o1[i], o2[i]);
}
- } else throw new IllegalStateException("unsupported type");
+ } else {
+ throw new IllegalStateException("unsupported type");
+ }
}
}
@@ -501,7 +504,7 @@ public void testStringStringMap() throws
assertEquals("World", map.get("Hello"));
assertEquals("New World", map.getMapped("Hello", new PrefixStringFunction("New ")));
- assertEquals(null, map.getMapped("No key", new PrefixStringFunction("New ")));
+ assertNull(map.getMapped("No key", new PrefixStringFunction("New ")));
mapChecks();
}
}
@@ -582,7 +585,7 @@ public void testCharSequenceCharSequenceMap()
assertNull(map.getUsing(key, value));
assertEquals("New World", map.getMapped("Hello", s -> "New " + s));
- assertEquals(null, map.getMapped("No key",
+ assertNull(map.getMapped("No key",
(SerializableFunction) s -> "New " + s));
assertEquals("New World !!", map.computeIfPresent("Hello", (k, s) -> {
@@ -635,7 +638,7 @@ public void testGetUsingWithIntValueNoValue() throws IOException {
assertNull(c.entry());
}
- assertEquals(null, map.get("1"));
+ assertNull(map.get("1"));
mapChecks();
}
}
@@ -657,19 +660,19 @@ public void testAcquireUsingImmutableUsing() throws IOException {
using.setValue(1);
}
- assertEquals(null, map.get("1"));
+ assertNull(map.get("1"));
mapChecks();
}
}
@Test(expected = IllegalArgumentException.class)
public void testNegativeActualChunkSize() {
- ChronicleMapBuilder.of(String.class, String.class).actualChunkSize(-1);
+ ChronicleMapBuilder.of(String.class, String.class).actualChunkSize(-1);
}
@Test(expected = IllegalArgumentException.class)
public void testActualChunksPerSegmentTier() {
- ChronicleMapBuilder.of(String.class, String.class).actualChunksPerSegmentTier(0);
+ ChronicleMapBuilder.of(String.class, String.class).actualChunksPerSegmentTier(0);
}
@Test
@@ -737,9 +740,9 @@ public void testStringValueStringValueMap() throws IOException {
try (ChronicleMap map = newInstance(builder)) {
StringValue key1 = Values.newHeapInstance(StringValue.class);
- StringValue key2 = Values.newHeapInstance(StringValue.class);
+ final StringValue key2 = Values.newHeapInstance(StringValue.class);
StringValue value1 = Values.newHeapInstance(StringValue.class);
- StringValue value2 = Values.newHeapInstance(StringValue.class);
+ final StringValue value2 = Values.newHeapInstance(StringValue.class);
key1.setValue(new StringBuilder("1"));
value1.setValue("11");
@@ -876,9 +879,9 @@ public void testIntegerIntegerMap()
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
Integer key1;
- Integer key2;
+ final Integer key2;
Integer value1;
- Integer value2;
+ final Integer value2;
key1 = 1;
value1 = 11;
@@ -892,8 +895,8 @@ public void testIntegerIntegerMap()
assertEquals((Integer) 11, map.get(key1));
assertEquals((Integer) 22, map.get(key2));
- assertEquals(null, map.get(3));
- assertEquals(null, map.get(4));
+ assertNull(map.get(3));
+ assertNull(map.get(4));
mapChecks();
@@ -906,7 +909,7 @@ public Integer apply(Integer s) {
mapChecks();
- assertEquals(null, map.getMapped(-1, new SerializableFunction() {
+ assertNull(map.getMapped(-1, new SerializableFunction() {
@Override
public Integer apply(Integer s) {
return 10 * s;
@@ -918,6 +921,7 @@ public Integer apply(Integer s) {
try {
map.computeIfPresent(1, (k, s) -> s + 1);
} catch (Exception todoMoreSpecificException) {
+ assertNotNull(todoMoreSpecificException);
}
mapChecks();
@@ -932,16 +936,16 @@ public void testLongLongMap() throws IOException {
.entries(10);
try (ChronicleMap map = newInstance(builder)) {
-// assertEquals(16, entrySize(map));
-// assertEquals(1, ((VanillaChronicleMap) map).maxChunksPerEntry);
+ // assertEquals(16, entrySize(map));
+ // assertEquals(1, ((VanillaChronicleMap) map).maxChunksPerEntry);
map.put(1L, 11L);
assertEquals((Long) 11L, map.get(1L));
map.put(2L, 22L);
assertEquals((Long) 22L, map.get(2L));
- assertEquals(null, map.get(3L));
- assertEquals(null, map.get(4L));
+ assertNull(map.get(3L));
+ assertNull(map.get(4L));
mapChecks();
@@ -951,13 +955,14 @@ public Long apply(Long s) {
return 10 * s;
}
}));
- assertEquals(null, map.getMapped(-1L, (SerializableFunction) s -> 10 * s));
+ assertNull(map.getMapped(-1L, (SerializableFunction) s -> 10 * s));
mapChecks();
try {
map.computeIfPresent(1L, (k, s) -> s + 1);
} catch (Exception todoMoreSpecificException) {
+ assertNotNull(todoMoreSpecificException);
}
mapChecks();
@@ -980,8 +985,8 @@ public void testDoubleDoubleMap() throws IOException {
map.put(2.0, 22.0);
assertEquals((Double) 22.0, map.get(2.0));
- assertEquals(null, map.get(3.0));
- assertEquals(null, map.get(4.0));
+ assertNull(map.get(3.0));
+ assertNull(map.get(4.0));
assertEquals((Double) 110.0, map.getMapped(1.0, new SerializableFunction() {
@Override
@@ -989,12 +994,13 @@ public Double apply(Double s) {
return 10 * s;
}
}));
- assertEquals(null, map.getMapped(-1.0, (SerializableFunction) s -> 10 * s));
+ assertNull(map.getMapped(-1.0, (SerializableFunction) s -> 10 * s));
try {
map.computeIfPresent(1.0, (k, s) -> s + 1);
} catch (Exception todoMoreSpecificException) {
+ assertNotNull(todoMoreSpecificException);
}
}
}
@@ -1009,39 +1015,37 @@ public void testByteArrayByteArrayMap()
try (ChronicleMap map = newInstance(builder)) {
byte[] key1 = {1, 1, 1, 1};
- byte[] key2 = {2, 2, 2, 2};
+ final byte[] key2 = {2, 2, 2, 2};
byte[] value1 = {11, 11, 11, 11};
- byte[] value2 = {22, 22, 22, 22};
+ final byte[] value2 = {22, 22, 22, 22};
assertNull(map.put(key1, value1));
- assertTrue(Arrays.equals(value1, map.put(key1, value2)));
- assertTrue(Arrays.equals(value2, map.get(key1)));
+ assertArrayEquals(value1, map.put(key1, value2));
+ assertArrayEquals(value2, map.get(key1));
assertNull(map.get(key2));
map.put(key1, value1);
- assertTrue(Arrays.equals(new byte[]{11, 11},
- map.getMapped(key1, new SerializableFunction() {
- @Override
- public byte[] apply(byte[] s) {
- return Arrays.copyOf(s, 2);
- }
- })));
- assertEquals(null, map.getMapped(key2, new SerializableFunction() {
+ assertArrayEquals(new byte[]{11, 11}, map.getMapped(key1, new SerializableFunction() {
+ @Override
+ public byte[] apply(byte[] s) {
+ return Arrays.copyOf(s, 2);
+ }
+ }));
+ assertNull(map.getMapped(key2, new SerializableFunction() {
@Override
public byte[] apply(byte[] s) {
return Arrays.copyOf(s, 2);
}
}));
- assertTrue(Arrays.equals(new byte[]{12, 10},
- map.computeIfPresent(key1, (k, s) -> {
- s[0]++;
- s[1]--;
- return Arrays.copyOf(s, 2);
- })));
+ assertArrayEquals(new byte[]{12, 10}, map.computeIfPresent(key1, (k, s) -> {
+ s[0]++;
+ s[1]--;
+ return Arrays.copyOf(s, 2);
+ }));
byte[] a2 = map.get(key1);
- assertTrue(Arrays.equals(new byte[]{12, 10}, a2));
+ assertArrayEquals(new byte[]{12, 10}, a2);
}
}
@@ -1058,10 +1062,10 @@ public void testByteBufferByteBufferDefaultKeyValueMarshaller() throws
try (ChronicleMap map = newInstance(builder)) {
- ByteBuffer key1 = ByteBuffer.wrap(new byte[]{1, 1, 1, 1});
- ByteBuffer key2 = ByteBuffer.wrap(new byte[]{2, 2, 2, 2});
- ByteBuffer value1 = ByteBuffer.wrap(new byte[]{11, 11, 11, 11});
- ByteBuffer value2 = ByteBuffer.wrap(new byte[]{22, 22, 22, 22});
+ final ByteBuffer key1 = ByteBuffer.wrap(new byte[]{1, 1, 1, 1});
+ final ByteBuffer key2 = ByteBuffer.wrap(new byte[]{2, 2, 2, 2});
+ final ByteBuffer value1 = ByteBuffer.wrap(new byte[]{11, 11, 11, 11});
+ final ByteBuffer value2 = ByteBuffer.wrap(new byte[]{22, 22, 22, 22});
assertNull(map.put(key1, value1));
assertBBEquals(value1, map.put(key1, value2));
assertBBEquals(value2, map.get(key1));
@@ -1086,9 +1090,9 @@ public void testByteBufferByteBufferMap()
try (ChronicleMap map = newInstance(builder)) {
ByteBuffer key1 = ByteBuffer.wrap(new byte[]{1, 1, 1, 1}).order(ByteOrder.nativeOrder());
- ByteBuffer key2 = ByteBuffer.wrap(new byte[]{2, 2, 2, 2}).order(ByteOrder.nativeOrder());
+ final ByteBuffer key2 = ByteBuffer.wrap(new byte[]{2, 2, 2, 2}).order(ByteOrder.nativeOrder());
ByteBuffer value1 = ByteBuffer.wrap(new byte[]{11, 11, 11, 11}).order(ByteOrder.nativeOrder());
- ByteBuffer value2 = ByteBuffer.wrap(new byte[]{22, 22, 22, 22}).order(ByteOrder.nativeOrder());
+ final ByteBuffer value2 = ByteBuffer.wrap(new byte[]{22, 22, 22, 22}).order(ByteOrder.nativeOrder());
assertNull(map.put(key1, value1));
assertBBEquals(value1, map.put(key1, value2));
assertBBEquals(value2, map.get(key1));
@@ -1106,7 +1110,7 @@ public ByteBuffer apply(ByteBuffer s) {
map.put(key1, value1);
assertBBEquals(ByteBuffer.wrap(new byte[]{11, 11}), map.getMapped(key1, function));
- assertEquals(null, map.getMapped(key2, function));
+ assertNull(map.getMapped(key2, function));
mapChecks();
assertBBEquals(ByteBuffer.wrap(new byte[]{12, 10}),
map.computeIfPresent(key1, (k, s) -> {
@@ -1123,7 +1127,7 @@ public ByteBuffer apply(ByteBuffer s) {
map.put(key2, value2);
ByteBuffer valueA = ByteBuffer.allocateDirect(8).order(ByteOrder.nativeOrder());
ByteBuffer valueB = ByteBuffer.allocate(8).order(ByteOrder.nativeOrder());
-// assertBBEquals(value1, valueA);
+ // assertBBEquals(value1, valueA);
try (ExternalMapQueryContext c = map.queryContext(key1)) {
MapEntry entry = c.entry();
assertNotNull(entry);
@@ -1263,12 +1267,12 @@ public void testIntValueIntValueMap() throws IOException {
try (ChronicleMap map = newInstance(builder)) {
// this may change due to alignment
-// assertEquals(8, entrySize(map));
+ // assertEquals(8, entrySize(map));
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
- IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
- IntValue value1 = Values.newHeapInstance(IntValue.class);
- IntValue value2 = Values.newHeapInstance(IntValue.class);
+ final IntValue key1 = Values.newHeapInstance(IntValue.class);
+ final IntValue key2 = Values.newHeapInstance(IntValue.class);
+ final IntValue value1 = Values.newHeapInstance(IntValue.class);
+ final IntValue value2 = Values.newHeapInstance(IntValue.class);
key1.setValue(1);
value1.setValue(11);
@@ -1287,10 +1291,6 @@ public void testIntValueIntValueMap() throws IOException {
}
// TODO review -- the previous version of this block:
// acquiring for value1, comparing value2 -- as intended?
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext c = map.queryContext(key2)) {
MapEntry entry = c.entry();
assertNotNull(entry);
@@ -1377,15 +1377,16 @@ public void testUnsignedIntValueUnsignedIntValueMap() throws IOException {
assertEquals(value1, map.get(key1));
key1 = Values.newHeapInstance(UnsignedIntValue.class);
- UnsignedIntValue key2 = Values.newHeapInstance(UnsignedIntValue.class);
value1 = Values.newHeapInstance(UnsignedIntValue.class);
- UnsignedIntValue value2 = Values.newHeapInstance(UnsignedIntValue.class);
key1.setValue(1);
value1.setValue(11);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ UnsignedIntValue key2 = Values.newHeapInstance(UnsignedIntValue.class);
+ UnsignedIntValue value2 = Values.newHeapInstance(UnsignedIntValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -1398,10 +1399,6 @@ public void testUnsignedIntValueUnsignedIntValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO review suspicious block
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext c =
map.queryContext(key2)) {
MapEntry entry = c.entry();
@@ -1488,10 +1485,10 @@ public void testIntValueShortValueMap() throws IOException {
// assertEquals(6, entrySize(map));
// assertEquals(1, ((VanillaChronicleMap) map).maxChunksPerEntry);
- IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
- ShortValue value1 = Values.newHeapInstance(ShortValue.class);
- ShortValue value2 = Values.newHeapInstance(ShortValue.class);
+ final IntValue key1 = Values.newHeapInstance(IntValue.class);
+ final IntValue key2 = Values.newHeapInstance(IntValue.class);
+ final ShortValue value1 = Values.newHeapInstance(ShortValue.class);
+ final ShortValue value2 = Values.newHeapInstance(ShortValue.class);
key1.setValue(1);
value1.setValue((short) 11);
@@ -1509,10 +1506,10 @@ public void testIntValueShortValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO the same as above.
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
+ // try (ReadContext rc = map.getUsingLocked(key2, value1)) {
+ // assertTrue(rc.present());
+ // assertEquals(22, value2.getValue());
+ // }
try (ExternalMapQueryContext, ShortValue, ?> c = map.queryContext(key2)) {
MapEntry, ShortValue> entry = c.entry();
assertNotNull(entry);
@@ -1594,15 +1591,16 @@ public void testIntValueUnsignedShortValueMap() throws IOException {
// assertEquals(8, entrySize(map));
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
UnsignedShortValue value1 = Values.newHeapInstance(UnsignedShortValue.class);
- UnsignedShortValue value2 = Values.newHeapInstance(UnsignedShortValue.class);
key1.setValue(1);
value1.setValue(11);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ IntValue key2 = Values.newHeapInstance(IntValue.class);
+ UnsignedShortValue value2 = Values.newHeapInstance(UnsignedShortValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -1614,10 +1612,6 @@ public void testIntValueUnsignedShortValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO the same as above.
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext, UnsignedShortValue, ?> c = map.queryContext(key2)) {
MapEntry, UnsignedShortValue> entry = c.entry();
assertNotNull(entry);
@@ -1696,15 +1690,16 @@ public void testIntValueCharValueMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
CharValue value1 = Values.newHeapInstance(CharValue.class);
- CharValue value2 = Values.newHeapInstance(CharValue.class);
key1.setValue(1);
value1.setValue((char) 11);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ IntValue key2 = Values.newHeapInstance(IntValue.class);
+ CharValue value2 = Values.newHeapInstance(CharValue.class);
+
key2.setValue(2);
value2.setValue((char) 22);
map.put(key2, value2);
@@ -1716,10 +1711,6 @@ public void testIntValueCharValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO The same as above
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext, CharValue, ?> c = map.queryContext(key2)) {
MapEntry, CharValue> entry = c.entry();
assertNotNull(entry);
@@ -1799,15 +1790,16 @@ public void testIntValueUnsignedByteMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
UnsignedByteValue value1 = Values.newHeapInstance(UnsignedByteValue.class);
- UnsignedByteValue value2 = Values.newHeapInstance(UnsignedByteValue.class);
key1.setValue(1);
value1.setValue(11);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ IntValue key2 = Values.newHeapInstance(IntValue.class);
+ UnsignedByteValue value2 = Values.newHeapInstance(UnsignedByteValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -1819,10 +1811,6 @@ public void testIntValueUnsignedByteMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO the same as above
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext, UnsignedByteValue, ?> c = map.queryContext(key2)) {
MapEntry, UnsignedByteValue> entry = c.entry();
assertNotNull(entry);
@@ -1901,15 +1889,16 @@ public void testIntValueBooleanValueMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
IntValue key1 = Values.newHeapInstance(IntValue.class);
- IntValue key2 = Values.newHeapInstance(IntValue.class);
BooleanValue value1 = Values.newHeapInstance(BooleanValue.class);
- BooleanValue value2 = Values.newHeapInstance(BooleanValue.class);
key1.setValue(1);
value1.setValue(true);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ IntValue key2 = Values.newHeapInstance(IntValue.class);
+ BooleanValue value2 = Values.newHeapInstance(BooleanValue.class);
+
key2.setValue(2);
value2.setValue(false);
map.put(key2, value2);
@@ -1918,27 +1907,23 @@ public void testIntValueBooleanValueMap() throws IOException {
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key1)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(true, entry.value().get().getValue());
+ assertTrue(entry.value().get().getValue());
}
// TODO the same as above. copy paste, copy paste, copy-paste...
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(false, value2.getValue());
-// }
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key2)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(false, entry.value().get().getValue());
+ assertFalse(entry.value().get().getValue());
}
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key1)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(true, entry.value().get().getValue());
+ assertTrue(entry.value().get().getValue());
}
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key2)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(false, entry.value().get().getValue());
+ assertFalse(entry.value().get().getValue());
}
key1.setValue(3);
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key1)) {
@@ -1951,38 +1936,38 @@ public void testIntValueBooleanValueMap() throws IOException {
try (net.openhft.chronicle.core.io.Closeable c =
map.acquireContext(key1, value1)) {
- assertEquals(false, value1.getValue());
+ assertFalse(value1.getValue());
value1.setValue(true);
- assertEquals(true, value1.getValue());
+ assertTrue(value1.getValue());
}
try (net.openhft.chronicle.core.io.Closeable c =
map.acquireContext(key1, value2)) {
- assertEquals(true, value2.getValue());
+ assertTrue(value2.getValue());
value2.setValue(false);
- assertEquals(false, value2.getValue());
+ assertFalse(value2.getValue());
}
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key1)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(false, entry.value().get().getValue());
+ assertFalse(entry.value().get().getValue());
}
try (net.openhft.chronicle.core.io.Closeable c =
map.acquireContext(key2, value2)) {
- assertEquals(false, value2.getValue());
+ assertFalse(value2.getValue());
value2.setValue(true);
- assertEquals(true, value2.getValue());
+ assertTrue(value2.getValue());
}
try (net.openhft.chronicle.core.io.Closeable c =
map.acquireContext(key2, value1)) {
- assertEquals(true, value1.getValue());
+ assertTrue(value1.getValue());
value1.setValue(false);
- assertEquals(false, value1.getValue());
+ assertFalse(value1.getValue());
}
try (ExternalMapQueryContext, BooleanValue, ?> c = map.queryContext(key2)) {
MapEntry, BooleanValue> entry = c.entry();
assertNotNull(entry);
- assertEquals(false, entry.value().get().getValue());
+ assertFalse(entry.value().get().getValue());
}
mapChecks();
}
@@ -2003,15 +1988,16 @@ public void testFloatValueFloatValueMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
FloatValue key1 = Values.newHeapInstance(FloatValue.class);
- FloatValue key2 = Values.newHeapInstance(FloatValue.class);
FloatValue value1 = Values.newHeapInstance(FloatValue.class);
- FloatValue value2 = Values.newHeapInstance(FloatValue.class);
key1.setValue(1);
value1.setValue(11);
map.put(key1, value1);
assertEquals(value1, map.get(key1));
+ FloatValue key2 = Values.newHeapInstance(FloatValue.class);
+ FloatValue value2 = Values.newHeapInstance(FloatValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -2023,10 +2009,6 @@ public void testFloatValueFloatValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue(), 0);
}
// TODO see above
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue(), 0);
-// }
try (ExternalMapQueryContext, FloatValue, ?> c = map.queryContext(key2)) {
MapEntry, FloatValue> entry = c.entry();
assertNotNull(entry);
@@ -2108,18 +2090,19 @@ public void testDoubleValueDoubleValueMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
DoubleValue key1 = Values.newHeapInstance(DoubleValue.class);
- DoubleValue key2 = Values.newHeapInstance(DoubleValue.class);
DoubleValue value1 = Values.newHeapInstance(DoubleValue.class);
- DoubleValue value2 = Values.newHeapInstance(DoubleValue.class);
key1.setValue(1);
value1.setValue(11);
- assertEquals(null, map.get(key1));
+ assertNull(map.get(key1));
map.put(key1, value1);
DoubleValue v2 = map.get(key1);
assertEquals(value1, v2);
+ DoubleValue key2 = Values.newHeapInstance(DoubleValue.class);
+ DoubleValue value2 = Values.newHeapInstance(DoubleValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -2130,10 +2113,6 @@ public void testDoubleValueDoubleValueMap() throws IOException {
assertNotNull(entry);
assertEquals(11, entry.value().get().getValue(), 0);
}
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue(), 0);
-// }
try (ExternalMapQueryContext, DoubleValue, ?> c = map.queryContext(key2)) {
MapEntry, DoubleValue> entry = c.entry();
assertNotNull(entry);
@@ -2214,15 +2193,16 @@ public void testLongValueLongValueMap() throws IOException {
assertEquals(1, ((VanillaChronicleMap, ?, ?>) map).maxChunksPerEntry);
LongValue key1 = Values.newHeapInstance(LongValue.class);
- LongValue key2 = Values.newHeapInstance(LongValue.class);
LongValue value1 = Values.newHeapInstance(LongValue.class);
- LongValue value2 = Values.newHeapInstance(LongValue.class);
key1.setValue(1);
value1.setValue(11);
- assertEquals(null, map.get(key1));
+ assertNull(map.get(key1));
map.put(key1, value1);
+ LongValue key2 = Values.newHeapInstance(LongValue.class);
+ LongValue value2 = Values.newHeapInstance(LongValue.class);
+
key2.setValue(2);
value2.setValue(22);
map.put(key2, value2);
@@ -2234,10 +2214,6 @@ public void testLongValueLongValueMap() throws IOException {
assertEquals(11, entry.value().get().getValue());
}
// TODO see above
-// try (ReadContext rc = map.getUsingLocked(key2, value1)) {
-// assertTrue(rc.present());
-// assertEquals(22, value2.getValue());
-// }
try (ExternalMapQueryContext, LongValue, ?> c = map.queryContext(key2)) {
MapEntry, LongValue> entry = c.entry();
assertNotNull(entry);
@@ -2312,18 +2288,18 @@ public void testListValue() throws IOException {
try (ChronicleMap> map = newInstance(builder)) {
map.put("1", Collections.emptyList());
- map.put("2", asList("two-A"));
+ map.put("2", Collections.singletonList("two-A"));
List list1 = new ArrayList<>();
try (net.openhft.chronicle.core.io.Closeable c = map.acquireContext("1", list1)) {
list1.add("one");
- assertEquals(asList("one"), list1);
+ assertEquals(Collections.singletonList("one"), list1);
}
List list2 = new ArrayList<>();
try (ExternalMapQueryContext, ?> c = map.queryContext("1")) {
MapEntry> entry = c.entry();
assertNotNull(entry);
- assertEquals(asList("one"), entry.value().getUsing(list2));
+ assertEquals(Collections.singletonList("one"), entry.value().getUsing(list2));
}
try (ExternalMapQueryContext, ?> c = map.queryContext("2")) {
@@ -2358,18 +2334,18 @@ public void testSetValue() throws IOException {
try (ChronicleMap> map = newInstance(builder)) {
map.put("1", Collections.emptySet());
- map.put("2", new LinkedHashSet<>(asList("one")));
+ map.put("2", new LinkedHashSet<>(Collections.singletonList("one")));
Set list1 = new LinkedHashSet<>();
try (net.openhft.chronicle.core.io.Closeable c = map.acquireContext("1", list1)) {
list1.add("two");
- assertEquals(new LinkedHashSet<>(asList("two")), list1);
+ assertEquals(new LinkedHashSet<>(Collections.singletonList("two")), list1);
}
Set list2 = new LinkedHashSet<>();
try (ExternalMapQueryContext, ?> c = map.queryContext("1")) {
MapEntry> entry = c.entry();
assertNotNull(entry);
- assertEquals(new LinkedHashSet<>(asList("two")), entry.value().getUsing(list2));
+ assertEquals(new LinkedHashSet<>(Collections.singletonList("two")), entry.value().getUsing(list2));
}
try (net.openhft.chronicle.core.io.Closeable c =
map.acquireContext("2", list1)) {
@@ -2656,7 +2632,7 @@ public String toString() {
private static class StringPrefixUnaryOperator
implements BiFunction, Serializable {
- private String prefix;
+ private final String prefix;
StringPrefixUnaryOperator(final String prefix1) {
prefix = prefix1;
diff --git a/src/test/java/net/openhft/chronicle/map/ChronicleMapEqualsTest.java b/src/test/java/net/openhft/chronicle/map/ChronicleMapEqualsTest.java
index 76efdd684..34b7af090 100644
--- a/src/test/java/net/openhft/chronicle/map/ChronicleMapEqualsTest.java
+++ b/src/test/java/net/openhft/chronicle/map/ChronicleMapEqualsTest.java
@@ -7,7 +7,7 @@
import java.util.HashMap;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
public class ChronicleMapEqualsTest {
@@ -22,6 +22,6 @@ public void test() {
HashMap refMap = new HashMap<>();
refMap.put("a", "b");
map.putAll(refMap);
- assertTrue(map.equals(refMap));
+ assertEquals(map, refMap);
}
}
diff --git a/src/test/java/net/openhft/chronicle/map/ChronicleMapImportExportTest.java b/src/test/java/net/openhft/chronicle/map/ChronicleMapImportExportTest.java
index 469139431..13580427d 100644
--- a/src/test/java/net/openhft/chronicle/map/ChronicleMapImportExportTest.java
+++ b/src/test/java/net/openhft/chronicle/map/ChronicleMapImportExportTest.java
@@ -15,8 +15,8 @@
import org.junit.Test;
import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
+import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;
@@ -31,7 +31,7 @@ public class ChronicleMapImportExportTest {
public static final String TMP = OS.getTarget();
@Test
- public void test() throws IOException, InterruptedException {
+ public void test() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
@@ -56,7 +56,7 @@ public void test() throws IOException, InterruptedException {
}
@Test
- public void testWithMapValue() throws IOException, InterruptedException {
+ public void testWithMapValue() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
@@ -85,7 +85,7 @@ public void testWithMapValue() throws IOException, InterruptedException {
}
@Test
- public void testWithMapOfMapValue() throws IOException, InterruptedException {
+ public void testWithMapOfMapValue() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
@@ -115,7 +115,7 @@ public void testWithMapOfMapValue() throws IOException, InterruptedException {
}
@Test
- public void testWithIntegerAndDouble() throws IOException, InterruptedException {
+ public void testWithIntegerAndDouble() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
@@ -138,7 +138,7 @@ public void testWithIntegerAndDouble() throws IOException, InterruptedException
}
@Test
- public void testWithCharSeq() throws IOException, InterruptedException {
+ public void testWithCharSeq() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
@@ -166,7 +166,7 @@ public void testWithCharSeq() throws IOException, InterruptedException {
}
@Test
- public void testFromHashMap() throws IOException, InterruptedException {
+ public void testFromHashMap() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
System.out.println(file.getCanonicalFile());
@@ -181,7 +181,7 @@ public void testFromHashMap() throws IOException, InterruptedException {
final XStream xstream = new XStream(new JettisonMappedXmlDriver());
xstream.setMode(XStream.NO_REFERENCES);
- xstream.toXML(map, new FileOutputStream(file));
+ xstream.toXML(map, Files.newOutputStream(file.toPath()));
try (ChronicleMap expected = ChronicleMapBuilder
.of(Integer.class, String.class)
@@ -204,7 +204,7 @@ public void testFromHashMap() throws IOException, InterruptedException {
}
@Test
- public void testWithLongValue() throws IOException, InterruptedException {
+ public void testWithLongValue() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
//file.deleteOnExit();
@@ -233,12 +233,12 @@ public void testWithLongValue() throws IOException, InterruptedException {
Assert.assertEquals(expected, actual);
}
} finally {
- // file.delete();
+ file.delete();
}
}
@Test
- public void testBondVOInterface() throws IOException, InterruptedException {
+ public void testBondVOInterface() throws IOException {
File file = new File(TMP + "/chronicle-map-" + Time.uniqueId() + ".json");
file.deleteOnExit();
diff --git a/src/test/java/net/openhft/chronicle/map/ChronicleMapSanityCheckTest.java b/src/test/java/net/openhft/chronicle/map/ChronicleMapSanityCheckTest.java
index d87bd739c..6beed2bbd 100644
--- a/src/test/java/net/openhft/chronicle/map/ChronicleMapSanityCheckTest.java
+++ b/src/test/java/net/openhft/chronicle/map/ChronicleMapSanityCheckTest.java
@@ -24,7 +24,7 @@
public class ChronicleMapSanityCheckTest {
@Test
- public void testSanity1() throws IOException, InterruptedException {
+ public void testSanity1() throws IOException {
String tmp = OS.getTarget();
@@ -33,7 +33,7 @@ public void testSanity1() throws IOException, InterruptedException {
File file = new File(pathname);
System.out.println("Starting sanity test 1. Chronicle file :" +
- file.getAbsolutePath().toString());
+ file.getAbsolutePath());
ScheduledExecutorService producerExecutor =
Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors() - 1,
@@ -43,7 +43,7 @@ public void testSanity1() throws IOException, InterruptedException {
Executors.newSingleThreadScheduledExecutor(
new NamedThreadFactory("consumer"));
- int N = 1000;
+ int iterations = 1000;
int producerPeriod = 100;
TimeUnit producerTimeUnit = TimeUnit.MILLISECONDS;
@@ -55,8 +55,8 @@ public void testSanity1() throws IOException, InterruptedException {
try (ChronicleMap map =
ChronicleMapBuilder.of(String.class, DummyValue.class)
- .averageKey("" + N).averageValue(DummyValue.DUMMY_VALUE)
- .entries(N)
+ .averageKey("" + iterations).averageValue(DummyValue.DUMMY_VALUE)
+ .entries(iterations)
.createPersistedTo(file)) {
map.clear();
@@ -67,7 +67,7 @@ public void testSanity1() throws IOException, InterruptedException {
Random r = new Random();
System.out.println("Before PRODUCING size is " + map.size());
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < iterations; i++) {
LockSupport.parkNanos(r.nextInt(5));
map.put(String.valueOf(i), DummyValue.DUMMY_VALUE);
}
diff --git a/src/test/java/net/openhft/chronicle/map/ChronicleMapTest.java b/src/test/java/net/openhft/chronicle/map/ChronicleMapTest.java
index 5495fd887..50f81fc33 100644
--- a/src/test/java/net/openhft/chronicle/map/ChronicleMapTest.java
+++ b/src/test/java/net/openhft/chronicle/map/ChronicleMapTest.java
@@ -27,6 +27,7 @@
import static java.util.stream.Collectors.toSet;
import static org.junit.Assert.*;
+
@org.junit.Ignore("flaky test see - https://teamcity.chronicle.software/repository/download/OpenHFT_ReleaseJob_ReleaseByArtifact/643179:id/ReleaseAutomation/projects/chronicle-map-runTests-1642011539698.log")
@SuppressWarnings({"rawtypes", "unchecked", "ResultOfMethodCallIgnored", "try"})
public class ChronicleMapTest {
@@ -96,8 +97,8 @@ public static IntValue nativeIntValue() {
}
static File getPersistenceFile() {
- String TMP = OS.getTarget();
- File file = new File(TMP + "/chm-test" + Time.uniqueId() + count++);
+ String tmpDir = OS.getTarget();
+ File file = new File(tmpDir + "/chm-test" + Time.uniqueId() + count++);
file.deleteOnExit();
return file;
}
@@ -116,7 +117,7 @@ private static void printStatus() {
}
}
- private ChronicleMap getViewTestMap(int noOfElements) throws IOException {
+ private ChronicleMap getViewTestMap(int noOfElements) {
ChronicleMap map =
ChronicleMapBuilder.of(Integer.class, CharSequence.class)
.entries(noOfElements * 2 + 100)
@@ -165,7 +166,7 @@ public void testRemoveWithKey() {
assertEquals("one", result.toString());
assertFalse(map.containsKey("key1"));
- assertEquals(null, map.get("key1"));
+ assertNull(map.get("key1"));
assertEquals("two", map.get("key2").toString());
assertFalse(map.containsKey("key3"));
@@ -293,7 +294,7 @@ public void testSize() {
}
@Test
- public void testRemoveInteger() throws IOException {
+ public void testRemoveInteger() {
int count = 300;
try (final ChronicleMap