"""Exhaustive CRDT stress tests — lattice laws, concurrent writes, adversarial inputs.

For each CRDT primitive, verifies:
1. Commutativity: join(a, b) == join(b, a)
2. Associativity: join(join(a, b), c) == join(a, join(b, c))
3. Idempotency: join(a, a) == a
4. Monotonicity: join never loses information (|join(a,b)| >= |a|)
5. Serialisation: from_dict(to_dict(x)) == x (round-trip)

Additional adversarial cases:
- 100-agent GCounter: concurrent increments from all agents.
- VectorClock causal ordering under partition / reconnect.
- ORSet: add → remove semantics, concurrent add wins over remove.
- RGA: concurrent inserts from N agents produce a deterministic total order.
- AWMap: nested concurrent updates converge.
- LWWRegister: last writer wins with tie-broken by agent_id.
"""

import datetime
import itertools

import pytest

from muse.core.crdts import AWMap, GCounter, LWWRegister, ORSet, RGA, VectorClock
from muse.core.crdts.lww_register import LWWValue
from muse.core.crdts.or_set import ORSetDict
from muse.core.crdts.rga import RGAElement
| 28 | |
| 29 | |
# ===========================================================================
# VectorClock — exhaustive
# ===========================================================================


class TestVectorClockExhaustive:
    """Lattice laws, causal ordering, and serialisation of VectorClock."""

    # --- lattice laws ---

    def _clock(self, **kw: int) -> VectorClock:
        """Helper: build a VectorClock from keyword slots."""
        return VectorClock(kw)

    def test_commutativity_many_agents(self) -> None:
        for n in range(1, 11):
            a = VectorClock({f"agent-{i}": i for i in range(n)})
            b = VectorClock({f"agent-{i}": n - i for i in range(n)})
            assert a.merge(b).equivalent(b.merge(a))

    def test_associativity_three_replicas(self) -> None:
        a = self._clock(**{"a": 3, "b": 1})
        b = self._clock(**{"a": 1, "b": 5, "c": 2})
        c = self._clock(**{"b": 3, "c": 4, "d": 1})
        left = a.merge(b).merge(c)
        right = a.merge(b.merge(c))
        assert left.equivalent(right)

    def test_idempotency(self) -> None:
        for n in [1, 5, 20]:
            clk = VectorClock({f"a{i}": i * 3 for i in range(n)})
            assert clk.merge(clk).equivalent(clk)

    def test_merge_is_monotone(self) -> None:
        a = VectorClock({"x": 1, "y": 2})
        b = VectorClock({"x": 3, "z": 1})
        merged = a.merge(b)
        # merged must dominate both a and b (or be equal to them in each slot).
        for agent in ["x", "y", "z"]:
            assert merged.to_dict().get(agent, 0) >= a.to_dict().get(agent, 0)
            assert merged.to_dict().get(agent, 0) >= b.to_dict().get(agent, 0)

    # --- causal ordering ---

    def test_happens_before_empty_clocks(self) -> None:
        empty = VectorClock()
        assert not empty.happens_before(empty)

    def test_happens_before_empty_before_non_empty(self) -> None:
        empty = VectorClock()
        non_empty = VectorClock({"a": 1})
        assert empty.happens_before(non_empty)
        assert not non_empty.happens_before(empty)

    def test_transitivity_of_causal_order(self) -> None:
        a = VectorClock({"a": 1})
        b = VectorClock({"a": 2, "b": 1})
        c = VectorClock({"a": 3, "b": 2, "c": 1})
        assert a.happens_before(b)
        assert b.happens_before(c)
        assert a.happens_before(c)

    def test_concurrent_detection_both_ways(self) -> None:
        x = VectorClock({"agent-X": 5, "agent-Y": 1})
        y = VectorClock({"agent-X": 1, "agent-Y": 5})
        assert x.concurrent_with(y)
        assert y.concurrent_with(x)

    def test_not_concurrent_with_itself(self) -> None:
        clk = VectorClock({"a": 3, "b": 2})
        assert not clk.concurrent_with(clk)

    def test_not_concurrent_with_ancestor(self) -> None:
        ancestor = VectorClock({"a": 1})
        descendant = VectorClock({"a": 2})
        assert not ancestor.concurrent_with(descendant)

    def test_partition_and_reconnect_converges(self) -> None:
        """Simulate network partition: A and B increment independently, then merge."""
        shared = VectorClock({"base": 5})
        # Partition: A and B both do 10 events independently.
        a_partition = shared
        b_partition = shared
        for _ in range(10):
            a_partition = a_partition.increment("agent-A")
            b_partition = b_partition.increment("agent-B")
        # Reconnect.
        merged = a_partition.merge(b_partition)
        assert merged.to_dict()["base"] == 5
        assert merged.to_dict()["agent-A"] == 10
        assert merged.to_dict()["agent-B"] == 10

    def test_round_trip(self) -> None:
        clk = VectorClock({"x": 7, "y": 3, "z": 0})
        restored = VectorClock.from_dict(clk.to_dict())
        assert restored.equivalent(clk)
| 123 | |
| 124 | |
# ===========================================================================
# GCounter — exhaustive
# ===========================================================================


class TestGCounterExhaustive:
    """Lattice laws, per-slot max semantics, and serialisation of GCounter."""

    def test_commutativity(self) -> None:
        a = GCounter({"a1": 5, "a2": 3})
        b = GCounter({"a1": 2, "a3": 7})
        assert a.join(b).equivalent(b.join(a))

    def test_associativity(self) -> None:
        a = GCounter({"x": 1})
        b = GCounter({"y": 2})
        c = GCounter({"z": 3, "x": 5})
        left = a.join(b).join(c)
        right = a.join(b.join(c))
        assert left.equivalent(right)

    def test_idempotency(self) -> None:
        g = GCounter({"a": 5, "b": 3, "c": 1})
        assert g.join(g).equivalent(g)

    def test_100_agents_concurrent_increment(self) -> None:
        """100 agents each increment once concurrently; merged total must equal 100."""
        counters = [GCounter().increment(f"agent-{i}") for i in range(100)]
        merged = counters[0]
        for c in counters[1:]:
            merged = merged.join(c)
        assert merged.value() == 100

    def test_increment_by_multiple(self) -> None:
        g = GCounter().increment("agent", by=10)
        assert g.value_for("agent") == 10
        g2 = g.increment("agent", by=5)
        assert g2.value_for("agent") == 15

    def test_join_takes_max_per_slot(self) -> None:
        a = GCounter({"x": 3, "y": 1})
        b = GCounter({"x": 1, "y": 5})
        merged = a.join(b)
        assert merged.value_for("x") == 3
        assert merged.value_for("y") == 5

    def test_monotone_join(self) -> None:
        a = GCounter({"a": 10})
        b = GCounter({"a": 20})
        merged = a.join(b)
        assert merged.value() >= a.value()
        assert merged.value() >= b.value()

    def test_round_trip(self) -> None:
        g = GCounter({"x": 7, "y": 3})
        restored = GCounter.from_dict(g.to_dict())
        assert restored.equivalent(g)

    def test_empty_counter_join(self) -> None:
        empty = GCounter()
        g = GCounter({"a": 5})
        assert empty.join(g).equivalent(g)
        assert g.join(empty).equivalent(g)

    def test_value_is_sum_of_slots(self) -> None:
        g = GCounter({"a": 3, "b": 4, "c": 5})
        assert g.value() == 12

    def test_stress_many_increments_single_agent(self) -> None:
        g = GCounter()
        for _ in range(1000):
            g = g.increment("solo")
        assert g.value() == 1000
| 196 | |
| 197 | |
# ===========================================================================
# ORSet — exhaustive
# ===========================================================================


class TestORSetExhaustive:
    """Lattice laws and add-wins semantics of ORSet."""

    def test_commutativity(self) -> None:
        s1 = ORSet()
        s1, t1 = s1.add("apple")
        s2 = ORSet()
        s2, t2 = s2.add("banana")
        assert s1.join(s2).elements() == s2.join(s1).elements()

    def test_associativity(self) -> None:
        s1, _ = ORSet().add("a")
        s2, _ = ORSet().add("b")
        s3, _ = ORSet().add("c")
        left = s1.join(s2).join(s3)
        right = s1.join(s2.join(s3))
        assert left.elements() == right.elements()

    def test_idempotency(self) -> None:
        s, _ = ORSet().add("x")
        s2, _ = s.add("y")
        assert s2.join(s2).elements() == s2.elements()

    def test_add_then_remove(self) -> None:
        s, tok = ORSet().add("element")
        s2 = s.remove("element", tok)
        assert "element" not in s2.elements()

    def test_concurrent_add_wins_over_remove(self) -> None:
        """ORSet semantics: concurrent add always beats remove."""
        # Replica A: add "item" with token t1.
        a, t1 = ORSet().add("item")
        # Replica B: independently add "item" with token t2 and then remove t1.
        b, t2 = ORSet().add("item")
        b_removed = b.remove("item", t1)
        # After join, "item" survives because t2 is still alive in b's replica.
        merged = a.join(b_removed)
        assert "item" in merged.elements()

    def test_remove_nonexistent_is_noop(self) -> None:
        s = ORSet()
        # Removing something that was never added should not crash.
        s2 = s.remove("ghost", "fake-token")
        assert "ghost" not in s2.elements()

    def test_100_elements_all_present(self) -> None:
        s = ORSet()
        for i in range(100):
            s, _ = s.add(f"item-{i}")
        elems = s.elements()
        for i in range(100):
            assert f"item-{i}" in elems

    def test_round_trip_serialisation(self) -> None:
        s = ORSet()
        for label in ["alpha", "beta", "gamma"]:
            s, _ = s.add(label)
        d: ORSetDict = s.to_dict()
        restored = ORSet.from_dict(d)
        assert restored.elements() == s.elements()

    def test_join_preserves_all_adds(self) -> None:
        s1, _ = ORSet().add("from-s1")
        s2, _ = ORSet().add("from-s2")
        merged = s1.join(s2)
        assert "from-s1" in merged.elements()
        assert "from-s2" in merged.elements()

    def test_many_add_remove_cycles(self) -> None:
        """Rapidly add and remove the same element; final state must be empty."""
        s = ORSet()
        tok = ""
        for _ in range(10):
            s, tok = s.add("target")
            s = s.remove("target", tok)
        assert "target" not in s.elements()
| 279 | |
| 280 | |
# ===========================================================================
# LWWRegister — exhaustive
# ===========================================================================


class TestLWWRegisterExhaustive:
    """Last-writer-wins semantics with agent-id tie-breaking."""

    def test_later_timestamp_wins(self) -> None:
        r1 = LWWRegister("old", 1.0, "agent-A")
        r2 = LWWRegister("new", 2.0, "agent-A")
        merged = r1.join(r2)
        assert merged.read() == "new"

    def test_earlier_timestamp_does_not_overwrite(self) -> None:
        current = LWWRegister("current", 5.0, "agent-A")
        stale = LWWRegister("stale", 3.0, "agent-A")
        merged = current.join(stale)
        assert merged.read() == "current"

    def test_concurrent_write_tie_broken_by_agent_id(self) -> None:
        """When timestamps are equal, lexicographically larger author wins."""
        ts = 10.0
        r1 = LWWRegister("value-Z", ts, "agent-Z")
        r2 = LWWRegister("value-A", ts, "agent-A")
        merged = r1.join(r2)
        assert merged.read() == "value-Z"  # "agent-Z" > "agent-A"

    def test_join_commutativity(self) -> None:
        ts = 1.0
        r1 = LWWRegister("alpha", ts, "agent-A")
        r2 = LWWRegister("beta", ts, "agent-B")
        m1 = r1.join(r2)
        m2 = r2.join(r1)
        assert m1.read() == m2.read()

    def test_idempotency(self) -> None:
        r = LWWRegister("val", 1.0, "agent-A")
        assert r.join(r).read() == r.read()

    def test_write_method_updates_register(self) -> None:
        r = LWWRegister("old", 1.0, "agent-A")
        r2 = r.write("new", 2.0, "agent-A")
        assert r2.read() == "new"

    def test_round_trip(self) -> None:
        r = LWWRegister("test-value", 42.0, "agent-X")
        d = r.to_dict()
        restored = LWWRegister.from_dict(d)
        assert restored.read() == "test-value"

    def test_100_concurrent_agents_settle_deterministically(self) -> None:
        """100 agents all write at the same timestamp; result must be deterministic."""
        import random

        ts = 0.0
        registers = [LWWRegister(f"val-{i:03d}", ts, f"agent-{i:03d}") for i in range(100)]
        merged = registers[0]
        for r in registers[1:]:
            merged = merged.join(r)
        winner = merged.read()
        assert winner is not None
        # Shuffle and merge again — result must be identical.
        shuffled = list(registers)
        random.shuffle(shuffled)
        merged2 = shuffled[0]
        for r in shuffled[1:]:
            merged2 = merged2.join(r)
        assert merged2.read() == winner

    def test_higher_timestamp_beats_larger_author(self) -> None:
        """Even if agent-A would lose tiebreaker, newer timestamp wins."""
        r_early_z = LWWRegister("early-z", 1.0, "agent-Z")
        r_late_a = LWWRegister("late-a", 2.0, "agent-A")
        assert r_early_z.join(r_late_a).read() == "late-a"
| 353 | |
| 354 | |
# ===========================================================================
# RGA — exhaustive
# ===========================================================================


class TestRGAExhaustive:
    """Insert/delete ordering, tombstones, and deterministic merge of RGA."""

    def _eid(self, label: str) -> str:
        """Generate a deterministic element_id."""
        return f"1.0@{label}"

    def test_single_insert(self) -> None:
        rga = RGA()
        rga2 = rga.insert(None, "hello", element_id="1.0@agent-A")
        assert rga2.to_sequence() == ["hello"]

    def test_insert_order_preserved(self) -> None:
        rga = RGA()
        rga = rga.insert(None, "a", element_id="1.0@a")
        rga = rga.insert("1.0@a", "b", element_id="2.0@b")
        rga = rga.insert("2.0@b", "c", element_id="3.0@c")
        assert rga.to_sequence() == ["a", "b", "c"]

    def test_delete_removes_element(self) -> None:
        rga = RGA()
        rga = rga.insert(None, "x", element_id="1.0@a")
        rga = rga.delete("1.0@a")
        assert "x" not in rga.to_sequence()

    def test_commutativity(self) -> None:
        rga1 = RGA()
        rga2 = RGA()
        rga1 = rga1.insert(None, "A", element_id="1.0@agent-1")
        rga2 = rga2.insert(None, "B", element_id="1.0@agent-2")
        m1 = rga1.join(rga2)
        m2 = rga2.join(rga1)
        assert m1.to_sequence() == m2.to_sequence()

    def test_idempotency(self) -> None:
        rga = RGA()
        rga = rga.insert(None, "elem", element_id="1.0@agent")
        assert rga.join(rga).to_sequence() == rga.to_sequence()

    def test_concurrent_inserts_deterministic_ordering(self) -> None:
        """5 agents insert concurrently; joined result is always the same total order."""
        agents = [f"agent-{i}" for i in range(5)]
        replicas: list[RGA] = []
        for a in agents:
            r = RGA()
            r = r.insert(None, a, element_id=f"1.0@{a}")
            replicas.append(r)

        merged = replicas[0]
        for r in replicas[1:]:
            merged = merged.join(r)

        result = merged.to_sequence()
        assert sorted(result) == sorted(agents)

        # Merge in reverse order — must give the same deterministic ordering.
        merged2 = replicas[-1]
        for r in reversed(replicas[:-1]):
            merged2 = merged2.join(r)
        assert merged2.to_sequence() == result

    def test_tombstone_not_revived_after_join(self) -> None:
        rga1 = RGA()
        rga1 = rga1.insert(None, "deleted", element_id="1.0@a")
        rga1 = rga1.delete("1.0@a")

        rga2 = RGA()
        rga2 = rga2.insert(None, "other", element_id="1.0@b")

        merged = rga1.join(rga2)
        assert "deleted" not in merged.to_sequence()
        assert "other" in merged.to_sequence()

    def test_large_sequence_sequential_insert(self) -> None:
        rga = RGA()
        prev_id: str | None = None
        for i in range(50):
            eid = f"{i}.0@agent"
            rga = rga.insert(prev_id, str(i), element_id=eid)
            prev_id = eid
        seq = rga.to_sequence()
        assert len(seq) == 50
        assert seq[0] == "0"
        assert seq[-1] == "49"
| 442 | |
| 443 | |
# ===========================================================================
# AWMap — exhaustive
# ===========================================================================


class TestAWMapExhaustive:
    """Add-wins map semantics: set/get/remove and convergent joins."""

    def test_set_and_get(self) -> None:
        m = AWMap()
        m = m.set("key", "value")
        assert m.get("key") == "value"

    def test_remove_entry(self) -> None:
        m = AWMap()
        m = m.set("key", "value")
        m = m.remove("key")
        assert m.get("key") is None

    def test_concurrent_set_same_key_converges(self) -> None:
        """Concurrent writes to the same key; join must be deterministic."""
        m1 = AWMap().set("k", "from-A")
        m2 = AWMap().set("k", "from-Z")
        merged_ab = m1.join(m2)
        merged_ba = m2.join(m1)
        # Both orderings must produce the same result (commutativity).
        assert merged_ab.get("k") == merged_ba.get("k")
        assert merged_ab.get("k") is not None

    def test_commutativity(self) -> None:
        m1 = AWMap().set("x", "1")
        m2 = AWMap().set("y", "2")
        merged_ab = m1.join(m2)
        merged_ba = m2.join(m1)
        assert merged_ab.get("x") == merged_ba.get("x")
        assert merged_ab.get("y") == merged_ba.get("y")

    def test_idempotency(self) -> None:
        m = AWMap().set("a", "alpha")
        assert m.join(m).get("a") == m.get("a")

    def test_add_wins_over_remove_from_empty(self) -> None:
        """Add-wins: if one replica has the key and the other doesn't, add wins."""
        m1 = AWMap().set("key", "added")
        m2 = AWMap().remove("key")  # remove on empty = no-op
        merged = m1.join(m2)
        # m2 never had the key, so no token to tombstone → add from m1 wins.
        assert merged.get("key") == "added"

    def test_multiple_keys_independent(self) -> None:
        m = AWMap()
        for i in range(20):
            m = m.set(f"key-{i}", f"val-{i}")
        for i in range(20):
            assert m.get(f"key-{i}") == f"val-{i}"

    def test_remove_absent_key_is_noop(self) -> None:
        m = AWMap().remove("ghost")
        assert m.get("ghost") is None

    def test_update_replaces_value(self) -> None:
        m = AWMap().set("k", "v1").set("k", "v2")
        assert m.get("k") == "v2"
| 505 | |
| 506 | |
# ===========================================================================
# Cross-CRDT: vector-clock-guided merge
# ===========================================================================


class TestCrossTypeConsistency:
    """Invariants that must hold across different CRDT primitives."""

    def test_gcounter_and_vclock_agree_on_agent_counts(self) -> None:
        """GCounter and VectorClock track the same 'per-agent count' invariant."""
        gc = GCounter()
        vc = VectorClock()
        agents = ["agent-A", "agent-B", "agent-C"]
        for agent in agents:
            for _ in range(3):
                gc = gc.increment(agent)
                vc = vc.increment(agent)
        for agent in agents:
            assert gc.value_for(agent) == vc.to_dict()[agent]

    def test_lattice_join_never_reduces_information(self) -> None:
        """join(a, b) must always contain at least as much information as a."""
        # Import hoisted out of the loop — re-importing per iteration was wasteful.
        import random

        for _ in range(20):
            slots = {f"a{i}": random.randint(0, 10) for i in range(5)}
            a = GCounter(slots)
            extra = {f"a{i}": random.randint(0, 5) for i in range(5)}
            b = GCounter(extra)
            merged = a.join(b)
            for agent in slots:
                assert merged.value_for(agent) >= a.value_for(agent)