Aleph-w 3.0
A C++ Library for Data Structures and Algorithms
Loading...
Searching...
No Matches
ah_arena_exhaustive_test.cc
Go to the documentation of this file.
1
2/*
3 Aleph_w
4
5 Data structures & Algorithms
6 version 2.0.0b
7 https://github.com/lrleon/Aleph-w
8
9 This file is part of Aleph-w library
10
11 Copyright (c) 2002-2026 Leandro Rabindranath Leon
12
13 Permission is hereby granted, free of charge, to any person obtaining a copy
14 of this software and associated documentation files (the "Software"), to deal
15 in the Software without restriction, including without limitation the rights
16 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 copies of the Software, and to permit persons to whom the Software is
18 furnished to do so, subject to the following conditions:
19
20 The above copyright notice and this permission notice shall be included in all
21 copies or substantial portions of the Software.
22
23 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 SOFTWARE.
30*/
31
32
#include <gtest/gtest.h>

#include <ah-arena.H>

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdlib>   // std::malloc / std::free (malloc-comparison benchmark)
#include <iostream>  // std::cout timing/statistics reports
#include <limits>
#include <random>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>   // std::pair (randomized allocation bookkeeping)
#include <vector>
69
70using namespace Aleph;
71
72// ============================================================================
73// Helper Classes and Utilities
74// ============================================================================
75
76namespace
77{
78// Exception-throwing object for testing exception safety
// Exception-throwing object for testing exception safety.
//
// Static counters record how many constructions were attempted and how many
// destructions completed, so tests can verify that a throwing constructor
// neither creates a live object nor corrupts the arena.
class ThrowingObject
{
  static bool should_throw_;    // when true, the next construction throws
  static int construct_count_;  // constructor entries (including the throwing ones)
  static int destruct_count_;   // completed destructions

public:
  int value;

  explicit ThrowingObject(int v) : value(v)
  {
    // Count the attempt before throwing so tests can observe it.
    ++construct_count_;
    if (should_throw_)
      throw std::runtime_error("Intentional construction failure");
  }

  // NOTE(review): destructor signature was lost in extraction; reconstructed
  // from the surviving body, which increments the destruction counter.
  ~ThrowingObject()
  {
    ++destruct_count_;
  }

  static void reset_counters()
  {
    construct_count_ = 0;
    destruct_count_ = 0;
    should_throw_ = false;
  }

  static void set_throw(bool t) { should_throw_ = t; }
  static int constructions() { return construct_count_; }
  static int destructions() { return destruct_count_; }
};

bool ThrowingObject::should_throw_ = false;
int ThrowingObject::construct_count_ = 0;
int ThrowingObject::destruct_count_ = 0;
115
116// Polymorphic base class
// Abstract polymorphic base used to exercise virtual dispatch through
// arena-allocated objects.
class Base
{
public:
  virtual ~Base() = default;

  // Value carried by the concrete object (possibly transformed by it).
  virtual int get_value() const = 0;

  // Human-readable name of the dynamic type.
  virtual std::string get_type() const = 0;
};
124
125// Derived class 1
126class Derived1 : public Base
127{
128 int value_;
129public:
130 explicit Derived1(int v) : value_(v) {}
131 int get_value() const override { return value_; }
132 std::string get_type() const override { return "Derived1"; }
133};
134
135// Derived class 2
136class Derived2 : public Base
137{
138 int value_;
139public:
140 explicit Derived2(int v) : value_(v) {}
141 int get_value() const override { return value_ * 2; }
142 std::string get_type() const override { return "Derived2"; }
143};
144
145// Large object for alignment testing
// 64-byte (cache-line) aligned object for over-aligned allocation testing.
struct alignas(64) CacheLineAligned
{
  char data[64];
  // Sentinel used to verify correct construction. The cast is explicit
  // because 0xDEADBEEF does not fit in a signed int, and the implicit
  // unsigned->int conversion was implementation-defined (pre-C++20).
  int marker = static_cast<int>(0xDEADBEEF);

  CacheLineAligned() = default;
  explicit CacheLineAligned(int m) : marker(m) {}
};
154
155// Small object for fragmentation testing
// One-byte payload used to probe fragmentation with minimal allocations.
struct TinyObject
{
  char byte;

  explicit TinyObject(char b = 0) : byte(b) {}
};
161
162// Medium object
// Medium-sized object whose 128-byte payload is filled with its id so tests
// can later verify the memory was not clobbered.
struct MediumObject
{
  char data[128];
  int id;

  explicit MediumObject(int i = 0) : id(i)
  {
    std::fill(std::begin(data), std::end(data), static_cast<char>(i));
  }
};
169
170// Large object
// Large object (4 KiB payload) filled with its id for later verification.
struct LargeObject
{
  char data[4096];
  int id;

  explicit LargeObject(int i = 0) : id(i)
  {
    std::fill(std::begin(data), std::end(data), static_cast<char>(i));
  }
};
177
178// RAII wrapper for tracking allocations
179template <typename T>
180class AllocationTracker
181{
182 AhArenaAllocator& arena_;
183 T* ptr_;
184
185public:
186 AllocationTracker(AhArenaAllocator& arena, T* p) : arena_(arena), ptr_(p) {}
187
189 {
190 if (ptr_)
191 deallocate(arena_, ptr_);
192 }
193
194 AllocationTracker(const AllocationTracker&) = delete;
195 AllocationTracker& operator=(const AllocationTracker&) = delete;
196
197 AllocationTracker(AllocationTracker&& other) noexcept
198 : arena_(other.arena_), ptr_(other.ptr_)
199 {
200 other.ptr_ = nullptr;
201 }
202
203 T* get() const { return ptr_; }
204 T* operator->() const { return ptr_; }
205 T& operator*() const { return *ptr_; }
206};
207
208} // anonymous namespace
209
210// ============================================================================
211// Stress Tests - Massive Allocations
212// ============================================================================
213
215{
216 const size_t NUM_ALLOCS = 10000;
217 const size_t ALLOC_SIZE = 8;
218
220
221 std::vector<void*> ptrs;
222 ptrs.reserve(NUM_ALLOCS);
223
224 for (size_t i = 0; i < NUM_ALLOCS; ++i)
225 {
226 void* ptr = arena.alloc(ALLOC_SIZE);
227 ASSERT_NE(ptr, nullptr) << "Allocation " << i << " failed";
228 ptrs.push_back(ptr);
229 }
230
232 EXPECT_TRUE(arena.full());
233
234 // Verify all pointers are distinct
235 std::sort(ptrs.begin(), ptrs.end());
236 auto last = std::unique(ptrs.begin(), ptrs.end());
237 EXPECT_EQ(last, ptrs.end()) << "Found duplicate pointers";
238}
239
241{
242 const size_t ARENA_SIZE = 1024 * 1024; // 1 MB
244
245 std::mt19937 rng(42);
246 std::uniform_int_distribution<size_t> size_dist(1, 1024);
247
248 std::vector<void*> ptrs;
249 size_t total_allocated = 0;
250
251 while (true)
252 {
253 size_t size = size_dist(rng);
254 void* ptr = arena.alloc(size);
255
256 if (ptr == nullptr)
257 break;
258
259 ptrs.push_back(ptr);
261 }
262
263 EXPECT_GT(ptrs.size(), 100u) << "Should have allocated many objects";
265 EXPECT_GE(arena.allocated_size(), total_allocated * 0.9); // Allow some slack
266}
267
269{
270 const size_t NUM_OBJECTS = 5000;
271 AhArenaAllocator arena(NUM_OBJECTS * sizeof(MediumObject));
272
273 std::vector<MediumObject*> objects;
274 objects.reserve(NUM_OBJECTS);
275
276 for (size_t i = 0; i < NUM_OBJECTS; ++i)
277 {
278 MediumObject* obj = allocate<MediumObject>(arena, static_cast<int>(i));
279 ASSERT_NE(obj, nullptr) << "Object " << i << " allocation failed";
280 EXPECT_EQ(obj->id, static_cast<int>(i));
281 objects.push_back(obj);
282 }
283
284 // Verify all objects are correctly initialized
285 for (size_t i = 0; i < objects.size(); ++i)
286 {
287 EXPECT_EQ(objects[i]->id, static_cast<int>(i));
288 EXPECT_EQ(objects[i]->data[0], static_cast<char>(i));
289 }
290
291 // LIFO deallocation
292 for (auto it = objects.rbegin(); it != objects.rend(); ++it)
293 deallocate(arena, *it);
294
295 EXPECT_TRUE(arena.empty());
296}
297
298// ============================================================================
299// Fragmentation and Usage Patterns
300// ============================================================================
301
303{
304 const size_t ARENA_SIZE = 1024 * 100;
306
307 std::vector<void*> small_ptrs;
308 std::vector<void*> large_ptrs;
309
310 // Allocate alternating small and large
311 for (int i = 0; i < 50; ++i)
312 {
313 void* small = arena.alloc(16);
314 void* large = arena.alloc(1024);
315
316 if (small) small_ptrs.push_back(small);
317 if (large) large_ptrs.push_back(large);
318 }
319
320 size_t allocated = arena.allocated_size();
321 EXPECT_GT(allocated, 50u * (16 + 1024) * 0.8); // At least 80% efficiency
322}
323
325{
326 // Allocate largest chunks first, then try to fit small ones
327 const size_t ARENA_SIZE = 10000;
329
330 std::vector<void*> ptrs;
331
332 // Fill with large allocations
333 while (true)
334 {
335 void* ptr = arena.alloc(1000);
336 if (!ptr) break;
337 ptrs.push_back(ptr);
338 }
339
340 size_t before = arena.allocated_size();
341
342 // Try to allocate small (should fail or succeed depending on remaining)
343 void* small = arena.alloc(10);
344 (void)small; // May or may not succeed
345
347}
348
350{
351 const size_t ARENA_SIZE = 50000;
353
354 for (int cycle = 0; cycle < 100; ++cycle)
355 {
356 // Allocate varying amounts
357 std::mt19937 rng(cycle);
358 std::uniform_int_distribution<size_t> size_dist(10, 500);
359
360 size_t total = 0;
361 while (total < ARENA_SIZE * 0.9)
362 {
363 size_t size = size_dist(rng);
364 void* ptr = arena.alloc(size);
365 if (!ptr) break;
366 total += size;
367 }
368
369 EXPECT_GT(arena.allocated_size(), ARENA_SIZE * 0.5);
370
371 arena.reset();
372 EXPECT_TRUE(arena.empty());
373 }
374
375 // Arena should still be usable after 100 cycles
376 void* ptr = arena.alloc(1000);
377 EXPECT_NE(ptr, nullptr);
378}
379
380// ============================================================================
381// Exception Safety Tests
382// ============================================================================
383
385{
386 ThrowingObject::reset_counters();
387 ThrowingObject::set_throw(true);
388
389 AhArenaAllocator arena(4096);
390
391 ThrowingObject* obj = nullptr;
393 obj = allocate<ThrowingObject>(arena, 42),
394 std::runtime_error
395 );
396
397 EXPECT_EQ(obj, nullptr);
398 EXPECT_EQ(ThrowingObject::constructions(), 1); // Constructor was called
399 EXPECT_EQ(ThrowingObject::destructions(), 0); // But object not created
400
401 // Arena should still be usable
402 ThrowingObject::set_throw(false);
403 obj = allocate<ThrowingObject>(arena, 100);
404 EXPECT_NE(obj, nullptr);
405 EXPECT_EQ(obj->value, 100);
406}
407
409{
410 ThrowingObject::reset_counters();
411
412 AhArenaAllocator arena(4096);
413
414 // Allocate some objects successfully
415 std::vector<ThrowingObject*> objs;
416 for (int i = 0; i < 5; ++i)
417 {
418 ThrowingObject* obj = allocate<ThrowingObject>(arena, i);
419 ASSERT_NE(obj, nullptr);
420 objs.push_back(obj);
421 }
422
423 EXPECT_EQ(ThrowingObject::constructions(), 5);
424
425 // Now cause one to throw
426 ThrowingObject::set_throw(true);
428 allocate<ThrowingObject>(arena, 999),
429 std::runtime_error
430 );
431
432 // Previous allocations should still be valid
433 for (size_t i = 0; i < objs.size(); ++i)
434 EXPECT_EQ(objs[i]->value, static_cast<int>(i));
435
436 // Cleanup
437 ThrowingObject::set_throw(false);
438 for (auto it = objs.rbegin(); it != objs.rend(); ++it)
439 deallocate(arena, *it);
440}
441
442// ============================================================================
443// Polymorphic and Complex Objects
444// ============================================================================
445
447{
448 AhArenaAllocator arena(4096);
449
450 std::vector<Base*> objects;
451
452 for (int i = 0; i < 10; ++i)
453 {
454 if (i % 2 == 0)
455 objects.push_back(allocate<Derived1>(arena, i));
456 else
457 objects.push_back(allocate<Derived2>(arena, i));
458 }
459
460 // Verify polymorphic behavior
461 for (size_t i = 0; i < objects.size(); ++i)
462 {
463 if (i % 2 == 0)
464 {
465 EXPECT_EQ(objects[i]->get_type(), "Derived1");
466 EXPECT_EQ(objects[i]->get_value(), static_cast<int>(i));
467 }
468 else
469 {
470 EXPECT_EQ(objects[i]->get_type(), "Derived2");
471 EXPECT_EQ(objects[i]->get_value(), static_cast<int>(i) * 2);
472 }
473 }
474
475 // Cleanup (must call virtual destructors)
476 for (auto it = objects.rbegin(); it != objects.rend(); ++it)
477 {
478 if ((*it)->get_type() == "Derived1")
479 deallocate<Derived1>(arena, static_cast<Derived1*>(*it));
480 else
481 deallocate<Derived2>(arena, static_cast<Derived2*>(*it));
482 }
483}
484
486{
487 struct ComplexObject
488 {
489 std::string name;
490 std::vector<int> numbers;
491 int id;
492
493 ComplexObject(std::string n, std::vector<int> nums, int i)
494 : name(std::move(n)), numbers(std::move(nums)), id(i)
495 {}
496 };
497
498 AhArenaAllocator arena(100000);
499
500 std::vector<ComplexObject*> objects;
501
502 for (int i = 0; i < 100; ++i)
503 {
504 std::string name = "Object_" + std::to_string(i);
505 std::vector<int> nums(i % 10 + 1, i);
506
507 ComplexObject* obj = allocate<ComplexObject>(arena, name, nums, i);
508 ASSERT_NE(obj, nullptr);
509 objects.push_back(obj);
510 }
511
512 // Verify
513 for (size_t i = 0; i < objects.size(); ++i)
514 {
515 EXPECT_EQ(objects[i]->name, "Object_" + std::to_string(i));
516 EXPECT_EQ(objects[i]->id, static_cast<int>(i));
517 EXPECT_EQ(objects[i]->numbers.size(), i % 10 + 1);
518 }
519
520 // Cleanup
521 for (auto it = objects.rbegin(); it != objects.rend(); ++it)
522 deallocate(arena, *it);
523}
524
525// ============================================================================
526// Alignment Tests - Advanced
527// ============================================================================
528
530{
531 AhArenaAllocator arena(10000);
532
533 std::vector<CacheLineAligned*> objects;
534
535 for (int i = 0; i < 50; ++i)
536 {
537 CacheLineAligned* obj = allocate<CacheLineAligned>(arena, i);
538 ASSERT_NE(obj, nullptr);
539 EXPECT_EQ(reinterpret_cast<uintptr_t>(obj) % 64, 0u)
540 << "Object " << i << " not 64-byte aligned";
541 EXPECT_EQ(obj->marker, i);
542 objects.push_back(obj);
543 }
544
545 // Cleanup
546 for (auto it = objects.rbegin(); it != objects.rend(); ++it)
547 deallocate(arena, *it);
548}
549
551{
552 AhArenaAllocator arena(10000);
553
554 struct Align4 { alignas(4) char data[4]; };
555 struct Align8 { alignas(8) char data[8]; };
556 struct Align16 { alignas(16) char data[16]; };
557 struct Align32 { alignas(32) char data[32]; };
558
559 // Allocate in mixed order
560 auto a4 = allocate<Align4>(arena);
561 auto a32 = allocate<Align32>(arena);
562 auto a8 = allocate<Align8>(arena);
563 auto a16 = allocate<Align16>(arena);
564
565 ASSERT_NE(a4, nullptr);
566 ASSERT_NE(a8, nullptr);
567 ASSERT_NE(a16, nullptr);
568 ASSERT_NE(a32, nullptr);
569
570 EXPECT_EQ(reinterpret_cast<uintptr_t>(a4) % 4, 0u);
571 EXPECT_EQ(reinterpret_cast<uintptr_t>(a8) % 8, 0u);
572 EXPECT_EQ(reinterpret_cast<uintptr_t>(a16) % 16, 0u);
573 EXPECT_EQ(reinterpret_cast<uintptr_t>(a32) % 32, 0u);
574}
575
577{
578 AhArenaAllocator arena(100000);
579
580 // 256-byte alignment
581 void* ptr = arena.alloc_aligned(100, 256);
582 ASSERT_NE(ptr, nullptr);
583 EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % 256, 0u);
584
585 // 512-byte alignment
586 ptr = arena.alloc_aligned(100, 512);
587 ASSERT_NE(ptr, nullptr);
588 EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % 512, 0u);
589
590 // 1024-byte alignment
591 ptr = arena.alloc_aligned(100, 1024);
592 ASSERT_NE(ptr, nullptr);
593 EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % 1024, 0u);
594}
595
596// ============================================================================
597// Boundary Conditions
598// ============================================================================
599
601{
602 const size_t SIZE = 1000;
603 AhArenaAllocator arena(SIZE);
604
605 // Fill exactly to capacity with multiple allocations
606 void* p1 = arena.alloc(400);
607 void* p2 = arena.alloc(300);
608 void* p3 = arena.alloc(300);
609
610 ASSERT_NE(p1, nullptr);
611 ASSERT_NE(p2, nullptr);
612 ASSERT_NE(p3, nullptr);
613
614 EXPECT_TRUE(arena.full());
615 EXPECT_EQ(arena.allocated_size(), SIZE);
616 EXPECT_EQ(arena.available_size(), 0u);
617
618 // No more allocations possible
619 void* p4 = arena.alloc(1);
620 EXPECT_EQ(p4, nullptr);
621}
622
624{
625 const size_t SIZE = 100;
626 AhArenaAllocator arena(SIZE);
627
628 std::vector<void*> ptrs;
629
630 for (size_t i = 0; i < SIZE; ++i)
631 {
632 void* ptr = arena.alloc(1);
633 ASSERT_NE(ptr, nullptr) << "Byte " << i << " failed";
634 ptrs.push_back(ptr);
635 }
636
637 EXPECT_TRUE(arena.full());
638
639 // Verify all pointers are consecutive
640 for (size_t i = 1; i < ptrs.size(); ++i)
641 {
642 EXPECT_EQ(
643 static_cast<char*>(ptrs[i]),
644 static_cast<char*>(ptrs[i-1]) + 1
645 );
646 }
647}
648
650{
651 const size_t SIZE = 100;
652 AhArenaAllocator arena(SIZE);
653
654 // Allocate most of arena
655 void* p1 = arena.alloc(90);
656 ASSERT_NE(p1, nullptr);
657
658 // Try aligned allocation that requires padding
659 // This might fail due to padding requirements
660 void* p2 = arena.alloc_aligned(5, 16);
661
662 if (p2 != nullptr)
663 {
664 EXPECT_EQ(reinterpret_cast<uintptr_t>(p2) % 16, 0u);
665 }
666 else
667 {
668 // Failure is acceptable if not enough space for alignment
669 EXPECT_LT(arena.available_size(), 21u); // 5 + up to 15 padding
670 }
671}
672
673// ============================================================================
674// LIFO Pattern Tests - Complex
675// ============================================================================
676
678{
679 // This test allocates 10+20+...+1000 = 50500 bytes.
680 // Use a comfortably larger arena to avoid spurious failures.
681 AhArenaAllocator arena(60000);
682
683 struct Level
684 {
685 void* ptr;
686 size_t requested_size;
687 size_t actual_size; // includes any allocator overhead (e.g. alignment padding)
688 };
689
690 std::vector<Level> stack;
691 stack.reserve(100);
692
693 // Push many levels and record the *actual* consumed bytes so we can
694 // deallocate accurately even if future allocator versions introduce padding.
695 for (int i = 0; i < 100; ++i)
696 {
697 const size_t before = arena.allocated_size();
698 const size_t size = static_cast<size_t>(i + 1) * 10;
699 void* ptr = arena.alloc(size);
700 ASSERT_NE(ptr, nullptr);
701 const size_t after = arena.allocated_size();
702 stack.push_back({ptr, size, after - before});
703 }
704
705 const size_t max_allocated = arena.allocated_size();
706
707 // Pop in LIFO order
708 while (!stack.empty())
709 {
710 Level level = stack.back();
711 stack.pop_back();
712
713 const size_t before = arena.allocated_size();
714 arena.dealloc(level.ptr, level.actual_size);
715 const size_t after = arena.allocated_size();
716
717 EXPECT_EQ(before - after, level.actual_size);
718 }
719
720 EXPECT_TRUE(arena.empty());
721 EXPECT_GT(max_allocated, 45000u);
722}
723
725{
726 AhArenaAllocator arena(100000);
727
728 std::vector<TinyObject*> tiny_stack;
729 std::vector<MediumObject*> medium_stack;
730
731 // Interleaved pushes
732 for (int i = 0; i < 50; ++i)
733 {
734 tiny_stack.push_back(allocate<TinyObject>(arena, static_cast<char>(i)));
735 medium_stack.push_back(allocate<MediumObject>(arena, i));
736 }
737
738 // Interleaved pops (LIFO per type)
739 while (!medium_stack.empty() || !tiny_stack.empty())
740 {
741 if (!medium_stack.empty())
742 {
743 deallocate(arena, medium_stack.back());
744 medium_stack.pop_back();
745 }
746
747 if (!tiny_stack.empty())
748 {
749 deallocate(arena, tiny_stack.back());
750 tiny_stack.pop_back();
751 }
752 }
753
754 // Interleaving different-sized objects can leave the arena non-empty because
755 // deallocation is only effective for a strict LIFO pattern on the raw blocks.
756 // The supported bulk mechanism is reset().
757 arena.reset();
758 EXPECT_TRUE(arena.empty());
759}
760
761// ============================================================================
762// Performance and Scaling Tests
763// ============================================================================
764
766{
767 const size_t NUM_OBJECTS = 50000;
768 AhArenaAllocator arena(NUM_OBJECTS * sizeof(MediumObject));
769
770 auto start = std::chrono::high_resolution_clock::now();
771
772 std::vector<MediumObject*> objects;
773 objects.reserve(NUM_OBJECTS);
774
775 for (size_t i = 0; i < NUM_OBJECTS; ++i)
776 objects.push_back(allocate<MediumObject>(arena, static_cast<int>(i)));
777
778 auto end = std::chrono::high_resolution_clock::now();
779 auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
780
781 // Should be fast (bump allocation is O(1))
782 EXPECT_LT(duration.count(), 1000) << "Allocation too slow: " << duration.count() << "ms";
783
784 // Verify all allocated
785 for (size_t i = 0; i < objects.size(); ++i)
786 {
787 ASSERT_NE(objects[i], nullptr);
788 EXPECT_EQ(objects[i]->id, static_cast<int>(i));
789 }
790
791 std::cout << " [Allocated " << NUM_OBJECTS << " objects in "
792 << duration.count() << "ms]" << std::endl;
793}
794
796{
797 const size_t NUM_ALLOCS = 10000;
798 const size_t ALLOC_SIZE = 64;
799
800 // Malloc timing
801 auto start = std::chrono::high_resolution_clock::now();
802
803 std::vector<void*> malloc_ptrs;
804 malloc_ptrs.reserve(NUM_ALLOCS);
805 for (size_t i = 0; i < NUM_ALLOCS; ++i)
806 malloc_ptrs.push_back(std::malloc(ALLOC_SIZE));
807
808 for (void* ptr : malloc_ptrs)
809 std::free(ptr);
810
811 auto end = std::chrono::high_resolution_clock::now();
812 auto malloc_duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
813
814 // Arena timing
815 start = std::chrono::high_resolution_clock::now();
816
818 std::vector<void*> arena_ptrs;
819 arena_ptrs.reserve(NUM_ALLOCS);
820 for (size_t i = 0; i < NUM_ALLOCS; ++i)
821 arena_ptrs.push_back(arena.alloc(ALLOC_SIZE));
822
823 end = std::chrono::high_resolution_clock::now();
824 auto arena_duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
825
826 std::cout << " [malloc: " << malloc_duration.count() << "μs, "
827 << "arena: " << arena_duration.count() << "μs, "
828 << "speedup: " << (double)malloc_duration.count() / arena_duration.count() << "x]"
829 << std::endl;
830
831 // Arena should be faster (typically 10-100x)
832 EXPECT_LT(arena_duration.count(), malloc_duration.count());
833}
834
835// ============================================================================
836// Statistical Validation
837// ============================================================================
838
840{
841 const size_t NUM_TRIALS = 100;
842 const size_t ARENA_SIZE = 100000;
843
844 std::mt19937 rng(12345);
845 std::uniform_int_distribution<size_t> size_dist(1, 1000);
846 std::bernoulli_distribution dealloc_dist(0.3); // 30% chance to dealloc
847
848 size_t total_allocs = 0;
849 size_t total_deallocs = 0;
850 size_t failed_allocs = 0;
851
852 for (size_t trial = 0; trial < NUM_TRIALS; ++trial)
853 {
855 std::vector<std::pair<void*, size_t>> allocations;
856
857 for (int op = 0; op < 1000; ++op)
858 {
859 // Randomly allocate or deallocate
861 {
862 // Allocate
863 size_t size = size_dist(rng);
864 void* ptr = arena.alloc(size);
865
866 if (ptr != nullptr)
867 {
868 allocations.push_back({ptr, size});
869 ++total_allocs;
870 }
871 else
872 {
874 }
875 }
876 else
877 {
878 // Deallocate (LIFO only works)
879 auto [ptr, size] = allocations.back();
880 allocations.pop_back();
881 arena.dealloc(ptr, size);
883 }
884 }
885 }
886
887 std::cout << " [Trials: " << NUM_TRIALS
888 << ", Allocs: " << total_allocs
889 << ", Deallocs: " << total_deallocs
890 << ", Failed: " << failed_allocs << "]" << std::endl;
891
894}
895
896// ============================================================================
897// Edge Cases and Corner Cases
898// ============================================================================
899
901{
902 const size_t MAX = std::numeric_limits<size_t>::max();
903
904 AhArenaAllocator arena(1024);
905
906 // Try to allocate SIZE_MAX (should fail gracefully)
907 void* ptr = arena.alloc(MAX);
908 EXPECT_EQ(ptr, nullptr);
909
910 // Arena should still be usable
911 ptr = arena.alloc(100);
912 EXPECT_NE(ptr, nullptr);
913}
914
916{
917 AhArenaAllocator arena(1024);
918
919 // Non-power-of-2 alignments (undefined behavior in C++, but test robustness)
920 // The implementation uses bitwise operations that assume power-of-2
921 // This test verifies it doesn't crash, even if result is undefined
922
923 void* ptr1 = arena.alloc_aligned(100, 3); // Not power of 2
924 void* ptr2 = arena.alloc_aligned(100, 7); // Not power of 2
925
926 // Implementation-defined behavior, but should not crash
927 // May return nullptr or incorrectly aligned pointer
928 (void)ptr1;
929 (void)ptr2;
930
931 SUCCEED(); // Didn't crash
932}
933
935{
936 const char buffer[1024] = {}; // Const buffer
937
938 AhArenaAllocator arena(buffer, sizeof(buffer));
939
940 EXPECT_TRUE(arena.is_valid());
941 EXPECT_FALSE(arena.owns_memory());
942
943 // Should be able to allocate (const_cast used internally)
944 void* ptr = arena.alloc(100);
945 EXPECT_NE(ptr, nullptr);
946}
947
949{
950 char buffer[1]; // Minimal buffer
951 AhArenaAllocator arena(buffer, 0); // Zero capacity
952
953 EXPECT_TRUE(arena.is_valid());
954 EXPECT_EQ(arena.capacity(), 0u);
955 EXPECT_TRUE(arena.empty());
956 EXPECT_TRUE(arena.full());
957
958 void* ptr = arena.alloc(1);
959 EXPECT_EQ(ptr, nullptr);
960}
961
962// ============================================================================
963// Integration Tests
964// ============================================================================
965
967{
968 // Simulate a parser that builds an AST
969 struct ASTNode
970 {
971 std::string token;
972 std::vector<ASTNode*> children;
973 int line_number;
974
975 ASTNode(std::string t, int line)
976 : token(std::move(t)), line_number(line)
977 {}
978 };
979
980 const size_t ESTIMATED_NODES = 1000;
982
983 std::vector<ASTNode*> all_nodes;
984
985 // Build tree
986 for (int i = 0; i < 500; ++i)
987 {
988 ASTNode* node = allocate<ASTNode>(arena, "token_" + std::to_string(i), i);
989 ASSERT_NE(node, nullptr);
990 all_nodes.push_back(node);
991
992 // Some nodes have children
993 if (i > 0 && i % 10 == 0)
994 {
995 for (int j = 0; j < 3 && !all_nodes.empty(); ++j)
996 node->children.push_back(all_nodes[all_nodes.size() - j - 1]);
997 }
998 }
999
1000 // Verify tree structure
1001 int nodes_with_children = 0;
1002 for (ASTNode* node : all_nodes)
1003 {
1004 EXPECT_FALSE(node->token.empty());
1005 EXPECT_GE(node->line_number, 0);
1006 if (!node->children.empty())
1008 }
1009
1011
1012 // Cleanup (LIFO)
1013 for (auto it = all_nodes.rbegin(); it != all_nodes.rend(); ++it)
1014 deallocate(arena, *it);
1015
1016 EXPECT_TRUE(arena.empty());
1017}
1018
1019int main(int argc, char** argv)
1020{
1021 ::testing::InitGoogleTest(&argc, argv);
1022 return RUN_ALL_TESTS();
1023}
Memory arena for fast bulk allocations.
int main()
Arena allocator for fast bump-pointer allocation.
Definition ah-arena.H:122
bool is_valid() const noexcept
Returns true if the arena is valid (has memory).
Definition ah-arena.H:383
void * alloc(const size_t size) noexcept
Allocate raw memory from the arena.
Definition ah-arena.H:271
void * alloc_aligned(const size_t size, const size_t alignment) noexcept
Allocate aligned memory from the arena.
Definition ah-arena.H:294
void reset() noexcept
Reset arena, making all memory available again.
Definition ah-arena.H:256
void dealloc(const void *addr, const size_t size) noexcept
Deallocate memory (only effective for LIFO pattern).
Definition ah-arena.H:326
bool owns_memory() const noexcept
Returns true if the arena owns its memory.
Definition ah-arena.H:389
size_t capacity() const noexcept
Get total arena capacity.
Definition ah-arena.H:407
size_t allocated_size() const noexcept
Get total bytes currently allocated.
Definition ah-arena.H:395
bool full() const noexcept
Returns true if the arena is full (no space left).
Definition ah-arena.H:419
bool empty() const noexcept
Returns true if the arena is empty (no allocations).
Definition ah-arena.H:413
size_t available_size() const noexcept
Get remaining bytes available.
Definition ah-arena.H:401
void empty() noexcept
empty the list
Definition htlist.H:1689
size_t size() const noexcept
Count the number of elements of the list.
Definition htlist.H:1319
iterator end() noexcept
Return an STL-compatible end iterator.
iterator begin() noexcept
Return an STL-compatible iterator to the first element.
#define TEST(name)
static mt19937 rng
Main namespace for Aleph-w library functions.
Definition ah-arena.H:89
size_t size(Node *root) noexcept
std::decay_t< typename HeadC::Item_Type > T
Definition ah-zip.H:107
Matrix< Trow, Tcol, NumType > operator*(const NumType &scalar, const Matrix< Trow, Tcol, NumType > &m)
Scalar-matrix multiplication (scalar * matrix).
Definition al-matrix.H:995
void deallocate(AhArenaAllocator &arena, T *ptr) noexcept
Destruct and deallocate an object from an arena.
Definition ah-arena.H:494
DynList< T > maps(const C &c, Op op)
Classic map operation.