@@ -84,16 +84,20 @@ void Allocator::free(uint8_t* address, size_t size) {
 
 std::expected<Allocation, Allocator::Error> Allocator::internal_allocate_near(
     const std::vector<uint8_t*>& desired_addresses, size_t size, size_t max_distance) {
+    // Align to 2 bytes to pass MFP virtual method check
+    // See https://itanium-cxx-abi.github.io/cxx-abi/abi.html#member-function-pointers
+    size_t aligned_size = align_up(size, 2);
+
     // First search through our list of allocations for a free block that is large
     // enough.
     for (const auto& allocation : m_memory) {
-        if (allocation->size < size) {
+        if (allocation->size < aligned_size) {
             continue;
         }
 
         for (auto node = allocation->freelist.get(); node != nullptr; node = node->next.get()) {
             // Enough room?
-            if (static_cast<size_t>(node->end - node->start) < size) {
+            if (static_cast<size_t>(node->end - node->start) < aligned_size) {
                 continue;
             }
 
@@ -104,14 +108,14 @@ std::expected<Allocation, Allocator::Error> Allocator::internal_allocate_near(
                 continue;
             }
 
-            node->start += size;
+            node->start += aligned_size;
 
             return Allocation{shared_from_this(), address, size};
         }
     }
 
     // If we didn't find a free block, we need to allocate a new one.
-    auto allocation_size = align_up(size, system_info().allocation_granularity);
+    auto allocation_size = align_up(aligned_size, system_info().allocation_granularity);
     auto allocation_address = allocate_nearby_memory(desired_addresses, allocation_size, max_distance);
 
     if (!allocation_address) {
@@ -123,13 +127,16 @@ std::expected<Allocation, Allocator::Error> Allocator::internal_allocate_near(
     allocation->address = *allocation_address;
     allocation->size = allocation_size;
     allocation->freelist = std::make_unique<FreeNode>();
-    allocation->freelist->start = *allocation_address + size;
+    allocation->freelist->start = *allocation_address + aligned_size;
     allocation->freelist->end = *allocation_address + allocation_size;
 
     return Allocation{shared_from_this(), *allocation_address, size};
 }
 
 void Allocator::internal_free(uint8_t* address, size_t size) {
+    // See internal_allocate_near
+    size = align_up(size, 2);
+
     for (const auto& allocation : m_memory) {
         if (allocation->address > address || allocation->address + allocation->size < address) {
             continue;
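For context on the 2-byte alignment in this change: under the Itanium C++ ABI linked in the comment, a pointer to member function stores either the function's code address (non-virtual case) or 1 plus the vtable offset (virtual case), so the low bit of the pointer field is the discriminator between the two. Code addresses must therefore be even, or a trampoline handed out by this allocator could be misread as the virtual-call encoding. Below is a minimal sketch of that encoding, assuming an Itanium-ABI platform such as x86_64 Linux with GCC or Clang; ItaniumMemberFnPtr and inspect are illustrative names, not part of this codebase.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative mirror of the Itanium ABI member-function-pointer layout
// (not a type from this codebase).
struct ItaniumMemberFnPtr {
    uintptr_t ptr; // code address, or 1 + vtable offset if the function is virtual
    ptrdiff_t adj; // adjustment added to `this` before the call
};

struct Foo {
    void plain() {}
    virtual void virt() {}
};

template <typename Pmf>
void inspect(const char* name, Pmf pmf) {
    static_assert(sizeof(Pmf) == sizeof(ItaniumMemberFnPtr), "unexpected PMF layout");
    ItaniumMemberFnPtr repr;
    std::memcpy(&repr, &pmf, sizeof repr);
    // A set low bit means "dispatch through the vtable".
    std::printf("%s: low bit = %d\n", name, static_cast<int>(repr.ptr & 1));
}

int main() {
    inspect("Foo::plain", &Foo::plain); // low bit 0: ptr is a direct code address
    inspect("Foo::virt", &Foo::virt);   // low bit 1: ptr is 1 + vtable offset
}

Rounding size up to a multiple of 2 keeps every block carved out of the freelist even, provided the allocation base itself is even, which the align_up to allocation_granularity for new allocations already guarantees.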