#include <arm_neon.h> // float32x4_t

// Foo class: trivial destructor
class Foo {
public:
    Foo() { vec = vdupq_n_f32(0.0f); } // init with any 16-byte NEON value
    float32x4_t vec;
};
// Bar class
class Bar {
public:
    Bar() { vec = vdupq_n_f32(0.0f); }
    ~Bar() { } // this destructor is important
    float32x4_t vec;
};
#include <malloc.h> // memalign, free
#include <cstddef>  // size_t

// 16-byte aligned allocation;
// this approach will work on all platforms!
void *operator new(size_t size) {
    return memalign(16, size);
}
void *operator new[](size_t size) {
    return memalign(16, size);
}
// everything is OK...
Foo *f0 = new Foo();
Bar *b0 = new Bar();
Foo *f1 = new Foo[1];
// Hello, SIGBUS! Where are you from?
Bar *b1 = new Bar[1];
// I'm from the 8-byte cookie.
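Why does Bar get a cookie and Foo does not? Bar's empty destructor is still user-declared and therefore non-trivial, so delete[] has to know how many elements to destroy, and the compiler prepends the element count to the allocation; Foo's destructor is trivial, so no cookie is needed. A quick way to see the difference (a sketch; check_triviality is just an illustrative name, __has_trivial_destructor is a GCC builtin):

#include <cstdio>

void check_triviality() {
    // prints "Foo: 1 Bar: 0": no cookie for new Foo[n], a cookie for new Bar[n]
    printf("Foo: %d Bar: %d\n",
           (int)__has_trivial_destructor(Foo),
           (int)__has_trivial_destructor(Bar));
}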
On non-__ARM_EABI__ platforms the cookie size is max(sizeof(size_t) * 2, sizeof(Foo)), so the array elements stay 16-byte aligned. On __ARM_EABI__ the cookie size is always sizeof(size_t) * 2, and we can't change it. So what should you do with all the code that relies on this approach on __ARM_EABI__?
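Concretely, here is what new Bar[1] does with the naive allocator above (addresses are illustrative; sizeof(Bar) == 16):

// the new[] expression asks operator new[] for sizeof(Bar) + 8 = 24 bytes;
// memalign(16, 24) returns a 16-byte-aligned pointer, say 0x1000:
//
//   0x1000  8-byte cookie (element size and element count for ~Bar)
//   0x1008  Bar[0].vec   <- new Bar[1] evaluates to this address:
//                           only 8-byte aligned, so the first 16-byte
//                           NEON access to vec raises SIGBUS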
The stupid straightforward solution is to return a deliberately 8-byte-misaligned pointer instead of a 16-byte-aligned one for all Bar-like array allocations, so that once the compiler adds its 8-byte cookie the elements land back on a 16-byte boundary:
#include <stdint.h> // uintptr_t

// true if the value/pointer is a multiple of 16 bytes
#define IS_ALIGNED16(V) ((((uintptr_t)(V)) & 15) == 0)

void *operator new[](size_t size) {
#if defined(_LINUX) && defined(__ARM_EABI__)
    // a size that is not a multiple of 16 means the compiler added an
    // 8-byte cookie: reserve 8 extra bytes and return a pointer 8 bytes
    // past a 16-byte boundary, so cookie + data realigns the elements
    if(sizeof(size_t) == 4 && !IS_ALIGNED16(size)) {
        size += sizeof(size_t) * 2;
        return static_cast<size_t*>(memalign(16, size)) + 2;
    }
#endif
    return memalign(16, size);
}
// the offset pointers only ever come from operator new[],
// so it is operator delete[] that must undo the trick
void operator delete[](void *ptr) {
#if defined(_LINUX) && defined(__ARM_EABI__)
    if(sizeof(size_t) == 4 && !IS_ALIGNED16(ptr)) {
        return free(static_cast<size_t*>(ptr) - 2);
    }
#endif
    free(ptr);
}
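A quick sanity check of the hack (a sketch; check_alignment is an illustrative name, and the classes, IS_ALIGNED16, and the operators above are assumed to be in scope):

#include <cassert>

void check_alignment() {
    Bar *b1 = new Bar[2]; // operator new[] returns ptr + 8; after the
                          // 8-byte cookie the Bars are 16-byte aligned again
    assert(IS_ALIGNED16(b1));
    assert(IS_ALIGNED16(&b1->vec));
    delete[] b1;          // operator delete[] undoes the +8 offset
}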