mirror of https://github.com/godotengine/godot.git synced 2025-12-05 17:15:09 +00:00

Style: clang-format: Disable KeepEmptyLinesAtTheStartOfBlocks

Rémi Verschelde
2021-05-04 14:41:06 +02:00
parent 64a63e0861
commit b5e1e05ef2
1439 changed files with 1 addition and 34187 deletions
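The clang-format option in question, KeepEmptyLinesAtTheStartOfBlocks, controls whether blank lines immediately following an opening brace survive reformatting. It defaults to true; disabling it makes clang-format delete those leading blank lines, which is what produces the tens of thousands of one-line deletions below. The single added line is presumably the corresponding entry in the repository's .clang-format file, along these lines (a sketch, not the verbatim hunk):

KeepEmptyLinesAtTheStartOfBlocks: false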

core/pool_allocator.cpp

@@ -52,12 +52,10 @@ void PoolAllocator::mt_unlock() const {
}

bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
	if (entry_count == entry_max)
		return false;

	for (int i = 0; i < entry_max; i++) {
		if (entry_array[i].len == 0) {
			*p_pos = i;
			return true;
@@ -76,13 +74,11 @@ bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
 * @return false if hole found, true if no hole found
 */
bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
	/* position where previous entry ends. Defaults to zero (begin of pool) */

	int prev_entry_end_pos = 0;

	for (int i = 0; i < entry_count; i++) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */
@@ -110,13 +106,11 @@ bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
}

void PoolAllocator::compact(int p_up_to) {
	uint32_t prev_entry_end_pos = 0;

	if (p_up_to < 0)
		p_up_to = entry_count;
	for (int i = 0; i < p_up_to; i++) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */
@@ -125,7 +119,6 @@ void PoolAllocator::compact(int p_up_to) {
		/* if we can compact, do it */
		if (hole_size > 0 && !entry.lock) {
			COMPACT_CHUNK(entry, prev_entry_end_pos);
		}
@@ -135,11 +128,9 @@ void PoolAllocator::compact(int p_up_to) {
}

void PoolAllocator::compact_up(int p_from) {
	uint32_t next_entry_end_pos = pool_size; // - static_area_size;

	for (int i = entry_count - 1; i >= p_from; i--) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to next entry */
@@ -148,7 +139,6 @@ void PoolAllocator::compact_up(int p_from) {
		/* if we can compact, do it */
		if (hole_size > 0 && !entry.lock) {
			COMPACT_CHUNK(entry, (next_entry_end_pos - aligned(entry.len)));
		}
@@ -158,13 +148,10 @@ void PoolAllocator::compact_up(int p_from) {
}

bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry) {
	EntryArrayPos entry_pos = entry_max;

	for (int i = 0; i < entry_count; i++) {
		if (&entry_array[entry_indices[i]] == p_entry) {
			entry_pos = i;
			break;
		}
@@ -178,7 +165,6 @@ bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry)
}

PoolAllocator::ID PoolAllocator::alloc(int p_size) {
	ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
#ifdef DEBUG_ENABLED
	if (p_size > free_mem)
@@ -220,7 +206,6 @@ PoolAllocator::ID PoolAllocator::alloc(int p_size) {
	/* move all entry indices up, make room for this one */
	for (int i = entry_count; i > new_entry_indices_pos; i--) {
		entry_indices[i] = entry_indices[i - 1];
	}
@@ -247,7 +232,6 @@ PoolAllocator::ID PoolAllocator::alloc(int p_size) {
}

PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
	unsigned int check = p_mem & CHECK_MASK;
	int entry = p_mem >> CHECK_BITS;
	ERR_FAIL_INDEX_V(entry, entry_max, NULL);
@@ -258,7 +242,6 @@ PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
}

const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
	unsigned int check = p_mem & CHECK_MASK;
	int entry = p_mem >> CHECK_BITS;
	ERR_FAIL_INDEX_V(entry, entry_max, NULL);
@@ -269,7 +252,6 @@ const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
}

void PoolAllocator::free(ID p_mem) {
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
@@ -287,13 +269,11 @@ void PoolAllocator::free(ID p_mem) {
	bool index_found = find_entry_index(&entry_indices_pos, e);
	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND(!index_found);
	}

	for (int i = entry_indices_pos; i < (entry_count - 1); i++) {
		entry_indices[i] = entry_indices[i + 1];
	}
@@ -304,13 +284,11 @@ void PoolAllocator::free(ID p_mem) {
}

int PoolAllocator::get_size(ID p_mem) const {
	int size;
	mt_lock();

	const Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("!e");
		return 0;
@@ -324,7 +302,6 @@ int PoolAllocator::get_size(ID p_mem) const {
}

Error PoolAllocator::resize(ID p_mem, int p_new_size) {
	mt_lock();

	Entry *e = get_entry(p_mem);
@@ -341,12 +318,10 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
	uint32_t alloc_size = aligned(p_new_size);

	if ((uint32_t)aligned(e->len) == alloc_size) {
		e->len = p_new_size;
		mt_unlock();
		return OK;
	} else if (e->len > (uint32_t)p_new_size) {
		free_mem += aligned(e->len);
		free_mem -= alloc_size;
		e->len = p_new_size;
@@ -367,7 +342,6 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
	bool index_found = find_entry_index(&entry_indices_pos, e);
	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND_V(!index_found, ERR_BUG);
	}
@@ -422,13 +396,11 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
}

Error PoolAllocator::lock(ID p_mem) {
	if (!needs_locking)
		return OK;

	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("!e");
		return ERR_INVALID_PARAMETER;
@@ -439,14 +411,12 @@ Error PoolAllocator::lock(ID p_mem) {
}

bool PoolAllocator::is_locked(ID p_mem) const {
	if (!needs_locking)
		return false;

	mt_lock();
	const Entry *e = ((PoolAllocator *)(this))->get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("!e");
		return false;
@@ -457,9 +427,7 @@ bool PoolAllocator::is_locked(ID p_mem) const {
}

const void *PoolAllocator::get(ID p_mem) const {
	if (!needs_locking) {
		const Entry *e = get_entry(p_mem);
		ERR_FAIL_COND_V(!e, NULL);
		return &pool[e->pos];
@@ -469,19 +437,16 @@ const void *PoolAllocator::get(ID p_mem) const {
	const Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_FAIL_COND_V(!e, NULL);
	}

	if (e->lock == 0) {
		mt_unlock();
		ERR_PRINT("e->lock == 0");
		return NULL;
	}

	if ((int)e->pos >= pool_size) {
		mt_unlock();
		ERR_PRINT("e->pos<0 || e->pos>=pool_size");
		return NULL;
@@ -494,9 +459,7 @@ const void *PoolAllocator::get(ID p_mem) const {
}

void *PoolAllocator::get(ID p_mem) {
	if (!needs_locking) {
		Entry *e = get_entry(p_mem);
		ERR_FAIL_COND_V(!e, NULL);
		return &pool[e->pos];
@@ -506,12 +469,10 @@ void *PoolAllocator::get(ID p_mem) {
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_FAIL_COND_V(!e, NULL);
	}

	if (e->lock == 0) {
		//assert(0);
		mt_unlock();
		ERR_PRINT("e->lock == 0");
@@ -519,7 +480,6 @@ void *PoolAllocator::get(ID p_mem) {
	}

	if ((int)e->pos >= pool_size) {
		mt_unlock();
		ERR_PRINT("e->pos<0 || e->pos>=pool_size");
		return NULL;
@@ -531,7 +491,6 @@ void *PoolAllocator::get(ID p_mem) {
	return ptr;
}

void PoolAllocator::unlock(ID p_mem) {
	if (!needs_locking)
		return;

	mt_lock();
@@ -550,22 +509,18 @@ void PoolAllocator::unlock(ID p_mem) {
}

int PoolAllocator::get_used_mem() const {
	return pool_size - free_mem;
}

int PoolAllocator::get_free_peak() {
	return free_mem_peak;
}

int PoolAllocator::get_free_mem() {
	return free_mem;
}

void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
	pool = (uint8_t *)p_mem;
	pool_size = p_size;
@@ -581,7 +536,6 @@ void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
}

PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries) {
	mem_ptr = memalloc(p_size);
	ERR_FAIL_COND(!mem_ptr);
	align = 1;
@@ -590,9 +544,7 @@ PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries
}

PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_locking, int p_max_entries) {
	if (p_align > 1) {
		uint8_t *mem8 = (uint8_t *)p_mem;
		uint64_t ofs = (uint64_t)mem8;
		if (ofs % p_align) {
@@ -610,7 +562,6 @@ PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_
}

PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int p_max_entries) {
	ERR_FAIL_COND(p_align < 1);
	mem_ptr = Memory::alloc_static(p_size + p_align, true);
	uint8_t *mem8 = (uint8_t *)mem_ptr;
@@ -623,7 +574,6 @@ PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int
}

PoolAllocator::~PoolAllocator() {
	if (mem_ptr)
		memfree(mem_ptr);
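
To illustrate the mechanical change applied throughout the hunks above, here is a minimal before/after sketch using a hypothetical demo function (not taken from the diff). As written, blank lines open the function and loop blocks:

int count_free(const int *lens, int n) {

	int free_count = 0;
	for (int i = 0; i < n; i++) {

		if (lens[i] == 0) {
			free_count++;
		}
	}
	return free_count;
}

Running clang-format -i over such a file, with KeepEmptyLinesAtTheStartOfBlocks: false in the active .clang-format, deletes the blank line after each opening brace and touches nothing else. Every hunk in this diff has exactly that shape: a few unchanged context lines plus one or two removed blank lines.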