 *
 * Default size is size of meta block
 */
-#define LWMEM_BLOCK_MIN_SIZE (LWMEM_BLOCK_META_SIZE)
+#define LWMEM_BLOCK_MIN_SIZE (LWMEM_BLOCK_META_SIZE)

/**
 * \brief Get LwMEM instance based on user input
 * \param[in] in_lwobj: LwMEM instance. Set to `NULL` for default instance
 */
-#define LWMEM_GET_LWOBJ(in_lwobj) ((in_lwobj) != NULL ? (in_lwobj) : (&lwmem_default))
+#define LWMEM_GET_LWOBJ(in_lwobj) ((in_lwobj) != NULL ? (in_lwobj) : (&lwmem_default))
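
The NULL-to-default resolution above is what lets the public `_ex` functions accept `NULL` as the instance argument. A minimal usage sketch, assuming the lwmem_malloc_ex()/lwmem_free_ex() variants with (instance, region, size) and (instance, pointer) signatures:

#include "lwmem/lwmem.h"

void demo_default_instance(void) {
    /* NULL instance resolves to the internal lwmem_default object;
     * NULL region is assumed to mean "any assigned region" */
    void* ptr = lwmem_malloc_ex(NULL, NULL, 32);
    if (ptr != NULL) {
        lwmem_free_ex(NULL, ptr);
    }
}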

/**
 * \brief Gets block before input block (marked as prev) and its previous free block
 * \param[in] in_pp: Previous previous of input block
 * \param[in] in_p: Previous of input block
 */
-#define LWMEM_GET_PREV_CURR_OF_BLOCK(in_lwobj, in_b, in_pp, in_p)                                          \
+#define LWMEM_GET_PREV_CURR_OF_BLOCK(in_lwobj, in_b, in_pp, in_p)                                          \
    do {                                                                                                    \
-        for ((in_pp) = NULL, (in_p) = &((in_lwobj)->start_block); (in_p) != NULL && (in_p)->next < (in_b); \
+        for ((in_pp) = NULL, (in_p) = &((in_lwobj)->start_block); (in_p) != NULL && (in_p)->next < (in_b); \
             (in_pp) = (in_p), (in_p) = (in_p)->next) {}                                                    \
    } while (0)
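
For readers less comfortable with statement-like macros, the scan above can be read as the plain function below. This is an illustrative, hypothetical rewrite (the stand-in block_t type and find_prev_pair name are not part of LwMEM); it walks the free list until the next entry would sit at or beyond the input block, remembering the last two visited nodes:

#include <stddef.h>

/* Stand-in type for illustration only; the real block structure is internal to LwMEM */
typedef struct block {
    struct block* next; /* Next free block in the list */
    size_t size;        /* Block size, including metadata */
} block_t;

static void
find_prev_pair(block_t* start_block, const block_t* b, block_t** pp, block_t** p) {
    *pp = NULL; /* No previous-previous block yet */
    for (*p = start_block; *p != NULL && (*p)->next < b; *pp = *p, *p = (*p)->next) {}
}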

/* Statistics part */
#if LWMEM_CFG_ENABLE_STATS
#define LWMEM_INC_STATS(field) (++(field))
-#define LWMEM_UPDATE_MIN_FREE(lwobj)                                                           \
+#define LWMEM_UPDATE_MIN_FREE(lwobj)                                                           \
    do {                                                                                        \
-        if ((lwobj)->mem_available_bytes < (lwobj)->stats.minimum_ever_mem_available_bytes) {  \
-            (lwobj)->stats.minimum_ever_mem_available_bytes = (lwobj)->mem_available_bytes;    \
+        if ((lwobj)->mem_available_bytes < (lwobj)->stats.minimum_ever_mem_available_bytes) {  \
+            (lwobj)->stats.minimum_ever_mem_available_bytes = (lwobj)->mem_available_bytes;    \
        }                                                                                       \
    } while (0)
#else
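
The watermark kept by LWMEM_UPDATE_MIN_FREE can later be read back by the application. A short sketch, assuming the lwmem_get_stats() accessor and lwmem_stats_t type that LwMEM exposes when LWMEM_CFG_ENABLE_STATS is enabled:

#include <stdio.h>
#include "lwmem/lwmem.h"

void print_heap_stats(void) {
    lwmem_stats_t stats;

    lwmem_get_stats(&stats); /* Copy current allocator statistics */
    printf("Minimum ever available: %u bytes, allocations so far: %u\r\n",
           (unsigned)stats.minimum_ever_mem_available_bytes, (unsigned)stats.nr_alloc);
}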
@@ -369,7 +369,7 @@ prv_alloc(lwmem_t* const lwobj, const lwmem_region_t* region, const size_t size)

    /* Set default values */
    prev = &(lwobj->start_block); /* Use pointer from custom lwmem block */
-    curr = prev->next;            /* Curr represents first actual free block */
+    curr = prev->next;            /* Curr represents first actual free block */

    /*
     * If region is not set to NULL,
@@ -431,7 +431,7 @@ prv_alloc(lwmem_t* const lwobj, const lwmem_region_t* region, const size_t size)

    lwobj->mem_available_bytes -= curr->size;         /* Decrease available bytes by allocated block size */
    prv_split_too_big_block(lwobj, curr, final_size); /* Split block if it is too big */
-    LWMEM_BLOCK_SET_ALLOC(curr);                      /* Set block as allocated */
+    LWMEM_BLOCK_SET_ALLOC(curr);                      /* Set block as allocated */

    LWMEM_UPDATE_MIN_FREE(lwobj);
    LWMEM_INC_STATS(lwobj->stats.nr_alloc);
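
From the application side, this decrease/split/mark sequence is hidden behind the regular allocation calls. A minimal sketch using the public lwmem_malloc()/lwmem_free() pair (the buffer size is arbitrary for the example):

#include "lwmem/lwmem.h"

void demo_alloc(void) {
    void* ptr = lwmem_malloc(64); /* Served from a free block, which gets split if too big */
    if (ptr != NULL) {
        /* ... use the memory ... */
        lwmem_free(ptr);          /* Block returns to the free list */
    }
}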
@@ -592,8 +592,8 @@ prv_realloc(lwmem_t* const lwobj, const lwmem_region_t* region, void* const ptr,
            prev->next = prev->next->next; /* Set next to next's next, effectively remove expanded block from free list */

            prv_split_too_big_block(lwobj, block, final_size); /* Split block if it is too big */
-            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
-            return ptr;                                        /* Return existing pointer */
+            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
+            return ptr;                                        /* Return existing pointer */
        }

        /*
@@ -625,8 +625,8 @@ prv_realloc(lwmem_t* const lwobj, const lwmem_region_t* region, void* const ptr,
            block = prev; /* Move block pointer to previous one */

            prv_split_too_big_block(lwobj, block, final_size); /* Split block if it is too big */
-            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
-            return new_data_ptr;                               /* Return new data ptr */
+            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
+            return new_data_ptr;                               /* Return new data ptr */
        }

        /*
@@ -669,8 +669,8 @@ prv_realloc(lwmem_t* const lwobj, const lwmem_region_t* region, void* const ptr,
            block = prev; /* Previous block is now current */

            prv_split_too_big_block(lwobj, block, final_size); /* Split block if it is too big */
-            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
-            return new_data_ptr;                               /* Return new data ptr */
+            LWMEM_BLOCK_SET_ALLOC(block);                      /* Set block as allocated */
+            return new_data_ptr;                               /* Return new data ptr */
        }
    } else {
        /* Hard error. Input pointer is not NULL and block is not considered allocated */
@@ -691,7 +691,7 @@ prv_realloc(lwmem_t* const lwobj, const lwmem_region_t* region, void* const ptr,
        block_size =
            (block->size & ~LWMEM_ALLOC_BIT) - LWMEM_BLOCK_META_SIZE;     /* Get application size from input pointer */
        LWMEM_MEMCPY(retval, ptr, size > block_size ? block_size : size); /* Copy content to new allocated block */
-        prv_free(lwobj, ptr);                                             /* Free input pointer */
+        prv_free(lwobj, ptr);                                             /* Free input pointer */
    }
    return retval;
}
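
Taken together, the realloc path first tries to grow or merge in place and only then falls back to the allocate-copy-free route shown in this hunk. A minimal usage sketch with the public lwmem_realloc() (names and sizes are illustrative):

#include <string.h>
#include "lwmem/lwmem.h"

void demo_realloc(void) {
    char* str = lwmem_malloc(16);
    if (str == NULL) {
        return;
    }
    strcpy(str, "hello");

    char* bigger = lwmem_realloc(str, 64); /* May grow in place or return a freshly copied block */
    if (bigger != NULL) {
        str = bigger;                      /* On success the old pointer must no longer be used */
    }
    lwmem_free(str);
}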
@@ -760,7 +760,7 @@ lwmem_assignmem_ex(lwmem_t* lwobj, const lwmem_region_t* regions) {
#if LWMEM_CFG_OS
        || lwmem_sys_mutex_isvalid(&(lwobj->mutex)) /* Check if mutex valid already = must not be */
        || !lwmem_sys_mutex_create(&(lwobj->mutex)) /* Final step = try to create mutex for new instance */
-#endif /* LWMEM_CFG_OS */
+#endif /* LWMEM_CFG_OS */
    ) {
        return 0;
    }
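
For completeness, the instance set up by lwmem_assignmem_ex() is typically reached through the public lwmem_assignmem() entry point. A sketch, assuming this LwMEM version's single-argument form where the region array is terminated by a zeroed entry (region name and size are illustrative):

#include <stdint.h>
#include "lwmem/lwmem.h"

static uint8_t region1_mem[1024]; /* Backing memory handed over to the allocator */

static lwmem_region_t regions[] = {
    {region1_mem, sizeof(region1_mem)},
    {NULL, 0}, /* Terminating entry */
};

void heap_init(void) {
    if (lwmem_assignmem(regions) == 0) {
        /* 0 indicates failure, e.g. regions already assigned or mutex creation failed */
    }
}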