This repository has been archived by the owner on Sep 24, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
/
mneme.c
142 lines (114 loc) · 3.21 KB
/
mneme.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#include "mneme.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <unistd.h>
/*
 * One slab: a single page-sized region obtained from mmap.
 * Free objects form an intrusive singly linked free list: the first two
 * bytes of each free object store the index of the next free object
 * (see allocate_slab / allocate_object below).
 */
struct slab {
	struct slab *next;  /* next slab in the cache's empty/partial/full list */
	uint16_t remain;    /* number of objects still free in this slab */
	uint16_t free;      /* index of the first free object in objects[] */
	uint8_t objects[];  /* flexible array member: the object storage area */
};
/* System page size, cached by the first successful mneme_create call;
 * 0 means "not yet queried via sysconf". Each slab occupies one page. */
static long int pgsize = 0;
/* Round `size` up to the next multiple of `alignment`.
 * `alignment` must be a power of two. */
static inline size_t align(size_t size, size_t alignment) {
	size_t mask = alignment - (size_t) 1;
	return (size + mask) & ~mask;
}
/* Smallest pointer-aligned size able to hold an object of `size` bytes.
 * (Pointer alignment guarantees room for the two-byte free-list index.) */
static inline size_t obj_size(size_t size) {
	size_t mask = sizeof (void *) - (size_t) 1;
	return (size + mask) & ~mask;
}
/*
 * Initialise `cache` for objects of `size` bytes.
 * Returns 0 on success, -1 if the system page size cannot be determined,
 * or EINVAL if `size` is too large for at least two objects per slab.
 */
int mneme_create(struct cache *cache, uint16_t size) {
	/* lazily query the system page size on first use */
	if (!pgsize) {
		pgsize = sysconf(_SC_PAGESIZE);
		/* BUG FIX: sysconf returns -1 (a long) on failure. The original
		 * compared it directly against the size_t result of obj_size(),
		 * converting -1 to SIZE_MAX, so the failure was never detected.
		 * Check the signed value before the unsigned comparison. */
		if (pgsize < 0 || (size_t) pgsize < obj_size(1)) {
			pgsize = 0; /* leave uninitialised so a later call may retry */
			return -1;
		}
	}
	/* reject sizes so large that fewer than two objects fit in one slab */
	if (size > (pgsize - sizeof (struct slab)) / 2)
		return EINVAL;
	cache->size = obj_size(size);
	cache->empty = (struct slab *) 0;
	cache->partial = (struct slab *) 0;
	cache->full = (struct slab *) 0;
	return 0;
}
/* Unmap every slab in a singly linked list.
 * Returns 0 on success, -1 if any munmap fails (remaining slabs are
 * leaked in that case, since their links live inside the mapping). */
static inline int free_slabs(struct slab *slab) {
	while (slab) {
		struct slab *next = slab->next; /* read the link before unmapping */
		if (munmap(slab, pgsize))
			return -1;
		slab = next;
	}
	/* BUG FIX: the original fell off the end of a non-void function,
	 * which is undefined behaviour because mneme_destroy uses the
	 * return value. Report success explicitly. */
	return 0;
}
/* Release every slab owned by `cache`.
 * Returns 0 on success, -1 as soon as any unmapping fails. */
int mneme_destroy(struct cache *cache) {
	struct slab *lists[] = { cache->empty, cache->partial, cache->full };
	for (size_t i = 0; i < sizeof lists / sizeof lists[0]; ++i) {
		if (free_slabs(lists[i]))
			return -1;
	}
	return 0;
}
/* Pop the head object off `slab`'s intrusive free list and return it.
 * Caller must ensure slab->remain > 0. */
static inline void *allocate_object(struct cache *cache, struct slab *slab) {
	uint8_t *object = slab->objects + cache->size * slab->free;
	/* a free object's first two bytes hold the index of the next free one */
	slab->free = *(uint16_t *) object;
	slab->remain--;
	return object;
}
/* Map one fresh page-sized slab and thread its intrusive free list.
 * Each free object's first two bytes store the index of the next free
 * object. Returns NULL if the mapping fails. */
static inline struct slab *allocate_slab(struct cache *cache) {
	struct slab *slab = mmap((void *) 0, pgsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (slab == MAP_FAILED)
		return (struct slab *) 0;
	/* anonymous mappings are zero-filled, but make the invariant explicit */
	slab->next = (struct slab *) 0;
	slab->remain = (pgsize - sizeof (struct slab)) / cache->size;
	slab->free = 0;
	/* BUG FIX: the original loop `*(...iter) = ++iter;` both read and
	 * modified `iter` with no intervening sequence point (undefined
	 * behaviour), and its `<=` bound wrote one entry past the last
	 * object — potentially past the end of the page. Object i simply
	 * points to object i+1; the final object stores the out-of-range
	 * index `remain`, which is never followed because `remain` reaches
	 * zero first. */
	for (uint16_t iter = 0; iter < slab->remain; ++iter)
		*(uint16_t *) (slab->objects + cache->size * iter) = iter + 1;
	return slab;
}
/* Unlink the head slab of *source and push it onto the front of
 * *destination. *source must be non-NULL. */
static inline void move_slab(struct slab **source, struct slab **destination) {
	struct slab *moved = *source;
	*source = moved->next;
	moved->next = *destination;
	*destination = moved;
}
void *mneme_allocate(struct cache *cache) {
void *object;
/* try to allocate from partial slabs first */
if (cache->partial) {
object = allocate_object(cache, cache->partial);
/* move slab from partial to full if necessary */
if (!cache->partial->remain)
move_slab(&cache->partial, &cache->full);
}
/* then try empty slabs */
else if (cache->empty) {
object = allocate_object(cache, cache->empty);
/* move slab from empty to partial */
move_slab(&cache->empty, &cache->partial);
}
/* allocate new slab */
else {
cache->empty = allocate_slab(cache);
if (!cache->empty)
return (void *) 0;
object = allocate_object(cache, cache->empty);
/* move slab from empty to partial */
move_slab(&cache->empty, &cache->partial);
}
return object;
}
/* Return `object` to `cache`. NOT YET IMPLEMENTED: this is currently a
 * no-op, so released objects are never reused and their slabs are only
 * reclaimed by mneme_destroy. */
void mneme_release(struct cache *cache, void *object) {
	/*
	 * TODO: Implement this function ;)
	 * Problem: How do we determine the slab the object belongs to?
	 * Possible solutions:
	 * - Iterate over all slabs (slow)
	 * - Keep a per-page pointer to the slab it belongs to
	 * - Limit the slab size to the page size
	 */
}