package cachePool
/*
#### In daily work we often need a key/value cache where the value is modified frequently, so the value must be a pointer.
The usual approach is map[key]*struct{xxx}, make()-ing a value object each time. If the map is large, this:
1. fragments memory
2. makes GC scans take too long
3. causes heavy lock contention when the map is read and written frequently, hurting efficiency
#### To avoid these problems, we need the following:
1. Neither the map key nor the value contains pointers, so the GC does not scan them; bigcache uses map[int]int the same way.
Verification: [slice and map are different data types, and their GC scan times differ too](https://github.com/jursonmo/articles/blob/master/record/go/performent/slice_map_gc.md)
2. Map operations need an RWMutex, but frequent reads and writes cause too much contention; a shardMap reduces the lock contention.
3. Pre-allocate a large object pool; when allocating an object, take it straight from the pool instead of calling make, and put it back when done.
The key is an efficient, scalable object pool:
3.1 slots: quickly find an available object
3.2 spinLock: a spin lock (needs time and testing to prove itself, and must be used carefully)
3.3 or use a ring buffer to record object positions.
all: no pointer in key or value; otherwise the value's pointer will not be tracked by the GC while cachePool is working
1. slots or ringslots record the free buffer positions
2. map[key]positionID: the positionID indicates the buffer position, from which the value can be retrieved
*/
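/*
A minimal sketch (not part of the implementation) of the "map[key]positionID" idea above, assuming a
hypothetical packPosition/unpackPosition pair: the map value is a plain integer that encodes where the
entry lives, so neither key nor value holds a pointer and the GC has nothing to scan here. The package
itself derives the id directly from the EntryHeader memory layout (see GetElemID / getEntryFromElemID
below) rather than shifting bits like this.

	//packPosition packs a pool index and an entry byte offset into one pointer-free word.
	func packPosition(poolId, entryOffset uint32) uint64 {
		return uint64(poolId)<<32 | uint64(entryOffset)
	}

	//unpackPosition reverses packPosition.
	func unpackPosition(pos uint64) (poolId, entryOffset uint32) {
		return uint32(pos >> 32), uint32(pos)
	}
*/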
import (
"flag"
"fmt"
"sync"
"sync/atomic"
"unsafe"
)
const (
MaxPoolSize = 1<<31 - 1
IdMask = 1<<31 - 1
HigestBit = 1 << 31 //entry "used" flag bit; idleSlot init value is HigestBit
UsedFlag = HigestBit
Invalid = HigestBit
)
var InvalidEntryHeader EntryHeader
var useSlots bool
func init() {
InvalidEntryHeader = EntryHeader{entryPosition: entryPosition{entryId: Invalid}}
flag.BoolVar(&useSlots, "useSlots", true, "use slots or use ring for position")
}
type entryPosition struct {
poolId uint32 //could be uint8; the need for poolId came up later, and the "using" flag could actually live here
entryId uint32 //pool buffer index
}
type EntryHeader struct {
//p *Pool
entryPosition
/*
1. nextFree's highest bit is the "using" flag
2. slotsPosition: a buffer EntryHeader's nextFree holds the corresponding slot index,
but in slots []EntryHeader, an EntryHeader's nextFree means the next free slot's index
3. ringPosition: a buffer EntryHeader's nextFree only uses the highest bit (meaning the entry is used);
a ring EntryHeader's nextFree is used to check availability
*/
nextFree uint32 //Go has no union type
}
//like list_head: put the header at the first position of the entry node
type Entry struct {
EntryHeader //first member
//user data Value
Value
}
type Value struct {
A, B, C int //if a field would be a pointer, use uintptr instead, and make sure the pointed-to object is kept from being reclaimed by the GC
}
//Key must implement Hash() for shardMap
type Key struct {
A, B, C int //avoid pointers so the GC does not have to scan the keys
}
func (k *Key) Hash() int {
return k.A
}
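//Hash above is only a demo: it returns k.A directly. Shards are selected as maps[key.Hash()&shardMask],
//so a real key should mix all of its fields to spread keys across shards. A dependency-free sketch
//(hypothetical helper, not part of the package):
//
//	func (k *Key) mixedHash() int {
//		h := k.A
//		h = h*31 + k.B
//		h = h*31 + k.C
//		return h
//	}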
type CachePoolConf struct {
poolNum int
poolCap int
autoExtend bool
maxPool int
shardSize int
}
type cachePool struct {
pools []*Pool
sm *poolShardMap
sync.Mutex
CachePoolConf
}
//EntryPositioner stores the positions of all available entries
type EntryPositioner interface {
String() string
InitPosition(buffer []byte, poolIndex, cap, entrySize int) error
PutEntryHeader(*EntryHeader) bool //puts an entry's position back into the ring buffer or slots
GetEntryHeader() EntryHeader //gets an available entry's position from the ring buffer or slots
}
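//A minimal, mutex-guarded sketch of the EntryPositioner contract, for illustration only: the real
//slotsPosition and ringEntryPosition implementations live elsewhere in this package and use lighter
//synchronization (spin lock / atomics). The name naiveSlotsPositioner is hypothetical; entryId is
//treated as a byte offset into the pool buffer, matching Pool.GetEntry and Pool.invalid below.
type naiveSlotsPositioner struct {
	mu     sync.Mutex
	poolId uint32
	free   []uint32 //byte offsets of free entries, used as a LIFO stack
}

func (s *naiveSlotsPositioner) String() string {
	return fmt.Sprintf("naiveSlotsPositioner: poolId=%d, free=%d", s.poolId, len(s.free))
}

//InitPosition stamps every entry header in the buffer with its pool id and byte offset, and records all offsets as free.
func (s *naiveSlotsPositioner) InitPosition(buffer []byte, poolIndex, cap, entrySize int) error {
	if cap*entrySize > len(buffer) {
		return fmt.Errorf("buffer too small for %d entries", cap)
	}
	s.poolId = uint32(poolIndex)
	s.free = make([]uint32, 0, cap)
	for i := 0; i < cap; i++ {
		off := uint32(i * entrySize)
		h := (*EntryHeader)(unsafe.Pointer(&buffer[off]))
		h.poolId = s.poolId
		h.entryId = off
		h.nextFree = 0 //UsedFlag cleared
		s.free = append(s.free, off)
	}
	return nil
}

//PutEntryHeader returns an entry's position to the free stack.
func (s *naiveSlotsPositioner) PutEntryHeader(e *EntryHeader) bool {
	s.mu.Lock()
	s.free = append(s.free, e.entryId)
	s.mu.Unlock()
	return true
}

//GetEntryHeader pops a free position, or returns InvalidEntryHeader when the pool is exhausted.
func (s *naiveSlotsPositioner) GetEntryHeader() EntryHeader {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.free) == 0 {
		return InvalidEntryHeader
	}
	off := s.free[len(s.free)-1]
	s.free = s.free[:len(s.free)-1]
	return EntryHeader{entryPosition: entryPosition{poolId: s.poolId, entryId: off}}
}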
type Pool struct {
//sync.RWMutex
index int
size uint32 //entry num
buffer []byte
useSlots bool
positioner EntryPositioner
//use slots for pool
//slotsPosition
//use ring for pool
//ringEntryPosition
}
type poolShardMap struct {
sync.RWMutex
origSize int
shardSize int
shardMask int
maps []map[Key]uint64
}
var offset uintptr
var entrySize int
func init() {
e := Entry{}
entrySize = int(unsafe.Sizeof(e))
offset = unsafe.Offsetof(e.Value)
}
//for the buffer's EntryHeader
func (e *Entry) String() string {
return fmt.Sprintf("entry:pid=%d, entryId=%d, entry th=%d, used=%v", e.poolId, e.entryId, e.nextFree&IdMask, e.isUsed())
}
func (e *EntryHeader) isUsed() bool {
return e.nextFree&UsedFlag != 0
}
func (e *EntryHeader) String() string {
return fmt.Sprintf("entryheader:pid=%d, entryId=%d, nexfree slot=%d, valid=%v", e.poolId, e.entryId, e.nextFree&IdMask, !e.invalid())
}
func (e *EntryHeader) invalid() bool {
return e.nextFree&Invalid != 0
}
type Option func(*CachePoolConf)
func OptionWithAutoExtend(b bool) Option {
return func(c *CachePoolConf) {
c.autoExtend = b
}
}
func OptionWithMaxPool(n int) Option {
return func(c *CachePoolConf) {
c.maxPool = n
}
}
func OptionWithShardSize(n int) Option {
return func(c *CachePoolConf) {
c.shardSize = n
}
}
func (c *CachePoolConf) Check() error {
if c.poolCap == 0 || c.shardSize == 0 {
return fmt.Errorf("poolCap or shardSize eq 0")
}
return nil
}
func NewCachePool(poolNum, poolCap int, opts ...Option) (cp *cachePool, err error) {
cp = new(cachePool)
cp.poolCap = poolCap
cp.autoExtend = true //default
if cp.shardSize == 0 {
cp.shardSize = poolNum //default shardSize equals the initial poolNum
}
if cp.shardSize > 16 {
cp.shardSize = 16
}
for _, opt := range opts {
opt(&cp.CachePoolConf)
}
err = cp.Check()
if err != nil {
return
}
cp.pools = make([]*Pool, poolNum)
for i := 0; i < len(cp.pools); i++ {
_, err = cp.NewPool()
if err != nil {
return
}
}
cp.sm, err = NewShardMap(cp.shardSize)
return
}
func (cp *cachePool) NewPool() (p *Pool, err error) {
//choose an available slot of cachePool to store the new Pool
for i := 0; i < len(cp.pools); i++ {
if cp.pools[i] == nil {
p, err = NewPool(i, cp.poolCap)
if err != nil {
return
}
cp.pools[i] = p
cp.poolNum++
return
}
}
//no available slot, so create a new Pool and append it to cachePool
p, err = NewPool(len(cp.pools), cp.poolCap)
if err != nil {
return
}
cp.pools = append(cp.pools, p)
cp.poolNum++
return
}
func (cp *cachePool) String() string {
return fmt.Sprintf("poolNum:%d, poolcap:%d, shardMap size:%d", cp.GetPoolNum(), cp.poolCap, cp.sm.shardSize)
}
func (cp *cachePool) GetPoolNum() int {
return cp.poolNum
}
func (cp *cachePool) Capacity() int {
capSum := 0
for _, pool := range cp.pools {
if pool != nil {
capSum += int(pool.Cap())
}
}
return capSum
}
func (cp *cachePool) GetPoolPositioner(i int) EntryPositioner {
return cp.pools[i].positioner
}
func NewPool(index, cap int) (*Pool, error) {
var err error
if index < 0 || cap < 0 {
return nil, fmt.Errorf("pool index or cap invalid")
}
if cap > MaxPoolSize {
return nil, fmt.Errorf("MaxPoolSize is %d", MaxPoolSize)
}
p := &Pool{}
p.index = index
p.size = uint32(cap)
p.buffer = make([]byte, cap*entrySize)
p.useSlots = useSlots
if p.useSlots {
//p.positioner = &p.slotsPosition
p.positioner = new(slotsPosition)
} else {
//p.positioner = &p.ringEntryPosition
p.positioner = new(ringEntryPosition)
}
err = p.positioner.InitPosition(p.buffer, p.index, cap, entrySize)
fmt.Println(p)
p.showEntrys()
return p, err
}
func (p *Pool) Cap() uint32 {
return p.size
}
func (p *Pool) invalid(id uint32) bool {
	return id >= uint32(len(p.buffer))
}
func (p *Pool) String() string {
return fmt.Sprintf("pool:index=%d, size=%d, entrySize=%d, bufferSize=%d, useSlots=%v", p.index, p.size, entrySize, len(p.buffer), p.useSlots)
}
func (p *Pool) showEntrys() {
for i := 0; i < int(p.size); i++ {
e := (*EntryHeader)(unsafe.Pointer(&p.buffer[i*entrySize]))
fmt.Printf("i:=%d, %s\n", i, e)
}
}
func GetEntryFromElem(v *Value) *Entry {
return (*Entry)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) - offset))
}
func GetElemID(v *Value) uint64 {
e := GetEntryFromElem(v)
return *(*uint64)(unsafe.Pointer(&e.EntryHeader))
}
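//Note: GetEntryFromElem is a container_of-style conversion: Value sits at a fixed offset inside Entry,
//so subtracting that offset from a *Value recovers the enclosing *Entry. GetElemID then reinterprets
//the first 8 bytes of the EntryHeader (poolId, entryId) as a single uint64; that pointer-free id is
//what the shard map stores, and getEntryFromElemID below performs the reverse cast.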
func (cp *cachePool) PutValue(v *Value) {
if v == nil {
return
}
e := GetEntryFromElem(v)
cp.PutEntry(e)
}
//here Entry is the pool buffer's Entry
func (cp *cachePool) PutEntry(e *Entry) bool {
index := int(e.poolId)
if index >= len(cp.pools) {
return false
}
//todo: if utilization is very low, we could skip putting it back; once a pool's utilization drops to 0, the pool can be released and reclaimed by the GC
//clean UsedFlag even if put fail
//e.nextFree &= (UsedFlag - 1)
//check if this entry has already been put back, to avoid doing PutEntry twice
//do it lockless
for {
flag := atomic.LoadUint32(&e.nextFree)
if flag&UsedFlag == 0 { //have clean UsedFlag, means it has been put back to pool
break
}
//clean UsedFlag
newflag := flag & (UsedFlag - 1)
if atomic.CompareAndSwapUint32(&e.nextFree, flag, newflag) {
break
}
}
return cp.pools[index].PutEntry(e)
}
//Delete Value --> buffer entry --> putEntry()
func (p *Pool) PutEntry(e *Entry) bool {
return p.positioner.PutEntryHeader(&e.EntryHeader)
}
func (cp *cachePool) GetValue() *Value {
var p *Pool
poolNum := 0
start := getPid()
for {
pools := cp.pools
max := len(pools)
poolNum = cp.GetPoolNum()
for n := 0; n < max; n++ {
start++
p = pools[start%max]
if p == nil {
continue
}
entry := p.GetEntry()
fmt.Printf("process id:%d\n", start)
if entry != nil {
//return entry
return &entry.Value
}
}
if !cp.autoExtend {
return nil
}
var err error
cp.Lock()
if poolNum < cp.GetPoolNum() { //have apppend new pool
//start = len(cp.pools) - 1 //so start from last pool
cp.Unlock()
continue
}
if cp.maxPool != 0 && cp.GetPoolNum() >= cp.maxPool {
//log
fmt.Printf("have touch top, cp.maxPool:%d", cp.maxPool)
cp.Unlock()
return nil
}
p, err = cp.NewPool()
if err != nil {
cp.Unlock()
return nil
}
entry := p.GetEntry()
cp.Unlock()
//log
fmt.Printf("add new pool,now cp:%s\n", cp)
if entry == nil {
panic("new pool, get entry must be successfull")
}
return &entry.Value
}
return nil
}
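//getPid is assumed to be defined elsewhere in this package; GetValue only uses it as a starting index
//so that concurrent callers begin at different pools. A trivial stand-in (sketch, not the real
//implementation) could be an atomic counter:
//
//	var pidCounter uint32
//
//	func getPidSketch() int {
//		return int(atomic.AddUint32(&pidCounter, 1))
//	}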
func (p *Pool) GetEntry() *Entry {
eh := p.positioner.GetEntryHeader()
if p.invalid(eh.entryId) {
//log
return nil
}
entry := (*Entry)(unsafe.Pointer(&p.buffer[int(eh.entryId)]))
fmt.Println("GetValue:", entry)
if entry.isUsed() {
panic("GetEntry: entry have been used?")
}
entry.nextFree |= UsedFlag //means this entry of buffer has been used
return entry
}
func NewShardMap(n int) (*poolShardMap, error) {
if n == 0 {
return nil, fmt.Errorf("Shards number must be > 0 ")
}
// if !IsPowerOfTwo(n) {
// return nil, fmt.Errorf("Shards number must be power of two")
// }
sm := &poolShardMap{}
sm.origSize = n
sm.shardSize = CeilToPowerOfTwo(n)
sm.shardMask = sm.shardSize - 1
sm.maps = make([]map[Key]uint64, sm.shardSize)
for i := range sm.maps {
sm.maps[i] = make(map[Key]uint64)
}
return sm, nil
}
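//CeilToPowerOfTwo is defined elsewhere in this package; rounding the shard count up to a power of two
//lets "hash & shardMask" replace a modulo. A typical implementation (sketch, not necessarily the
//package's own) looks like:
//
//	func ceilToPowerOfTwo(n int) int {
//		p := 1
//		for p < n {
//			p <<= 1
//		}
//		return p
//	}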
func (cp *cachePool) Store(key Key, v *Value) {
hash := key.Hash()
m := cp.sm.maps[hash&cp.sm.shardMask]
elemID := GetElemID(v)
cp.sm.Lock()
m[key] = elemID
cp.sm.Unlock()
}
func (cp *cachePool) Load(key Key) *Value {
hash := key.Hash()
m := cp.sm.maps[hash&cp.sm.shardMask]
cp.sm.RLock()
elemID, ok := m[key]
cp.sm.RUnlock()
if !ok {
return nil
}
return cp.getValueFromElemID(elemID)
}
func (cp *cachePool) Delete(key Key) {
hash := key.Hash()
m := cp.sm.maps[hash&cp.sm.shardMask]
cp.sm.Lock()
delete(m, key)
cp.sm.Unlock()
return
}
func (cp *cachePool) DeleteAndFreeValue(key Key) bool {
hash := key.Hash()
m := cp.sm.maps[hash&cp.sm.shardMask]
cp.sm.Lock()
elemID, ok := m[key]
if ok {
delete(m, key)
}
cp.sm.Unlock()
if !ok {
return false
}
e := cp.getEntryFromElemID(elemID)
if e == nil {
return false
}
return cp.PutEntry(e)
}
func (cp *cachePool) getValueFromElemID(elemID uint64) *Value {
e := cp.getEntryFromElemID(elemID)
if e == nil {
return nil
}
return &e.Value
}
func (cp *cachePool) getEntryFromElemID(elemID uint64) *Entry {
entryh := (*EntryHeader)(unsafe.Pointer(&elemID))
if int(entryh.poolId) >= len(cp.pools) {
// log
return nil
}
pbuf := cp.pools[entryh.poolId].buffer
if int(entryh.entryId) >= len(pbuf) {
// log
return nil
}
return (*Entry)(unsafe.Pointer(&pbuf[entryh.entryId]))
}
/*
func main() {
flag.Parse()
cp, err := NewCachePool(2, 4)
if err != nil {
return
}
key := Key{1, 2, 3}
v := cp.GetValue()
if v == nil {
panic("v is nil")
}
e := GetEntryFromElem(v)
fmt.Println(e)
//do something with v
v.A = 3
v.B = 2
v.C = 1
cp.Store(key, v)
newv := cp.Load(key)
if v != newv {
fmt.Println(v, newv)
panic("v != newv ")
}
fmt.Println("=========showing================")
fmt.Println(cp.pools[0].positioner)
ok := cp.DeleteAndFreeValue(key)
if !ok {
panic("DeleteAndFreeValue fail")
}
ok = cp.DeleteAndFreeValue(key)
if ok {
panic("DeleteAndFreeValue double delete fail")
}
fmt.Println(cp.pools[0].positioner)
//test that the cache pool extends automatically
testExtend()
}
func testExtend() {
fmt.Println("------ testExtend----------------")
poolNum := 2
poolCap := 2
cp, err := NewCachePool(poolNum, poolCap)
if err != nil {
return
}
for i := 0; i < poolCap*poolNum; i++ {
v := cp.GetValue()
if v == nil {
panic("v==nil")
}
}
v := cp.GetValue() //make cp extend pool
if v == nil {
panic("v==nil")
}
n := cp.GetPoolNum()
if n != poolNum+1 {
panic("")
}
fmt.Println(cp)
}
*/