
Commit ad2fa8b

add 1.13 file
1 parent 7c373b8 · commit ad2fa8b

5 files changed: 704 additions & 0 deletions

1.13/memory.md

Whitespace-only changes.

1.13/rwmutex.md

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
// There is a modified copy of this file in runtime/rwmutex.go.
// If you make any changes here, see if you should make them there.

// A RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single writer.
// The zero value for a RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might
// call Lock, no goroutine should expect to be able to acquire a read lock
// until the initial read lock is released. In particular, this prohibits
// recursive read locking. This is to ensure that the lock eventually becomes
// available; a blocked Lock call excludes new readers from acquiring the
// lock.
type RWMutex struct {
	w           Mutex  // held if there are pending writers
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers
	readerWait  int32  // number of departing readers
}

const rwmutexMaxReaders = 1 << 30

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock. See the
// documentation on the RWMutex type.
func (rw *RWMutex) RLock() {
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime_SemacquireMutex(&rw.readerSem, false, 0)
	}
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		// Outlined slow-path to allow the fast-path to be inlined
		rw.rUnlockSlow(r)
	}
}

func (rw *RWMutex) rUnlockSlow(r int32) {
	if r+1 == 0 || r+1 == -rwmutexMaxReaders {
		throw("sync: RUnlock of unlocked RWMutex")
	}
	// A writer is pending.
	if atomic.AddInt32(&rw.readerWait, -1) == 0 {
		// The last reader unblocks the writer.
		runtime_Semrelease(&rw.writerSem, false, 1)
	}
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
func (rw *RWMutex) Lock() {
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_SemacquireMutex(&rw.writerSem, false, 0)
	}
}

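The readerCount field encodes two facts in one int32: a non-negative value is simply the number of active readers, while a pending writer makes it negative by subtracting rwmutexMaxReaders in Lock, without losing the count itself. Below is a standalone sketch of that encoding; decodeReaderCount is a hypothetical helper for illustration, not part of the sync package.

package main

import "fmt"

const rwmutexMaxReaders = 1 << 30 // same value as in the file above

// decodeReaderCount unpacks the two facts readerCount encodes at once.
func decodeReaderCount(v int32) (writerPending bool, readers int32) {
	if v < 0 {
		// Lock subtracted rwmutexMaxReaders; add it back to recover the count.
		return true, v + rwmutexMaxReaders
	}
	return false, v
}

func main() {
	// Three readers hold the lock and a writer has called Lock:
	// readerCount is 3 - rwmutexMaxReaders, which is negative.
	fmt.Println(decodeReaderCount(3 - rwmutexMaxReaders)) // true 3
	// Two readers, no writer pending.
	fmt.Println(decodeReaderCount(2)) // false 2
}
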
// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) a RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		throw("sync: Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		runtime_Semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
}

// RLocker returns a Locker interface that implements
// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
func (rw *RWMutex) RLocker() Locker {
	return (*rlocker)(rw)
}

type rlocker RWMutex

func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }

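A short usage sketch against the public sync API (the map and goroutine counts here are illustrative, not taken from the file above): any number of goroutines read under RLock, a writer takes Lock, and RLocker adapts the read side to the plain Locker interface.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.RWMutex
	counts := map[string]int{"a": 1}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.RLock() // many readers may hold the lock at once
			_ = counts["a"]
			mu.RUnlock()
		}()
	}

	mu.Lock() // the writer excludes readers and other writers
	counts["a"]++
	mu.Unlock()

	var r sync.Locker = mu.RLocker() // read lock behind the Locker interface
	r.Lock()
	fmt.Println(counts["a"])
	r.Unlock()

	wg.Wait()
}

Note the caveat from the type's documentation: a goroutine that already holds a read lock must not RLock again while another goroutine may be blocked in Lock, since the pending writer blocks new readers and the two can deadlock.
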

1.13/select.md

Whitespace-only changes.

1.13/sync_pool.md

Lines changed: 254 additions & 0 deletions
@@ -0,0 +1,254 @@
// A Pool is a set of temporary objects that may be individually saved and
// retrieved.
//
// Any item stored in the Pool may be removed automatically at any time without
// notification. If the Pool holds the only reference when this happens, the
// item might be deallocated.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
//
// An appropriate use of a Pool is to manage a group of temporary items
// silently shared among and potentially reused by concurrent independent
// clients of a package. Pool provides a way to amortize allocation overhead
// across many clients.
//
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
//
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
//
// A Pool must not be copied after first use.
type Pool struct {
	noCopy noCopy

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	victim     unsafe.Pointer // local from previous cycle
	victimSize uintptr        // size of victims array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}

// Local per-P Pool appendix.
type poolLocalInternal struct {
	private interface{} // Can be used only by the respective P.
	shared  poolChain   // Local P can pushHead/popHead; any P can popTail.
}

type poolLocal struct {
	poolLocalInternal

	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}

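The pad field rounds every per-P slot up to a multiple of 128 bytes, so two adjacent poolLocal entries in the [P]poolLocal array can never share a cache line. Below is a standalone sketch of the same trick; payload and padded are hypothetical stand-ins for poolLocalInternal and poolLocal, and the printed sizes assume a typical 64-bit platform.

package main

import (
	"fmt"
	"unsafe"
)

// payload stands in for poolLocalInternal: an interface value plus two pointers.
type payload struct {
	private interface{}
	shared  [2]unsafe.Pointer
}

// padded stands in for poolLocal: the payload plus enough padding to reach a
// multiple of 128 bytes, so adjacent array elements cannot share a cache line.
type padded struct {
	payload
	pad [128 - unsafe.Sizeof(payload{})%128]byte
}

func main() {
	// On a typical 64-bit platform this prints "32 128".
	fmt.Println(unsafe.Sizeof(payload{}), unsafe.Sizeof(padded{}))
}

Whether the cache line is 64 bytes (common on x86-64) or 128 bytes (some ARM and POWER parts), a 128-byte stride keeps neighbouring slots apart, which is what the "128 mod (cache line size) = 0" comment is getting at.
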
// from runtime
func fastrand() uint32

var poolRaceHash [128]uint64

// poolRaceAddr returns an address to use as the synchronization point
// for race detector logic. We don't use the actual pointer stored in x
// directly, for fear of conflicting with other synchronization on that address.
// Instead, we hash the pointer to get an index into poolRaceHash.
// See discussion on golang.org/cl/31589.
func poolRaceAddr(x interface{}) unsafe.Pointer {
	ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
	h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
	return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
}

// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		return
	}

	l, _ := p.pin()
	if l.private == nil {
		l.private = x
		x = nil
	}
	if x != nil {
		l.shared.pushHead(x)
	}
	runtime_procUnpin()
}

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	l, pid := p.pin()
	x := l.private
	l.private = nil
	if x == nil {
		// Try to pop the head of the local shard. We prefer
		// the head over the tail for temporal locality of
		// reuse.
		x, _ = l.shared.popHead()
		if x == nil {
			x = p.getSlow(pid)
		}
	}
	runtime_procUnpin()

	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}

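A minimal usage sketch in the spirit of the fmt example mentioned in the doc comment above (bufPool and render are illustrative names, not taken from fmt): Get either pops a previously cached buffer or falls back to New, and Put hands a reset buffer back for reuse.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer) // called only when the pool has nothing to offer
	},
}

func render(name string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset() // always hand back a clean buffer
		bufPool.Put(buf)
	}()
	fmt.Fprintf(buf, "hello, %s", name)
	return buf.String()
}

func main() {
	fmt.Println(render("gopher"))
}

Because Get may ignore the pool and treat it as empty, callers have to treat every returned buffer as if it were brand new and must not rely on any pairing between Put and Get.
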
func (p *Pool) getSlow(pid int) interface{} {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	locals := p.local                        // load-consume
	// Try to steal one element from other procs.
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i+1)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Try the victim cache. We do this after attempting to steal
	// from all primary caches because we want objects in the
	// victim cache to age out if at all possible.
	size = atomic.LoadUintptr(&p.victimSize)
	if uintptr(pid) >= size {
		return nil
	}
	locals = p.victim
	l := indexLocal(locals, pid)
	if x := l.private; x != nil {
		l.private = nil
		return x
	}
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Mark the victim cache as empty for future gets don't bother
	// with it.
	atomic.StoreUintptr(&p.victimSize, 0)

	return nil
}

// pin pins the current goroutine to P, disables preemption and
// returns poolLocal pool for the P and the P's id.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() (*poolLocal, int) {
	pid := runtime_procPin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	return p.pinSlow()
}

func (p *Pool) pinSlow() (*poolLocal, int) {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid], pid
}

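pin and pinSlow cooperate through the ordering spelled out in the comments above: pinSlow publishes the new array first and its size second, so a reader that observes a size covering its index is guaranteed to observe the array as well. Below is a reduced, standalone sketch of that publish-then-read pattern; publish, lookup, slots, and slotsSize are hypothetical names, and unlike the original, which relies on a plain "load-consume" of p.local, the sketch loads the pointer atomically so it stands on its own.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

var (
	slots     unsafe.Pointer // holds a *[]string once published
	slotsSize uintptr        // number of valid entries, published after slots
)

// publish mirrors pinSlow: store the data first, the size last (store-release).
func publish(values []string) {
	atomic.StorePointer(&slots, unsafe.Pointer(&values))
	atomic.StoreUintptr(&slotsSize, uintptr(len(values)))
}

// lookup mirrors pin and getSlow: load the size first (load-acquire), then the
// data; a size that covers i implies the slice pointer is already visible.
func lookup(i int) (string, bool) {
	n := atomic.LoadUintptr(&slotsSize)
	if uintptr(i) >= n {
		return "", false
	}
	values := *(*[]string)(atomic.LoadPointer(&slots))
	return values[i], true
}

func main() {
	publish([]string{"a", "b"})
	fmt.Println(lookup(1)) // b true
}
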
func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.

	// Because the world is stopped, no pool user can be in a
	// pinned section (in effect, this has all Ps pinned).

	// Drop victim caches from all pools.
	for _, p := range oldPools {
		p.victim = nil
		p.victimSize = 0
	}

	// Move primary cache to victim cache.
	for _, p := range allPools {
		p.victim = p.local
		p.victimSize = p.localSize
		p.local = nil
		p.localSize = 0
	}

	// The pools with non-empty primary caches now have non-empty
	// victim caches and no pools have primary caches.
	oldPools, allPools = allPools, nil
}

var (
	allPoolsMu Mutex

	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool

	// oldPools is the set of pools that may have non-empty victim
	// caches. Protected by STW.
	oldPools []*Pool
)

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
	return (*poolLocal)(lp)
}

// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()

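A small sketch of how the victim cache shows up from the outside (the exact output depends on scheduling and on which P serves Get, so treat the printed values as typical rather than guaranteed): an object put into the pool normally survives the first garbage collection, because poolCleanup only moves it from the primary cache into the victim cache, and is dropped by the second.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	p := &sync.Pool{New: func() interface{} { return "fresh" }}

	p.Put("pooled")
	runtime.GC()         // first GC: the primary cache becomes the victim cache
	fmt.Println(p.Get()) // usually "pooled", served out of the victim cache

	p.Put("pooled")
	runtime.GC()         // moves it into the victim cache...
	runtime.GC()         // ...and the second GC drops the victim cache
	fmt.Println(p.Get()) // usually "fresh", i.e. freshly allocated by New
}
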
