From 9dfcfb938552cfc601562db1f6e6a97534d4e563 Mon Sep 17 00:00:00 2001
From: Rob Pike
Date: Tue, 12 Mar 2013 10:53:01 -0700
Subject: [PATCH] effective_go.html: fix semaphore example

It didn't work properly according to the Go memory model.
Fixes #5023.

R=golang-dev, dvyukov, adg
CC=golang-dev
https://golang.org/cl/7698045
---
 doc/effective_go.html | 54 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 8 deletions(-)

diff --git a/doc/effective_go.html b/doc/effective_go.html
index 427a88506c..decca34b5a 100644
--- a/doc/effective_go.html
+++ b/doc/effective_go.html
@@ -2422,7 +2422,7 @@ special case of a general situation: multiple assignment.

 If an assignment requires multiple values on the left side,
 but one of the values will not be used by the program,
-a blank identifier on the left-hand-side of the
+a blank identifier on the left-hand-side of the
 assignment avoids the need to create a dummy
 variable and makes it clear that the value is to be discarded.
@@ -2893,18 +2893,26 @@ means waiting until some receiver has retrieved a value.

 A buffered channel can be used like a semaphore, for instance to
 limit throughput. In this example, incoming requests are passed
-to handle, which sends a value into the channel, processes
-the request, and then receives a value from the channel.
+to handle, which receives a value from the channel, processes
+the request, and then sends a value back to the channel
+to ready the "semaphore" for the next consumer.
 The capacity of the channel buffer limits the number of
-simultaneous calls to process.
+simultaneous calls to process,
+so during initialization we prime the channel by filling it to capacity.

 var sem = make(chan int, MaxOutstanding)
 
 func handle(r *Request) {
-    sem <- 1    // Wait for active queue to drain.
-    process(r)  // May take a long time.
-    <-sem       // Done; enable next request to run.
+    <-sem          // Wait for active queue to drain.
+    process(r)     // May take a long time.
+    sem <- 1       // Done; enable next request to run.
+}
+
+func init() {
+    for i := 0; i < MaxOutstanding; i++ {
+        sem <- 1
+    }
 }
 
 func Serve(queue chan *Request) {
@@ -2914,8 +2922,37 @@ func Serve(queue chan *Request) {
     }
 }
 
+
+Because data synchronization occurs on a receive from a channel
+(that is, the send "happens before" the receive; see
+The Go Memory Model),
+acquisition of the semaphore must be on a channel receive, not a send.
+
+This design has a problem, though: Serve
+creates a new goroutine for
+every incoming request, even though only MaxOutstanding
+of them can run at any moment.
+As a result, the program can consume unlimited resources if the requests come in too fast.
+We can address that deficiency by changing Serve to
+gate the creation of the goroutines.
+
+func Serve(queue chan *Request) {
+    for req := range queue {
+        <-sem
+        go func() {
+            process(req)
+            sem <- 1
+        }()
+    }
+}
+

-Here's the same idea implemented by starting a fixed
+Another solution that manages resources well is to start a fixed
 number of handle
 goroutines all reading from the request
 channel.
 The number of goroutines limits the number of simultaneous
@@ -2924,6 +2961,7 @@ This Serve function also accepts a channel
 on which it will be told to exit; after launching the goroutines it blocks
 receiving from that channel.

+
 func handle(queue chan *Request) {
     for r := range queue {
-- 
2.50.0
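
For reference, here is a self-contained sketch that assembles the patched fragments into a runnable program. Request, process, MaxOutstanding, and main are illustrative stand-ins (effective_go.html defines its own surrounding example); only sem, init, handle, and the gated Serve mirror the lines added by this patch, with one addition noted in a comment: the loop variable is copied before the closure captures it.

    package main

    import (
        "fmt"
        "time"
    )

    const MaxOutstanding = 3

    // Request and process stand in for the definitions used elsewhere in
    // effective_go.html.
    type Request struct{ id int }

    func process(r *Request) {
        time.Sleep(10 * time.Millisecond) // Pretend the request takes a while.
        fmt.Println("processed", r.id)
    }

    // The semaphore is primed to capacity during initialization, so a receive
    // acquires a slot and a send releases it.
    var sem = make(chan int, MaxOutstanding)

    func init() {
        for i := 0; i < MaxOutstanding; i++ {
            sem <- 1
        }
    }

    // handle is the per-request form from the patch: the caller starts one
    // goroutine per request, but at most MaxOutstanding of them are inside
    // process at any moment.
    func handle(r *Request) {
        <-sem      // Wait for active queue to drain.
        process(r) // May take a long time.
        sem <- 1   // Done; enable next request to run.
    }

    // Serve is the gated form from the patch: it acquires the semaphore before
    // creating a goroutine, so at most MaxOutstanding goroutines exist at once.
    func Serve(queue chan *Request) {
        for req := range queue {
            <-sem
            req := req // Copy; before Go 1.22 the loop variable is reused across iterations.
            go func() {
                process(req)
                sem <- 1
            }()
        }
    }

    func main() {
        queue := make(chan *Request)
        done := make(chan bool)
        go func() {
            Serve(queue) // Returns once queue has been closed and drained.
            done <- true
        }()
        for i := 0; i < 10; i++ {
            queue <- &Request{id: i}
        }
        close(queue)
        <-done
        // Reacquire every slot to wait for in-flight requests to finish.
        for i := 0; i < MaxOutstanding; i++ {
            <-sem
        }
    }

Priming the channel in init and acquiring with a receive follows the patch's reasoning that the send on a channel happens before the corresponding receive, so the receive is the operation that provides the needed synchronization.

The final hunk is truncated just after it begins the fixed-pool variant. Based on the description in the patch (a fixed number of handle goroutines all reading from the request channel, and a Serve that blocks on a channel telling it to exit), a plausible shape for that variant, replacing the handle and Serve above, is:

    func handle(queue chan *Request) {
        for r := range queue {
            process(r)
        }
    }

    func Serve(queue chan *Request, quit chan bool) {
        for i := 0; i < MaxOutstanding; i++ {
            go handle(queue) // A fixed pool of handlers shares one queue.
        }
        <-quit // Wait to be told to exit.
    }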