This commit is contained in:
Lily Tsai
2021-02-10 08:39:42 -08:00
commit 34a311648c
63 changed files with 76273 additions and 0 deletions

513
src/labrpc/labrpc.go Normal file
View File

@@ -0,0 +1,513 @@
package labrpc
//
// channel-based RPC, for 824 labs.
//
// simulates a network that can lose requests, lose replies,
// delay messages, and entirely disconnect particular hosts.
//
// we will use the original labrpc.go to test your code for grading.
// so, while you can modify this code to help you debug, please
// test against the original before submitting.
//
// adapted from Go net/rpc/server.go.
//
// sends labgob-encoded values to ensure that RPCs
// don't include references to program objects.
//
// net := MakeNetwork() -- holds network, clients, servers.
// end := net.MakeEnd(endname) -- create a client end-point, to talk to one server.
// net.AddServer(servername, server) -- adds a named server to network.
// net.DeleteServer(servername) -- eliminate the named server.
// net.Connect(endname, servername) -- connect a client to a server.
// net.Enable(endname, enabled) -- enable/disable a client.
// net.Reliable(bool) -- false means drop/delay messages
//
// end.Call("Raft.AppendEntries", &args, &reply) -- send an RPC, wait for reply.
// the "Raft" is the name of the server struct to be called.
// the "AppendEntries" is the name of the method to be called.
// Call() returns true to indicate that the server executed the request
// and the reply is valid.
// Call() returns false if the network lost the request or reply
// or the server is down.
// It is OK to have multiple Call()s in progress at the same time on the
// same ClientEnd.
// Concurrent calls to Call() may be delivered to the server out of order,
// since the network may re-order messages.
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return.
// the server RPC handler function must declare its args and reply arguments
// as pointers, so that their types exactly match the types of the arguments
// to Call().
//
// srv := MakeServer()
// srv.AddService(svc) -- a server can have multiple services, e.g. Raft and k/v
// pass srv to net.AddServer()
//
// svc := MakeService(receiverObject) -- obj's methods will handle RPCs
// much like Go's rpcs.Register()
// pass svc to srv.AddService()
//
import "6.824/labgob"
import "bytes"
import "reflect"
import "sync"
import "log"
import "strings"
import "math/rand"
import "time"
import "sync/atomic"
// reqMsg is one RPC request as it travels through the simulated
// network, from a ClientEnd to the Network's dispatch goroutine.
type reqMsg struct {
	endname  interface{} // name of sending ClientEnd
	svcMeth  string      // e.g. "Raft.AppendEntries"
	argsType reflect.Type // reflect type of the caller's args value
	args     []byte       // labgob-encoded arguments
	replyCh  chan replyMsg // where processReq delivers the reply
}
// replyMsg is the result of one RPC: ok is false if the network
// dropped the message or the server was dead/disabled.
type replyMsg struct {
	ok    bool
	reply []byte // labgob-encoded reply; nil when ok is false
}
// ClientEnd is one client end-point; Call() sends RPCs through
// the Network this end was created by.
type ClientEnd struct {
	endname interface{}   // this end-point's name
	ch      chan reqMsg   // copy of Network.endCh
	done    chan struct{} // closed when Network is cleaned up
}
// send an RPC, wait for the reply.
// the return value indicates success; false means that
// no reply was received from the server.
func (e *ClientEnd) Call(svcMeth string, args interface{}, reply interface{}) bool {
	req := reqMsg{}
	req.endname = e.endname
	req.svcMeth = svcMeth
	req.argsType = reflect.TypeOf(args)
	req.replyCh = make(chan replyMsg)

	// labgob-encode the arguments so the server cannot share
	// memory with the caller's objects.
	qb := new(bytes.Buffer)
	qe := labgob.NewEncoder(qb)
	if err := qe.Encode(args); err != nil {
		panic(err)
	}
	req.args = qb.Bytes()

	//
	// send the request.
	//
	select {
	case e.ch <- req:
		// the request has been sent.
	case <-e.done:
		// entire Network has been destroyed.
		return false
	}

	//
	// wait for the reply.
	//
	// once the request is accepted above, processReq() always
	// eventually sends exactly one replyMsg, so this receive
	// terminates unless the server-side handler never returns.
	rep := <-req.replyCh
	if rep.ok {
		rb := bytes.NewBuffer(rep.reply)
		rd := labgob.NewDecoder(rb)
		if err := rd.Decode(reply); err != nil {
			log.Fatalf("ClientEnd.Call(): decode reply: %v\n", err)
		}
		return true
	} else {
		return false
	}
}
// Network simulates a message-passing network connecting ClientEnds
// to Servers. it can drop, delay, and re-order messages, and
// disconnect particular hosts.
type Network struct {
	mu             sync.Mutex
	reliable       bool
	longDelays     bool // pause a long time on send on disabled connection
	longReordering bool // sometimes delay replies a long time
	ends           map[interface{}]*ClientEnd  // ends, by name
	enabled        map[interface{}]bool        // by end name
	servers        map[interface{}]*Server     // servers, by name
	connections    map[interface{}]interface{} // endname -> servername
	endCh          chan reqMsg   // all ends send requests here
	done           chan struct{} // closed when Network is cleaned up
	count          int32         // total RPC count, for statistics
	bytes          int64         // total bytes send, for statistics
}
// MakeNetwork creates an empty simulated network, reliable by
// default, and starts the single goroutine that accepts requests
// from all ClientEnds.
func MakeNetwork() *Network {
	rn := &Network{}
	rn.reliable = true
	rn.ends = map[interface{}]*ClientEnd{}
	rn.enabled = map[interface{}]bool{}
	rn.servers = map[interface{}]*Server{}
	rn.connections = map[interface{}](interface{}){}
	rn.endCh = make(chan reqMsg)
	rn.done = make(chan struct{})

	// single goroutine to handle all ClientEnd.Call()s
	go func() {
		for {
			select {
			case xreq := <-rn.endCh:
				// count the RPC and its request bytes for
				// GetTotalCount()/GetTotalBytes(), then serve it
				// in its own goroutine so a slow handler doesn't
				// block the whole network.
				atomic.AddInt32(&rn.count, 1)
				atomic.AddInt64(&rn.bytes, int64(len(xreq.args)))
				go rn.processReq(xreq)
			case <-rn.done:
				// Cleanup() was called.
				return
			}
		}
	}()
	return rn
}
// Cleanup shuts the network down. closing rn.done stops the
// dispatch goroutine started by MakeNetwork and unblocks any
// ClientEnd.Call() waiting to send a request.
func (rn *Network) Cleanup() {
	close(rn.done)
}
// Reliable controls loss simulation: passing false makes the
// network drop and delay requests and replies.
func (rn *Network) Reliable(yes bool) {
	rn.mu.Lock()
	rn.reliable = yes
	rn.mu.Unlock()
}
// LongReordering controls whether the network sometimes holds
// replies back for a long time, re-ordering them relative to
// other in-flight RPCs.
func (rn *Network) LongReordering(yes bool) {
	rn.mu.Lock()
	rn.longReordering = yes
	rn.mu.Unlock()
}
// LongDelays controls whether an RPC sent over a disabled or
// disconnected end pauses a long time before failing.
func (rn *Network) LongDelays(yes bool) {
	rn.mu.Lock()
	rn.longDelays = yes
	rn.mu.Unlock()
}
// readEndnameInfo takes a consistent snapshot, under the lock, of
// everything processReq needs to know about an end: whether it is
// enabled, which server it connects to, and the current
// reliability/reordering settings.
func (rn *Network) readEndnameInfo(endname interface{}) (enabled bool,
	servername interface{}, server *Server, reliable bool, longreordering bool,
) {
	rn.mu.Lock()
	defer rn.mu.Unlock()

	enabled = rn.enabled[endname]
	servername = rn.connections[endname]
	if servername != nil {
		server = rn.servers[servername]
	}
	return enabled, servername, server, rn.reliable, rn.longReordering
}
// isServerDead reports whether this request's target should be
// treated as dead: either the sending end has been disabled, or
// the *Server registered under servername is no longer the one
// the request was dispatched to (DeleteServer/AddServer ran).
func (rn *Network) isServerDead(endname interface{}, servername interface{}, server *Server) bool {
	rn.mu.Lock()
	defer rn.mu.Unlock()
	return !rn.enabled[endname] || rn.servers[servername] != server
}
// processReq serves a single RPC: it simulates unreliable-network
// drops and delays, dispatches to the target server, and delivers
// the reply (or a failure) on req.replyCh. it always eventually
// sends exactly one reply, so ClientEnd.Call() cannot block
// forever unless the handler itself never returns.
func (rn *Network) processReq(req reqMsg) {
	enabled, servername, server, reliable, longreordering := rn.readEndnameInfo(req.endname)

	if enabled && servername != nil && server != nil {
		if reliable == false {
			// short delay
			ms := (rand.Int() % 27)
			time.Sleep(time.Duration(ms) * time.Millisecond)
		}

		if reliable == false && (rand.Int()%1000) < 100 {
			// drop the request, return as if timeout
			req.replyCh <- replyMsg{false, nil}
			return
		}

		// execute the request (call the RPC handler).
		// in a separate thread so that we can periodically check
		// if the server has been killed and the RPC should get a
		// failure reply.
		ech := make(chan replyMsg)
		go func() {
			r := server.dispatch(req)
			ech <- r
		}()

		// wait for handler to return,
		// but stop waiting if DeleteServer() has been called,
		// and return an error.
		var reply replyMsg
		replyOK := false
		serverDead := false
		for replyOK == false && serverDead == false {
			select {
			case reply = <-ech:
				replyOK = true
			case <-time.After(100 * time.Millisecond):
				serverDead = rn.isServerDead(req.endname, servername, server)
				if serverDead {
					go func() {
						<-ech // drain channel to let the goroutine created earlier terminate
					}()
				}
			}
		}

		// do not reply if DeleteServer() has been called, i.e.
		// the server has been killed. this is needed to avoid
		// situation in which a client gets a positive reply
		// to an Append, but the server persisted the update
		// into the old Persister. config.go is careful to call
		// DeleteServer() before superseding the Persister.
		serverDead = rn.isServerDead(req.endname, servername, server)

		if replyOK == false || serverDead == true {
			// server was killed while we were waiting; return error.
			req.replyCh <- replyMsg{false, nil}
		} else if reliable == false && (rand.Int()%1000) < 100 {
			// drop the reply, return as if timeout
			req.replyCh <- replyMsg{false, nil}
		} else if longreordering == true && rand.Intn(900) < 600 {
			// delay the response for a while
			ms := 200 + rand.Intn(1+rand.Intn(2000))
			// Russ points out that this timer arrangement will decrease
			// the number of goroutines, so that the race
			// detector is less likely to get upset.
			time.AfterFunc(time.Duration(ms)*time.Millisecond, func() {
				atomic.AddInt64(&rn.bytes, int64(len(reply.reply)))
				req.replyCh <- reply
			})
		} else {
			atomic.AddInt64(&rn.bytes, int64(len(reply.reply)))
			req.replyCh <- reply
		}
	} else {
		// simulate no reply and eventual timeout.
		//
		// BUG FIX: rn.longDelays was previously read here without
		// holding rn.mu, racing with LongDelays(), which writes it
		// under the lock. take the snapshot under the lock.
		rn.mu.Lock()
		longDelays := rn.longDelays
		rn.mu.Unlock()

		ms := 0
		if longDelays {
			// let Raft tests check that leader doesn't send
			// RPCs synchronously.
			ms = (rand.Int() % 7000)
		} else {
			// many kv tests require the client to try each
			// server in fairly rapid succession.
			ms = (rand.Int() % 100)
		}
		time.AfterFunc(time.Duration(ms)*time.Millisecond, func() {
			req.replyCh <- replyMsg{false, nil}
		})
	}
}
// create a client end-point.
// start the thread that listens and delivers.
// it is a fatal error to create two ends with the same name.
func (rn *Network) MakeEnd(endname interface{}) *ClientEnd {
	rn.mu.Lock()
	defer rn.mu.Unlock()

	if _, dup := rn.ends[endname]; dup {
		log.Fatalf("MakeEnd: %v already exists\n", endname)
	}

	// the new end shares the network's request channel and
	// shutdown channel; it starts disabled and unconnected.
	end := &ClientEnd{
		endname: endname,
		ch:      rn.endCh,
		done:    rn.done,
	}
	rn.ends[endname] = end
	rn.enabled[endname] = false
	rn.connections[endname] = nil

	return end
}
// AddServer registers (or replaces) a named server on the network.
func (rn *Network) AddServer(servername interface{}, rs *Server) {
	rn.mu.Lock()
	rn.servers[servername] = rs
	rn.mu.Unlock()
}
// DeleteServer removes the named server from the network.
func (rn *Network) DeleteServer(servername interface{}) {
	rn.mu.Lock()
	defer rn.mu.Unlock()
	// set to nil rather than delete()ing the map entry, so that
	// isServerDead()'s rn.servers[servername] != server comparison
	// sees the old *Server go away and reports in-flight RPCs to
	// it as dead.
	rn.servers[servername] = nil
}
// connect a ClientEnd to a server.
// a ClientEnd can only be connected once in its lifetime.
func (rn *Network) Connect(endname interface{}, servername interface{}) {
	rn.mu.Lock()
	rn.connections[endname] = servername
	rn.mu.Unlock()
}
// enable/disable a ClientEnd. RPCs sent over a disabled end
// fail as if they had timed out.
func (rn *Network) Enable(endname interface{}, enabled bool) {
	rn.mu.Lock()
	rn.enabled[endname] = enabled
	rn.mu.Unlock()
}
// get a server's count of incoming RPCs.
func (rn *Network) GetCount(servername interface{}) int {
	rn.mu.Lock()
	defer rn.mu.Unlock()
	// NOTE(review): assumes servername was registered via AddServer
	// and not removed; an unknown or deleted name makes svr nil and
	// svr.GetCount() would panic — confirm callers only pass live
	// server names.
	svr := rn.servers[servername]
	return svr.GetCount()
}
// GetTotalCount reports how many RPCs the whole network has
// carried since MakeNetwork().
func (rn *Network) GetTotalCount() int {
	return int(atomic.LoadInt32(&rn.count))
}
// GetTotalBytes reports the total encoded request and reply
// bytes the network has carried since MakeNetwork().
func (rn *Network) GetTotalBytes() int64 {
	return atomic.LoadInt64(&rn.bytes)
}
//
// a server is a collection of services, all sharing
// the same rpc dispatcher. so that e.g. both a Raft
// and a k/v server can listen to the same rpc endpoint.
//
type Server struct {
	mu       sync.Mutex
	services map[string]*Service // services by name, e.g. "Raft"
	count    int                 // incoming RPCs
}
// MakeServer creates an empty server with no services.
func MakeServer() *Server {
	return &Server{services: map[string]*Service{}}
}
// AddService registers a service under its name; RPCs named
// "<svc.name>.Method" will be routed to it.
func (rs *Server) AddService(svc *Service) {
	rs.mu.Lock()
	rs.services[svc.name] = svc
	rs.mu.Unlock()
}
// dispatch routes an incoming RPC to the named service.
// req.svcMeth looks like "Raft.AppendEntries": the part before
// the last '.' names the service, the rest names the method.
func (rs *Server) dispatch(req reqMsg) replyMsg {
	rs.mu.Lock()

	rs.count += 1

	// split Raft.AppendEntries into service and method
	dot := strings.LastIndex(req.svcMeth, ".")
	serviceName := req.svcMeth[:dot]
	methodName := req.svcMeth[dot+1:]

	service, ok := rs.services[serviceName]

	// BUG FIX: collect the known service names for the error
	// message while still holding the lock; the original ranged
	// over rs.services after Unlock, racing with concurrent
	// AddService() calls.
	var choices []string
	if !ok {
		for k := range rs.services {
			choices = append(choices, k)
		}
	}

	rs.mu.Unlock()

	if !ok {
		log.Fatalf("labrpc.Server.dispatch(): unknown service %v in %v.%v; expecting one of %v\n",
			serviceName, serviceName, methodName, choices)
		return replyMsg{false, nil}
	}
	return service.dispatch(methodName, req)
}
// GetCount reports how many RPCs this server has received.
func (rs *Server) GetCount() int {
	rs.mu.Lock()
	n := rs.count
	rs.mu.Unlock()
	return n
}
// an object with methods that can be called via RPC.
// a single server may have more than one Service.
type Service struct {
	name    string                    // the receiver's type name, e.g. "Raft"
	rcvr    reflect.Value             // the receiver object handlers run on
	typ     reflect.Type              // the receiver's reflect type
	methods map[string]reflect.Method // handler methods found by MakeService
}
// MakeService builds a Service from rcvr's suitable methods,
// discovered via reflection (much like net/rpc's Register).
// a method qualifies as a handler if it is exported, takes
// exactly two arguments beyond the receiver, has a pointer
// second argument (the reply), and returns nothing.
func MakeService(rcvr interface{}) *Service {
	svc := &Service{}
	svc.typ = reflect.TypeOf(rcvr)
	svc.rcvr = reflect.ValueOf(rcvr)
	// the service name is the receiver's type name, e.g. "Raft".
	svc.name = reflect.Indirect(svc.rcvr).Type().Name()
	svc.methods = map[string]reflect.Method{}

	for m := 0; m < svc.typ.NumMethod(); m++ {
		method := svc.typ.Method(m)
		mtype := method.Type
		mname := method.Name

		//fmt.Printf("%v pp %v ni %v 1k %v 2k %v no %v\n",
		//	mname, method.PkgPath, mtype.NumIn(), mtype.In(1).Kind(), mtype.In(2).Kind(), mtype.NumOut())

		// NumIn() counts the receiver, so 3 means (args, reply).
		// the In(1) pointer check is deliberately disabled: args
		// may be passed by value or by pointer.
		if method.PkgPath != "" || // capitalized?
			mtype.NumIn() != 3 ||
			//mtype.In(1).Kind() != reflect.Ptr ||
			mtype.In(2).Kind() != reflect.Ptr ||
			mtype.NumOut() != 0 {
			// the method is not suitable for a handler
			//fmt.Printf("bad method: %v\n", mname)
		} else {
			// the method looks like a handler
			svc.methods[mname] = method
		}
	}

	return svc
}
// dispatch decodes req's arguments, calls the named handler
// method via reflection, and returns the labgob-encoded reply.
func (svc *Service) dispatch(methname string, req reqMsg) replyMsg {
	if method, ok := svc.methods[methname]; ok {
		// prepare space into which to read the argument.
		// the Value's type will be a pointer to req.argsType.
		args := reflect.New(req.argsType)

		// decode the argument.
		ab := bytes.NewBuffer(req.args)
		ad := labgob.NewDecoder(ab)
		// NOTE(review): the Decode error is ignored, so a
		// mismatched args type silently yields zero-valued
		// arguments — confirm this is intentional.
		ad.Decode(args.Interface())

		// allocate space for the reply.
		replyType := method.Type.In(2)
		replyType = replyType.Elem()
		replyv := reflect.New(replyType)

		// call the method.
		function := method.Func
		function.Call([]reflect.Value{svc.rcvr, args.Elem(), replyv})

		// encode the reply.
		rb := new(bytes.Buffer)
		re := labgob.NewEncoder(rb)
		re.EncodeValue(replyv)

		return replyMsg{true, rb.Bytes()}
	} else {
		choices := []string{}
		for k, _ := range svc.methods {
			choices = append(choices, k)
		}
		log.Fatalf("labrpc.Service.dispatch(): unknown method %v in %v; expecting one of %v\n",
			methname, req.svcMeth, choices)
		return replyMsg{false, nil}
	}
}

597
src/labrpc/test_test.go Normal file
View File

@@ -0,0 +1,597 @@
package labrpc
import "testing"
import "strconv"
import "sync"
import "runtime"
import "time"
import "fmt"
// JunkArgs is an args struct used by Handler4 and Handler5.
type JunkArgs struct {
	X int
}
// JunkReply is a reply struct used by Handler4 and Handler5.
type JunkReply struct {
	X string
}
// JunkServer is the RPC receiver used throughout these tests;
// its HandlerN methods become "JunkServer.HandlerN" RPC handlers.
type JunkServer struct {
	mu   sync.Mutex
	log1 []string // args seen by Handler1
	log2 []int    // args seen by Handler2
}
// Handler1 records its string argument in log1 and replies with
// its integer value (0 if it does not parse).
func (js *JunkServer) Handler1(args string, reply *int) {
	js.mu.Lock()
	js.log1 = append(js.log1, args)
	js.mu.Unlock()

	v, _ := strconv.Atoi(args)
	*reply = v
}
// Handler2 records its int argument in log2 and replies with
// "handler2-<args>".
func (js *JunkServer) Handler2(args int, reply *string) {
	js.mu.Lock()
	js.log2 = append(js.log2, args)
	js.mu.Unlock()

	*reply = "handler2-" + strconv.Itoa(args)
}
// Handler3 sleeps 20 seconds (while holding js.mu) before
// replying with -args; TestKilled uses it to create an RPC
// that is stuck inside the server.
func (js *JunkServer) Handler3(args int, reply *int) {
	js.mu.Lock()
	defer js.mu.Unlock()
	time.Sleep(20 * time.Second)
	*reply = -args
}
// Handler4 replies "pointer"; TestTypes uses it to check that a
// handler taking its args by pointer works.
// args is a pointer
func (js *JunkServer) Handler4(args *JunkArgs, reply *JunkReply) {
	reply.X = "pointer"
}
// Handler5 replies "no pointer"; TestTypes uses it to check that
// a handler taking its args by value works.
// args is not a pointer
func (js *JunkServer) Handler5(args JunkArgs, reply *JunkReply) {
	reply.X = "no pointer"
}
// Handler6 replies with the length of its string argument;
// TestBytes uses it to measure request-byte accounting.
func (js *JunkServer) Handler6(args string, reply *int) {
	js.mu.Lock()
	*reply = len(args)
	js.mu.Unlock()
}
// Handler7 replies with a string of args 'y' characters;
// TestBytes uses it to measure reply-byte accounting.
func (js *JunkServer) Handler7(args int, reply *string) {
	js.mu.Lock()
	defer js.mu.Unlock()

	// build in a byte slice rather than repeated string
	// concatenation; a non-positive args yields "".
	buf := []byte{}
	for i := 0; i < args; i++ {
		buf = append(buf, 'y')
	}
	*reply = string(buf)
}
// TestBasic checks a round trip through the network for two
// differently-typed handlers.
func TestBasic(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	end := rn.MakeEnd("end1-99")

	rs := MakeServer()
	rs.AddService(MakeService(&JunkServer{}))
	rn.AddServer("server99", rs)

	rn.Connect("end1-99", "server99")
	rn.Enable("end1-99", true)

	var sreply string
	end.Call("JunkServer.Handler2", 111, &sreply)
	if sreply != "handler2-111" {
		t.Fatalf("wrong reply from Handler2")
	}

	var ireply int
	end.Call("JunkServer.Handler1", "9099", &ireply)
	if ireply != 9099 {
		t.Fatalf("wrong reply from Handler1")
	}
}
// TestTypes checks that a handler may take its args either by
// pointer (Handler4) or by value (Handler5), as long as the
// caller's args value matches.
func TestTypes(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	end := rn.MakeEnd("end1-99")

	rs := MakeServer()
	rs.AddService(MakeService(&JunkServer{}))
	rn.AddServer("server99", rs)

	rn.Connect("end1-99", "server99")
	rn.Enable("end1-99", true)

	{
		var args JunkArgs
		var reply JunkReply
		// args must match type (pointer or not) of handler.
		end.Call("JunkServer.Handler4", &args, &reply)
		if reply.X != "pointer" {
			t.Fatalf("wrong reply from Handler4")
		}
	}

	{
		var args JunkArgs
		var reply JunkReply
		// args must match type (pointer or not) of handler.
		end.Call("JunkServer.Handler5", args, &reply)
		if reply.X != "no pointer" {
			t.Fatalf("wrong reply from Handler5")
		}
	}
}
//
// does net.Enable(endname, false) really disconnect a client?
//
func TestDisconnect(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	end := rn.MakeEnd("end1-99")

	rs := MakeServer()
	rs.AddService(MakeService(&JunkServer{}))
	rn.AddServer("server99", rs)
	rn.Connect("end1-99", "server99")

	// the end has not been enabled, so this call should fail
	// and leave reply untouched.
	sreply := ""
	end.Call("JunkServer.Handler2", 111, &sreply)
	if sreply != "" {
		t.Fatalf("unexpected reply from Handler2")
	}

	rn.Enable("end1-99", true)

	ireply := 0
	end.Call("JunkServer.Handler1", "9099", &ireply)
	if ireply != 9099 {
		t.Fatalf("wrong reply from Handler1")
	}
}
//
// test net.GetCount()
//
func TestCounts(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	end := rn.MakeEnd("end1-99")

	rs := MakeServer()
	rs.AddService(MakeService(&JunkServer{}))
	rn.AddServer(99, rs)

	rn.Connect("end1-99", 99)
	rn.Enable("end1-99", true)

	for i := 0; i < 17; i++ {
		reply := ""
		end.Call("JunkServer.Handler2", i, &reply)
		wanted := "handler2-" + strconv.Itoa(i)
		if reply != wanted {
			t.Fatalf("wrong reply %v from Handler1, expecting %v", reply, wanted)
		}
	}

	if n := rn.GetCount(99); n != 17 {
		t.Fatalf("wrong GetCount() %v, expected 17\n", n)
	}
}
//
// test net.GetTotalBytes()
//
func TestBytes(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	e := rn.MakeEnd("end1-99")

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer(99, rs)

	rn.Connect("end1-99", 99)
	rn.Enable("end1-99", true)

	// 17 RPCs whose args are 284-byte strings (71 chars doubled
	// twice); request bytes should dominate the total.
	for i := 0; i < 17; i++ {
		args := "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
		args = args + args
		args = args + args
		reply := 0
		e.Call("JunkServer.Handler6", args, &reply)
		wanted := len(args)
		if reply != wanted {
			t.Fatalf("wrong reply %v from Handler6, expecting %v", reply, wanted)
		}
	}

	// loose bounds: labgob encoding adds overhead beyond 17*284.
	n := rn.GetTotalBytes()
	if n < 4828 || n > 6000 {
		t.Fatalf("wrong GetTotalBytes() %v, expected about 5000\n", n)
	}

	// now 17 RPCs whose 107-byte string replies dominate.
	for i := 0; i < 17; i++ {
		args := 107
		reply := ""
		e.Call("JunkServer.Handler7", args, &reply)
		wanted := args
		if len(reply) != wanted {
			t.Fatalf("wrong reply len=%v from Handler6, expecting %v", len(reply), wanted)
		}
	}

	nn := rn.GetTotalBytes() - n
	if nn < 1800 || nn > 2500 {
		t.Fatalf("wrong GetTotalBytes() %v, expected about 2000\n", nn)
	}
}
//
// test RPCs from concurrent ClientEnds
//
func TestConcurrentMany(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer(1000, rs)

	// each client goroutine reports how many RPCs it completed.
	ch := make(chan int)

	nclients := 20
	nrpcs := 10
	for ii := 0; ii < nclients; ii++ {
		go func(i int) {
			n := 0
			defer func() { ch <- n }()

			e := rn.MakeEnd(i)
			rn.Connect(i, 1000)
			rn.Enable(i, true)

			for j := 0; j < nrpcs; j++ {
				arg := i*100 + j
				reply := ""
				e.Call("JunkServer.Handler2", arg, &reply)
				wanted := "handler2-" + strconv.Itoa(arg)
				if reply != wanted {
					// BUG FIX: t.Fatalf must only be called from the
					// goroutine running the test function (FailNow
					// calls runtime.Goexit); use Errorf and bail out
					// of this client instead.
					t.Errorf("wrong reply %v from Handler1, expecting %v", reply, wanted)
					return
				}
				n += 1
			}
		}(ii)
	}

	total := 0
	for ii := 0; ii < nclients; ii++ {
		x := <-ch
		total += x
	}

	if total != nclients*nrpcs {
		t.Fatalf("wrong number of RPCs completed, got %v, expected %v", total, nclients*nrpcs)
	}

	n := rn.GetCount(1000)
	if n != total {
		t.Fatalf("wrong GetCount() %v, expected %v\n", n, total)
	}
}
//
// test unreliable
//
func TestUnreliable(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()
	rn.Reliable(false)

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer(1000, rs)

	// each client reports 1 if its single RPC succeeded, else 0.
	ch := make(chan int)

	nclients := 300
	for ii := 0; ii < nclients; ii++ {
		go func(i int) {
			n := 0
			defer func() { ch <- n }()

			e := rn.MakeEnd(i)
			rn.Connect(i, 1000)
			rn.Enable(i, true)

			arg := i * 100
			reply := ""
			ok := e.Call("JunkServer.Handler2", arg, &reply)
			if ok {
				wanted := "handler2-" + strconv.Itoa(arg)
				if reply != wanted {
					// BUG FIX: t.Fatalf must only be called from the
					// goroutine running the test function; use Errorf
					// and bail out of this client instead.
					t.Errorf("wrong reply %v from Handler1, expecting %v", reply, wanted)
					return
				}
				n += 1
			}
		}(ii)
	}

	total := 0
	for ii := 0; ii < nclients; ii++ {
		x := <-ch
		total += x
	}

	// with 10% request and 10% reply drops, some but not all of
	// the 300 RPCs should succeed.
	if total == nclients || total == 0 {
		t.Fatalf("all RPCs succeeded despite unreliable")
	}
}
//
// test concurrent RPCs from a single ClientEnd
//
func TestConcurrentOne(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer(1000, rs)

	e := rn.MakeEnd("c")
	rn.Connect("c", 1000)
	rn.Enable("c", true)

	ch := make(chan int)

	nrpcs := 20
	for ii := 0; ii < nrpcs; ii++ {
		go func(i int) {
			n := 0
			defer func() { ch <- n }()

			arg := 100 + i
			reply := ""
			e.Call("JunkServer.Handler2", arg, &reply)
			wanted := "handler2-" + strconv.Itoa(arg)
			if reply != wanted {
				// BUG FIX: t.Fatalf must only be called from the
				// goroutine running the test function; use Errorf
				// and bail out instead.
				t.Errorf("wrong reply %v from Handler2, expecting %v", reply, wanted)
				return
			}
			n += 1
		}(ii)
	}

	total := 0
	for ii := 0; ii < nrpcs; ii++ {
		x := <-ch
		total += x
	}

	if total != nrpcs {
		t.Fatalf("wrong number of RPCs completed, got %v, expected %v", total, nrpcs)
	}

	js.mu.Lock()
	defer js.mu.Unlock()
	if len(js.log2) != nrpcs {
		t.Fatalf("wrong number of RPCs delivered")
	}

	n := rn.GetCount(1000)
	if n != total {
		t.Fatalf("wrong GetCount() %v, expected %v\n", n, total)
	}
}
//
// regression: an RPC that's delayed during Enabled=false
// should not delay subsequent RPCs (e.g. after Enabled=true).
//
func TestRegression1(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer(1000, rs)

	e := rn.MakeEnd("c")
	rn.Connect("c", 1000)

	// start some RPCs while the ClientEnd is disabled.
	// they'll be delayed.
	rn.Enable("c", false)
	ch := make(chan bool)
	nrpcs := 20
	for ii := 0; ii < nrpcs; ii++ {
		go func(i int) {
			ok := false
			defer func() { ch <- ok }()

			arg := 100 + i
			reply := ""
			// this call ought to return false.
			e.Call("JunkServer.Handler2", arg, &reply)
			ok = true
		}(ii)
	}

	// give the disabled RPCs time to get into the network's
	// delay timers before re-enabling.
	time.Sleep(100 * time.Millisecond)

	// now enable the ClientEnd and check that an RPC completes quickly.
	t0 := time.Now()
	rn.Enable("c", true)
	{
		arg := 99
		reply := ""
		e.Call("JunkServer.Handler2", arg, &reply)
		wanted := "handler2-" + strconv.Itoa(arg)
		if reply != wanted {
			t.Fatalf("wrong reply %v from Handler2, expecting %v", reply, wanted)
		}
	}
	dur := time.Since(t0).Seconds()

	// 30ms budget: the enabled RPC must not queue behind the
	// delayed-disabled ones.
	if dur > 0.03 {
		t.Fatalf("RPC took too long (%v) after Enable", dur)
	}

	for ii := 0; ii < nrpcs; ii++ {
		<-ch
	}

	js.mu.Lock()
	defer js.mu.Unlock()
	// only the single enabled RPC should have reached the server.
	if len(js.log2) != 1 {
		t.Fatalf("wrong number (%v) of RPCs delivered, expected 1", len(js.log2))
	}

	n := rn.GetCount(1000)
	if n != 1 {
		t.Fatalf("wrong GetCount() %v, expected %v\n", n, 1)
	}
}
//
// if an RPC is stuck in a server, and the server
// is killed with DeleteServer(), does the RPC
// get un-stuck?
//
func TestKilled(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	e := rn.MakeEnd("end1-99")

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer("server99", rs)

	rn.Connect("end1-99", "server99")
	rn.Enable("end1-99", true)

	doneCh := make(chan bool)
	go func() {
		// Handler3 sleeps 20 seconds, so this Call blocks.
		reply := 0
		ok := e.Call("JunkServer.Handler3", 99, &reply)
		doneCh <- ok
	}()

	time.Sleep(1000 * time.Millisecond)

	// after ~1.1s the 20-second handler cannot have finished.
	select {
	case <-doneCh:
		t.Fatalf("Handler3 should not have returned yet")
	case <-time.After(100 * time.Millisecond):
	}

	rn.DeleteServer("server99")

	// killing the server should make the stuck Call return
	// false promptly.
	select {
	case x := <-doneCh:
		if x != false {
			t.Fatalf("Handler3 returned successfully despite DeleteServer()")
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("Handler3 should return after DeleteServer()")
	}
}
// TestBenchmark measures per-RPC latency over a reliable network
// and prints it; it makes no assertions about timing.
func TestBenchmark(t *testing.T) {
	runtime.GOMAXPROCS(4)

	rn := MakeNetwork()
	defer rn.Cleanup()

	e := rn.MakeEnd("end1-99")

	js := &JunkServer{}
	svc := MakeService(js)

	rs := MakeServer()
	rs.AddService(svc)
	rn.AddServer("server99", rs)

	rn.Connect("end1-99", "server99")
	rn.Enable("end1-99", true)

	t0 := time.Now()
	n := 100000
	for iters := 0; iters < n; iters++ {
		reply := ""
		e.Call("JunkServer.Handler2", 111, &reply)
		if reply != "handler2-111" {
			t.Fatalf("wrong reply from Handler2")
		}
	}
	fmt.Printf("%v for %v\n", time.Since(t0), n)
	// march 2016, rtm laptop, 22 microseconds per RPC
}