-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathmain.go
1710 lines (1438 loc) Β· 45.2 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
package main
import (
"context"
"crypto/tls"
"fmt"
"log"
"net"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/horgh/irc"
"github.com/pkg/errors"
)
// Catbox holds the state for this local server.
// I put everything global to a server in an instance of struct rather than
// have global variables.
type Catbox struct {
	// ConfigFile is the path to the config file.
	ConfigFile string

	// Config is the currently loaded config.
	Config *Config

	// Next client ID to issue. This turns into TS6 ID which gets concatenated
	// with our SID to make the TS6 UID. We wrap it in a mutex as different
	// goroutines must access it.
	NextClientID     uint64
	NextClientIDLock sync.Mutex

	// LocalClients are unregistered.
	// Client id (uint64) is the locally unique key.
	// It is useful to use this instead of TS6UID/TS6SID as we need to look up
	// all 3 types inside the event handler.
	// When we upgrade them to LocalUser/LocalServer, we drop them from this.
	LocalClients map[uint64]*LocalClient
	// LocalUsers are clients registered as users.
	LocalUsers map[uint64]*LocalUser
	// LocalServers are clients registered as servers.
	LocalServers map[uint64]*LocalServer

	// Users with operator status. They may be local or remote.
	Opers map[TS6UID]*User

	// Track nicks in use and by which user. Canonicalized nickname to TS6 UID.
	Nicks map[string]TS6UID

	// Track users on the network. TS6 UID to User. Local or remote.
	Users map[TS6UID]*User

	// Track servers on the network. TS6 SID to Server. Local or remote.
	Servers map[TS6SID]*Server

	// Track channels on the network. Channel name (canonicalized) to Channel.
	Channels map[string]*Channel

	// Active K:Lines (bans).
	KLines []KLine

	// When we close this channel, this indicates that we're shutting down.
	// Other goroutines can check if this channel is closed.
	ShutdownChan chan struct{}

	// Tell the server something on this channel.
	ToServerChan chan Event

	// The highest number of local users we have seen at once.
	HighestLocalUserCount int

	// The highest number of global users we have seen at once.
	HighestGlobalUserCount int

	// The highest number of local clients (unregistered + users + servers) we
	// have seen at once.
	HighestConnectionCount int

	// Track how many connections we've received in total. The definition of a
	// connections in this context: User/server registrations. Not accepted TCP
	// connections (which is what it was in the past).
	ConnectionCount int

	// Our TLS configuration.
	TLSConfig *tls.Config

	// Certificate is the currently loaded certificate/key pair; guarded by
	// CertificateMutex so it can be swapped at runtime (see getCertificate).
	Certificate      *tls.Certificate
	CertificateMutex *sync.RWMutex

	// TCP plaintext and TLS listeners.
	Listener    net.Listener
	TLSListener net.Listener

	// WaitGroup to ensure all goroutines clean up before we end.
	WG sync.WaitGroup

	// Whether we should restart after we cleanly complete shutdown.
	// This will always be false unless someone triggered a restart.
	Restart bool

	// Track the time we last tried to connect to any server.
	LastConnectAttempt time.Time

	// Track what servers to try to connect to in a queue. This is because we
	// try one at a time, and we don't want to favour those that happen to
	// appear first in the config.
	LinkQueue []*ServerDefinition
}
// KLine holds a kline (a ban).
type KLine struct {
	// Together we have <usermask>@<hostmask>
	UserMask string
	HostMask string

	// Reason is the human-readable reason recorded for the ban.
	Reason string
}
// Message tells us the message and its destination. It primarily exists so that
// we can collect these for later processing. It makes it possible for us to
// have less side effects.
type Message struct {
	// Target is the local connection the message should be delivered to.
	Target *LocalClient

	// Message is the IRC protocol message to send.
	Message irc.Message
}
// TS6ID is a client's unique identifier. Unique to this server only.
type TS6ID string

// TS6SID uniquely identifies a server. Globally.
type TS6SID string

// TS6UID is SID+UID. Uniquely identify a client. Globally.
type TS6UID string
// Event holds a message containing something to tell the server.
type Event struct {
	// Type says which kind of event this is. See the EventType constants.
	Type EventType

	// Client is the local connection the event concerns, where applicable.
	Client *LocalClient

	// Message is the IRC message involved, for message events.
	Message irc.Message

	// If we have an error associated with the event, such as in the case of
	// some DeadClientEvents, populate it here.
	Error error
}
// EventType is a type of event we can tell the server about.
type EventType int

const ( // nolint: deadcode
	// NullEvent is a default event. This means the event was not populated.
	NullEvent EventType = iota // nolint: megacheck
	// NewClientEvent means a new client connected.
	NewClientEvent
	// DeadClientEvent means client died for some reason. Clean it up.
	// It's useful to be able to know immediately and inform the client if we're
	// going to decide they are getting cut off (e.g., malformed message).
	DeadClientEvent
	// MessageFromClientEvent means a client sent a message.
	MessageFromClientEvent
	// WakeUpEvent means the server should wake up and do bookkeeping.
	WakeUpEvent
	// RehashEvent tells the server to rehash.
	RehashEvent
	// RestartEvent tells the server to restart.
	RestartEvent
)
// UserMessageLimit defines a cap on how many messages a user may send at once.
//
// As part of flood control, each user has a counter that maxes out at this
// number. Each message we process from them decrements their counter by one.
// If their counter reaches 0, we queue their message and process it once their
// counter becomes positive.
//
// Each second we raise each user's counter by one (to this maximum).
//
// This is similar to ircd-ratbox's flood control. See its packet.c.
const UserMessageLimit = 10

// ExcessFloodThreshold defines the number of messages a user may have queued
// before they get disconnected for flooding.
const ExcessFloodThreshold = 50

// ChanModesPerCommand tells how many channel modes we accept per MODE command
// from a user.
const ChanModesPerCommand = 4
// main is the entry point: parse arguments, build the server state, run it,
// and optionally re-exec ourselves if a restart was requested.
func main() {
	log.SetFlags(log.Ldate | log.Ltime)
	log.SetOutput(os.Stdout)

	args := getArgs()
	if args == nil {
		os.Exit(1)
	}

	// Resolve our own binary's absolute path up front so a later restart can
	// re-exec us even if the working directory changed.
	binPath, err := filepath.Abs(os.Args[0])
	if err != nil {
		log.Fatalf("Unable to determine absolute path to binary: %s: %s",
			os.Args[0], err)
	}

	cb, err := newCatbox(args.ConfigFile)
	if err != nil {
		log.Fatal(err)
	}

	if err := cb.start(args.ListenFD); err != nil {
		log.Fatal(err)
	}

	if !cb.Restart {
		log.Printf("Server shutdown cleanly.")
		return
	}

	log.Printf("Shutdown completed. Restarting...")
	argv := []string{binPath, "-conf", cb.ConfigFile}
	if err := syscall.Exec(binPath, argv, nil); err != nil { // nolint: gas
		log.Fatalf("Restart failed: %s", err)
	}
	log.Fatalf("not reached")
}
// newCatbox creates the server state: it allocates all tracking maps and
// channels, loads and validates the configuration, and, if TLS is configured,
// prepares the TLS config and loads the certificate.
//
// It returns an error if the configuration is invalid or the certificate
// cannot be loaded.
func newCatbox(configFile string) (*Catbox, error) {
	cb := Catbox{
		ConfigFile:   configFile,
		LocalClients: make(map[uint64]*LocalClient),
		LocalUsers:   make(map[uint64]*LocalUser),
		LocalServers: make(map[uint64]*LocalServer),
		Opers:        make(map[TS6UID]*User),
		Users:        make(map[TS6UID]*User),
		Nicks:        make(map[string]TS6UID),
		Servers:      make(map[TS6SID]*Server),
		Channels:     make(map[string]*Channel),
		KLines:       []KLine{},

		// shutdown() closes this channel.
		ShutdownChan: make(chan struct{}),

		// We never manually close this channel.
		ToServerChan: make(chan Event),
	}

	cfg, err := checkAndParseConfig(configFile)
	if err != nil {
		// Wrap rather than fmt.Errorf("%s", ...) so the cause stays in the
		// error chain, consistent with loadCertificate. The rendered message
		// ("configuration problem: <cause>") is unchanged.
		return nil, errors.Wrap(err, "configuration problem")
	}
	cb.Config = cfg

	if cb.Config.ListenPortTLS != "-1" || cb.Config.CertificateFile != "" ||
		cb.Config.KeyFile != "" {
		cb.CertificateMutex = &sync.RWMutex{}

		tlsConfig := &tls.Config{
			// GetCertificate lets us swap the certificate at runtime without
			// recreating the listener.
			GetCertificate:           cb.getCertificate,
			PreferServerCipherSuites: true,
			SessionTicketsDisabled:   true,
			// It would be nice to be able to be more restrictive on ciphers, but in
			// practice many clients do not support the strictest.
			//CipherSuites: []uint16{
			//	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			//	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			//},
		}

		cb.TLSConfig = tlsConfig

		if err := cb.loadCertificate(); err != nil {
			return nil, err
		}
	}

	return &cb, nil
}
// Return the current certificate.
//
// We use tls.Config's GetCertificate so that we can swap out the certificate
// while running without having to recreate the net.Listener.
func (cb *Catbox) getCertificate(
	hello *tls.ClientHelloInfo,
) (*tls.Certificate, error) {
	// Take a snapshot of the pointer under the read lock; the check and return
	// can then happen outside the critical section.
	cb.CertificateMutex.RLock()
	cert := cb.Certificate
	cb.CertificateMutex.RUnlock()

	if cert == nil {
		return nil, errors.New("certificate not set")
	}
	return cert, nil
}
// Load the certificate and key from files.
//
// A no-op when no certificate/key files are configured. On success the loaded
// pair replaces cb.Certificate under the certificate mutex.
func (cb *Catbox) loadCertificate() error {
	certFile, keyFile := cb.Config.CertificateFile, cb.Config.KeyFile
	if certFile == "" || keyFile == "" {
		return nil
	}

	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return errors.Wrap(err, "error loading certificate/key")
	}

	cb.CertificateMutex.Lock()
	cb.Certificate = &cert
	cb.CertificateMutex.Unlock()

	return nil
}
// start starts up the server.
//
// We open the TCP port, start goroutines, and then receive messages on our
// channels. It returns once shutdown has completed and all goroutines have
// finished.
func (cb *Catbox) start(listenFD int) error {
	if listenFD == -1 && cb.Config.ListenPort == "-1" &&
		cb.Config.ListenPortTLS == "-1" {
		// Return an error rather than log.Fatalf: this function already
		// returns errors and the caller (main) reports them.
		return fmt.Errorf("you must set a listen port")
	}

	// Plaintext listener from an inherited file descriptor.
	//
	// NOTE(review): if both listenFD and ListenPort are set, cb.Listener keeps
	// only the port listener below, so shutdown() never closes the FD one —
	// confirm these options are meant to be mutually exclusive.
	if listenFD != -1 {
		f := os.NewFile(uintptr(listenFD), "<fd>")
		ln, err := net.FileListener(f)
		if err != nil {
			return fmt.Errorf("unable to listen: %s", err)
		}
		cb.Listener = ln

		cb.WG.Add(1)
		go cb.acceptConnections(cb.Listener)
	}

	// Plaintext listener on the configured host/port.
	if cb.Config.ListenPort != "-1" {
		ln, err := net.Listen("tcp", fmt.Sprintf("%s:%s", cb.Config.ListenHost,
			cb.Config.ListenPort))
		if err != nil {
			return fmt.Errorf("unable to listen: %s", err)
		}
		cb.Listener = ln

		cb.WG.Add(1)
		go cb.acceptConnections(cb.Listener)
	}

	// TLS listener.
	if cb.Config.ListenPortTLS != "-1" {
		tlsLN, err := tls.Listen("tcp", fmt.Sprintf("%s:%s", cb.Config.ListenHost,
			cb.Config.ListenPortTLS), cb.TLSConfig)
		if err != nil {
			return fmt.Errorf("unable to listen (TLS): %s", err)
		}
		cb.TLSListener = tlsLN

		cb.WG.Add(1)
		go cb.acceptConnections(cb.TLSListener)
	}

	// Alarm is a goroutine to wake up this one periodically so we can do things
	// like ping clients.
	cb.WG.Add(1)
	go cb.alarm()

	// Catch SIGHUP and rehash.
	// Catch SIGUSR1 and restart.
	//
	// The channel must be buffered: signal.Notify does not block when sending,
	// so signals arriving while we are busy would be dropped on an unbuffered
	// channel (see the os/signal documentation; go vet flags this).
	signalChan := make(chan os.Signal, 2)
	signal.Notify(signalChan, syscall.SIGHUP)
	signal.Notify(signalChan, syscall.SIGUSR1)

	cb.WG.Add(1)
	go func() {
		defer cb.WG.Done()
		for {
			select {
			case sig := <-signalChan:
				switch sig {
				case syscall.SIGHUP:
					log.Printf("Received SIGHUP signal, rehashing")
					cb.newEvent(Event{Type: RehashEvent})
				case syscall.SIGUSR1:
					log.Printf("Received SIGUSR1 signal, restarting")
					cb.newEvent(Event{Type: RestartEvent})
				default:
					log.Printf("Received unknown signal!")
				}

			case <-cb.ShutdownChan:
				signal.Stop(signalChan)
				// After Stop() we're guaranteed we will receive no more on the channel,
				// so we can close the channel, and then drain it.
				close(signalChan)
				for range signalChan {
				}
				log.Printf("Signal listener shutting down.")
				return
			}
		}
	}()

	log.Printf("catbox started")

	cb.eventLoop()

	// We don't need to drain any channels. None close that will have any
	// goroutines blocked on them.

	cb.WG.Wait()

	return nil
}
// eventLoop processes events on the server's channel.
//
// It continues until the shutdown channel closes, indicating shutdown.
func (cb *Catbox) eventLoop() {
	for {
		select {
		// Careful about using the Client we get back in events. It may have been
		// promoted to a different client type (LocalUser, LocalServer).
		case evt := <-cb.ToServerChan:
			switch evt.Type {
			case NewClientEvent:
				log.Printf("New client connection: %s", evt.Client)
				cb.LocalClients[evt.Client.ID] = evt.Client

			case DeadClientEvent:
				// The connection may be in any of the three registration
				// stages; quit whichever one we find. Unknown IDs (already
				// cleaned up) are ignored.
				if lc, ok := cb.LocalClients[evt.Client.ID]; ok {
					lc.quit("I/O error")
				} else if lu, ok := cb.LocalUsers[evt.Client.ID]; ok {
					lu.quit(cb.errorToQuitMessage(evt.Error), true)
				} else if ls, ok := cb.LocalServers[evt.Client.ID]; ok {
					ls.quit("I/O error")
				}

			case MessageFromClientEvent:
				// Dispatch to whichever registration stage the connection is
				// currently in. Unknown IDs are ignored.
				if lc, ok := cb.LocalClients[evt.Client.ID]; ok {
					lc.handleMessage(evt.Message)
				} else if lu, ok := cb.LocalUsers[evt.Client.ID]; ok {
					lu.handleMessage(evt.Message)
				} else if ls, ok := cb.LocalServers[evt.Client.ID]; ok {
					ls.handleMessage(evt.Message)
				}

			case WakeUpEvent:
				// Periodic bookkeeping, driven by the alarm goroutine.
				cb.checkAndPingClients()
				cb.connectToServers()
				cb.floodControl()

			case RehashEvent:
				cb.rehash(nil)

			case RestartEvent:
				cb.restart(nil)

			default:
				log.Fatalf("Unexpected event: %d", evt.Type)
			}

		case <-cb.ShutdownChan:
			return
		}
	}
}
// Given a DeadClientEvent's error, translate that to a QUIT message.
//
// This is only appropriate for users.
//
// The reason we have this is that the errors can be overly verbose. For
// example:
//
// read tcp ip:port->ip:port: i/o timeout
//
// read tcp ip:port->ip:port: read: connection reset by peer
func (cb *Catbox) errorToQuitMessage(err error) string {
	const fallback = "I/O error"

	if err == nil {
		return fallback
	}

	full := strings.TrimSpace(err.Error())
	if full == "" {
		return fallback
	}

	// Keep only the text after the last colon; that's the human-meaningful
	// part of Go net error strings. No colon means the message is already
	// short enough.
	idx := strings.LastIndex(full, ":")
	if idx == -1 {
		return full
	}

	tail := strings.TrimSpace(full[idx+1:])
	if tail == "" {
		// Nothing after the colon; fall back to the verbatim error text.
		return err.Error()
	}

	// We get i/o timeout from the reader when we hit the max time waiting for a
	// message. The last message may have been something other than a PONG, but
	// anyway.
	if tail == "i/o timeout" {
		return fmt.Sprintf("Ping timeout: %.f seconds",
			cb.Config.DeadTime.Seconds())
	}

	// Capitalize the first character for presentation.
	return strings.ToUpper(tail[:1]) + tail[1:]
}
// shutdown starts server shutdown.
//
// It closes the shutdown channel (the signal other goroutines watch), closes
// the listeners, and tells every local connection to quit.
func (cb *Catbox) shutdown() {
	log.Printf("Server shutdown initiated.")

	// Closing ShutdownChan indicates to other goroutines that we're shutting
	// down.
	close(cb.ShutdownChan)

	if ln := cb.Listener; ln != nil {
		if err := ln.Close(); err != nil {
			log.Printf("Error closing plaintext listener: %s", err)
		}
	}
	if ln := cb.TLSListener; ln != nil {
		if err := ln.Close(); err != nil {
			log.Printf("Error closing TLS listener: %s", err)
		}
	}

	// All clients need to be told. This also closes their write channels.
	for _, c := range cb.LocalClients {
		c.quit("Server shutting down")
	}
	for _, s := range cb.LocalServers {
		s.quit("Server shutting down")
	}
	for _, u := range cb.LocalUsers {
		u.quit("Server shutting down", false)
	}
}
// getClientID generates a new client ID. Each client that connects to us (or
// we connect to in the case of initiating a connection to a server) we assign
// a unique id using this function.
//
// We take a lock to allow it to be called safely from any goroutine.
func (cb *Catbox) getClientID() uint64 {
	cb.NextClientIDLock.Lock()
	defer cb.NextClientIDLock.Unlock()

	id := cb.NextClientID

	// Refuse to wrap the counter around; IDs must stay unique for the life of
	// the process.
	if id+1 == 0 {
		log.Fatalf("Client id overflow")
	}
	cb.NextClientID = id + 1

	return id
}
// acceptConnections accepts TCP connections and tells the main server loop
// through a channel. It sets up separate goroutines for reading/writing to
// and from the client.
//
// It runs until shutdown; closing the listener (in shutdown()) causes Accept
// to fail, after which the shutdown check ends the loop.
func (cb *Catbox) acceptConnections(listener net.Listener) {
	defer cb.WG.Done()

	for !cb.isShuttingDown() {
		conn, err := listener.Accept()
		if err != nil {
			log.Printf("Failed to accept connection: %s", err)
			continue
		}
		cb.introduceClient(conn)
	}

	log.Printf("Connection accepter shutting down.")
}
// introduceClient sets up a client we just accepted.
//
// It creates a Client struct, and sends initial NOTICEs to the client. It also
// attempts to look up the client's hostname.
//
// All of the work happens in a goroutine so the accept loop is not blocked by
// TLS handshakes or DNS lookups.
func (cb *Catbox) introduceClient(conn net.Conn) {
	cb.WG.Add(1)

	go func() {
		defer cb.WG.Done()

		id := cb.getClientID()

		client := NewLocalClient(cb, id, conn)

		// Start the writer goroutine first so the NOTICEs below can be
		// delivered.
		cb.WG.Add(1)
		go client.writeLoop()

		sendAuthNotice(
			client,
			"*** Processing your connection to "+cb.Config.ServerName,
		)

		if client.isTLS() {
			tlsVersion, tlsCipherSuite, err := client.getTLSState()
			if err != nil {
				log.Printf("Client %s: %s", client, err)
				// Closing WriteChan ends the write goroutine. We never started
				// the reader and never told the server about this client, so
				// nothing else references it.
				close(client.WriteChan)
				return
			}

			// Only TLS 1.2 and 1.3 are accepted; reject anything older.
			if tlsVersion != "TLS 1.2" && tlsVersion != "TLS 1.3" {
				cb.noticeOpers(fmt.Sprintf("Rejecting client %s using %s",
					client.Conn.IP, tlsVersion))

				// Send ERROR and start up the writer to try to let them get it. Don't
				// bother recording the client or starting the reader. We don't care.
				client.messageFromServer("ERROR",
					[]string{fmt.Sprintf(
						"Your SSL/TLS version is %s. This server requires at least TLS 1.2. Contact %s if this is a problem.",
						tlsVersion, cb.Config.AdminEmail)})
				close(client.WriteChan)
				return
			}

			sendAuthNotice(
				client,
				fmt.Sprintf("*** Connected with %s (%s)", tlsVersion, tlsCipherSuite),
			)
		}

		sendAuthNotice(client, "*** Looking up your hostname...")
		hostname := lookupHostname(context.TODO(), client.Conn.IP)
		if len(hostname) > 0 {
			sendAuthNotice(client, "*** Found your hostname")
			client.Hostname = hostname
		} else {
			sendAuthNotice(client, "*** Couldn't look up your hostname")
		}

		// Inform the main server goroutine about the client.
		//
		// Do this after sending any messages to the client's channel as it is
		// possible the channel will be closed by the server (such as during
		// shutdown).
		cb.newEvent(Event{Type: NewClientEvent, Client: client})

		cb.WG.Add(1)
		go client.readLoop()
	}()
}
// sendAuthNotice queues a pre-registration NOTICE (target "AUTH") on the
// client's write channel.
func sendAuthNotice(c *LocalClient, m string) {
	notice := irc.Message{
		Command: "NOTICE",
		Params:  []string{"AUTH", m},
	}
	c.WriteChan <- notice
}
// Return true if the server is shutting down.
func (cb *Catbox) isShuttingDown() bool {
	// Nothing is ever sent on ShutdownChan, so a successful (non-blocking)
	// receive can only mean the channel was closed, i.e. shutdown started.
	select {
	case <-cb.ShutdownChan:
	default:
		return false
	}
	return true
}
// Alarm sends a message to the server goroutine to wake it up.
// It sleeps and then repeats.
//
// NOTE: You might be tempted to replace this goroutine with time.After().
// However we want to try to always wake up every second. time.After() will
// start a new timer every time through the event loop. So for example if
// we keep receiving messages with delay under a second then we'll never
// receive from the time.After() channel. I think using this separate
// goroutine with its own channel is more reliable.
func (cb *Catbox) alarm() {
	defer cb.WG.Done()

	for !cb.isShuttingDown() {
		// We need to wake up every second for flood control.
		time.Sleep(time.Second)
		cb.newEvent(Event{Type: WakeUpEvent})
	}

	log.Printf("Alarm shutting down.")
}
// checkAndPingClients looks at each connected client.
//
// If they've been idle a short time, we send them a PING (if they're
// registered).
//
// If they've been idle a long time, we kill their connection.
//
// We also kill any whose send queue maxed out.
//
// The three connection classes (unregistered, user, server) have distinct
// rules, so each is handled by its own helper.
func (cb *Catbox) checkAndPingClients() {
	now := time.Now()
	cb.checkUnregisteredClients(now)
	cb.checkUserClients(now)
	cb.checkServerClients(now)
}

// checkUnregisteredClients kills unregistered connections whose send queue
// overflowed or that have been connected too long without registering.
// Unregistered clients do not receive PINGs.
func (cb *Catbox) checkUnregisteredClients(now time.Time) {
	for _, client := range cb.LocalClients {
		if client.SendQueueExceeded {
			client.quit("SendQ exceeded")
			continue
		}

		timeConnected := now.Sub(client.ConnectionStartTime)

		// If it's been connected long enough to need to ping it, cut it off.
		if timeConnected > cb.Config.PingTime {
			client.quit("Idle too long.")
		}
	}
}

// checkUserClients pings idle registered users and drops those idle past the
// dead time or whose send queue overflowed.
func (cb *Catbox) checkUserClients(now time.Time) {
	for _, client := range cb.LocalUsers {
		if client.SendQueueExceeded {
			client.quit("SendQ exceeded", true)
			continue
		}

		timeIdle := now.Sub(client.LastActivityTime)

		// Was it active recently enough that we don't need to do anything?
		if timeIdle < cb.Config.PingTime {
			continue
		}

		// It's been idle a while.

		// Has it been idle long enough that we consider it dead?
		if timeIdle > cb.Config.DeadTime {
			client.quit(fmt.Sprintf("Ping timeout: %d seconds",
				int(timeIdle.Seconds())), true)
			continue
		}

		// Should we ping it? We might have pinged it recently.
		if now.Sub(client.LastPingTime) < cb.Config.PingTime {
			continue
		}

		// Don't send with a prefix. mIRC apparently will not recognize PING if we do.
		// It will not respond and it will show the PING in its status window.
		// PING <source to reply to, us>
		client.maybeQueueMessage(irc.Message{
			Command: "PING",
			Params:  []string{cb.Config.ServerName},
		})
		client.LastPingTime = now
	}
}

// checkServerClients monitors linked servers: drops those bursting too long,
// pings idle ones, and drops those idle past the dead time or whose send
// queue overflowed.
func (cb *Catbox) checkServerClients(now time.Time) {
	for _, server := range cb.LocalServers {
		if server.SendQueueExceeded {
			server.quit("SendQ exceeded")
			continue
		}

		// If it is bursting then we want to check it doesn't go on too long. Drop
		// it if it does.
		if server.Bursting {
			if now.Sub(server.ConnectionStartTime) > cb.Config.PingTime {
				server.quit("Bursting too long")
			}
			continue
		}

		// Its burst completed. Now we monitor the last time we heard from it
		// and possibly ping it.

		timeIdle := now.Sub(server.LastActivityTime)

		// Was it active recently enough that we don't need to do anything?
		if timeIdle < cb.Config.PingTime {
			continue
		}

		// It's been idle a while.

		// Has it been idle long enough that we consider it dead?
		if timeIdle > cb.Config.DeadTime {
			server.quit(fmt.Sprintf("Ping timeout: %d seconds",
				int(timeIdle.Seconds())))
			continue
		}

		// Should we ping it? We might have pinged it recently.
		if now.Sub(server.LastPingTime) < cb.Config.PingTime {
			continue
		}

		// PING origin is our SID for servers.
		server.messageFromServer("PING", []string{string(cb.Config.TS6SID)})
		server.LastPingTime = now
	}
}
// connectToServers tries to connect outwards to any servers configured but not
// currently connected to.
//
// We delay a connection attempt to a server until at least ConnectAttemptTime
// elapsed.
//
// Try to link to at most one server per call. Use a queue so we give each
// server a chance rather than favouring those that appear earlier in the
// config. This is also to try to address the race condition where we link with
// two servers in the same network at the "same" time. Such a condition will
// lead to a split, but it can cause noise and collisions. Note this is a best
// effort approach. It is still possible for us to connect out to multiple
// servers through repeated calls to this function. As well, there is no limit
// on inbound linking. My intention is to reduce the likelihood of the race
// happening rather than make it impossible. Mainly because I am not sure a
// simple way to make it impossible.
func (cb *Catbox) connectToServers() {
	now := time.Now()

	// Enforce a global delay between any two connection attempts: we try at
	// most one server, then wait ConnectAttemptTime before trying any other.
	if now.Sub(cb.LastConnectAttempt) < cb.Config.ConnectAttemptTime {
		return
	}

	// If the queue is empty, refill it with every configured server we're not
	// currently linked to.
	if len(cb.LinkQueue) == 0 {
		for _, linkInfo := range cb.Config.Servers {
			// It does not make sense to try to connect to ourself. Even if
			// we're in the config.
			if linkInfo.Name == cb.Config.ServerName {
				continue
			}
			if cb.isLinkedToServer(linkInfo.Name) {
				continue
			}
			cb.LinkQueue = append(cb.LinkQueue, linkInfo)
		}
	}

	// Pop queued servers until we find one we're still not linked to (we may
	// have linked since it was queued), then attempt exactly that one.
	for len(cb.LinkQueue) > 0 {
		linkInfo := cb.LinkQueue[0]
		cb.LinkQueue = cb.LinkQueue[1:]

		if cb.isLinkedToServer(linkInfo.Name) {
			continue
		}

		cb.connectToServer(linkInfo)
		cb.LastConnectAttempt = now
		return
	}
}
// floodControl updates the message counters for all users, and potentially
// processes queued messages for any that hit their limit.
//
// Each user will have its message counter increased by 1 to a maximum of
// UserMessageLimit.
//
// Each user will have its queued messages processed until their message counter
// hits zero.
//
// If a user has too many queued messages, we cut them off for excess flooding,
// but that does not happen here. It happens where we add to the queue. This is
// to try to kill clients that might otherwise overwhelm us.
//
// We expect to be called every ~second.
//
// Even if a user is flood exempt, continue checking them here. The reason is
// if they became an operator, we want to make sure we process any queued
// messages they may have before that.
func (cb *Catbox) floodControl() {
	for _, u := range cb.LocalUsers {
		// Credit one message slot per tick, capped at the limit.
		if u.MessageCounter < UserMessageLimit {
			u.MessageCounter++
		}

		// Drain queued messages while the user has credit. handleMessage
		// decrements the counter for each message processed.
		for u.MessageCounter > 0 {
			if len(u.MessageQueue) == 0 {
				break
			}
			next := u.MessageQueue[0]
			u.MessageQueue = u.MessageQueue[1:]
			u.handleMessage(next)
		}
	}
}
// Determine if we are linked to a given server, matched by server name.
func (cb *Catbox) isLinkedToServer(name string) bool {
	// We're trivially "linked" to ourself.
	if name == cb.Config.ServerName {
		return true
	}

	// Scan all known servers (local or remote) for a name match.
	for _, srv := range cb.Servers {
		if srv.Name == name {
			return true
		}
	}

	return false
}
// Initiate a connection to a server.
//
// Do this in a goroutine to avoid blocking the main server goroutine.
func (cb *Catbox) connectToServer(linkInfo *ServerDefinition) {
cb.WG.Add(1)
go func() {
defer cb.WG.Done()
var conn net.Conn
var err error
if linkInfo.TLS {
cb.noticeOpers(fmt.Sprintf("Connecting to %s with TLS...", linkInfo.Name))