main.go (forked from ThreeDotsLabs/watermill)
package main

import (
	"context"
	"encoding/json"
	"log"
	"time"

	"github.com/ThreeDotsLabs/watermill"
	"github.com/ThreeDotsLabs/watermill-kafka/v2/pkg/kafka"
	"github.com/ThreeDotsLabs/watermill/message"
	"github.com/ThreeDotsLabs/watermill/message/router/middleware"
	"github.com/ThreeDotsLabs/watermill/message/router/plugin"
)
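
// Shared configuration: broker addresses, topic names, the logger, and the
// marshaler used to map Watermill messages to and from Kafka messages.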
var (
brokers = []string{"kafka:9092"}
consumeTopic = "events"
publishTopic = "events-processed"
logger = watermill.NewStdLogger(
true, // debug
false, // trace
)
marshaler = kafka.DefaultMarshaler{}
)
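
// event is the payload consumed from the "events" topic.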
type event struct {
ID int `json:"id"`
}
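
// processedEvent is the payload published to the "events-processed" topic
// once an incoming event has been handled.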
type processedEvent struct {
ProcessedID int `json:"processed_id"`
Time time.Time `json:"time"`
}
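
// main wires the Kafka publisher and subscriber into a Watermill router,
// registers a single handler, and starts simulating incoming events.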
func main() {
publisher := createPublisher()
// Subscriber is created with consumer group handler_1
subscriber := createSubscriber("handler_1")
router, err := message.NewRouter(message.RouterConfig{}, logger)
if err != nil {
panic(err)
}
router.AddPlugin(plugin.SignalsHandler)
router.AddMiddleware(middleware.Recoverer)
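
	// The handler below returns an error (and therefore a Nack) on bad payloads,
	// so such messages are redelivered immediately. As an illustrative sketch only
	// (not part of the original example), a Retry middleware could be added here so
	// the handler is retried in-process with a backoff before the Nack is sent;
	// the values below are assumptions:
	//
	//	router.AddMiddleware(middleware.Retry{
	//		MaxRetries:      3,
	//		InitialInterval: 100 * time.Millisecond,
	//		Logger:          logger,
	//	}.Middleware)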
// Adding a handler (multiple handlers can be added)
router.AddHandler(
"handler_1", // handler name, must be unique
consumeTopic, // topic from which messages should be consumed
subscriber,
publishTopic, // topic to which messages should be published
publisher,
func(msg *message.Message) ([]*message.Message, error) {
consumedPayload := event{}
err := json.Unmarshal(msg.Payload, &consumedPayload)
if err != nil {
// When a handler returns an error, the default behavior is to send a Nack (negative-acknowledgement).
// The message will be processed again.
//
// You can change the default behavior by using middleware such as Retry or PoisonQueue.
// You can also implement your own middleware.
return nil, err
}
log.Printf("received event %+v", consumedPayload)
newPayload, err := json.Marshal(processedEvent{
ProcessedID: consumedPayload.ID,
Time: time.Now(),
})
if err != nil {
return nil, err
}
newMessage := message.NewMessage(watermill.NewUUID(), newPayload)
return []*message.Message{newMessage}, nil
},
)
// Simulate incoming events in the background
go simulateEvents(publisher)
if err := router.Run(context.Background()); err != nil {
panic(err)
}
}

// createPublisher is a helper function that creates a Publisher, in this case the Kafka Publisher.
func createPublisher() message.Publisher {
kafkaPublisher, err := kafka.NewPublisher(
kafka.PublisherConfig{
Brokers: brokers,
Marshaler: marshaler,
},
logger,
)
if err != nil {
panic(err)
}
return kafkaPublisher
}

// createSubscriber is a helper function similar to the previous one, but it creates a Subscriber instead.
func createSubscriber(consumerGroup string) message.Subscriber {
kafkaSubscriber, err := kafka.NewSubscriber(
kafka.SubscriberConfig{
Brokers: brokers,
Unmarshaler: marshaler,
ConsumerGroup: consumerGroup, // every handler will use a separate consumer group
},
logger,
)
if err != nil {
panic(err)
}
return kafkaSubscriber
}

// simulateEvents produces events that will later be consumed.
func simulateEvents(publisher message.Publisher) {
i := 0
for {
e := event{
ID: i,
}
payload, err := json.Marshal(e)
if err != nil {
panic(err)
}
err = publisher.Publish(consumeTopic, message.NewMessage(
watermill.NewUUID(), // internal uuid of the message, useful for debugging
payload,
))
if err != nil {
panic(err)
}
i++
time.Sleep(time.Second)
}
}