Background:
dapr runtime: v1.11.1
By default, Dapr relies on Kubernetes' own service discovery. But when edge services coexist with a Kubernetes cluster, the two sides cannot discover each other through the Kubernetes name-resolution component, so a third-party registry is needed. The official components only ship a Consul name resolver and no etcd one, so here I write a simple etcd name-resolution component.
Project code: gongshen/dapr-components · GitHub
First, we need to implement the nameresolution Resolver interface:
// Resolver is the interface of name resolver.
type Resolver interface {
	// Init initializes name resolver.
	Init(metadata Metadata) error
	// ResolveID resolves name to address.
	ResolveID(req ResolveRequest) (string, error)
}
Next, we define a resolver struct:
type resolver struct {
	conf      *configSpec
	logger    logger.Logger
	cli       *clientv3.Client
	leaseId   clientv3.LeaseID
	keepAlive <-chan *clientv3.LeaseKeepAliveResponse
	wch       endpoints.WatchChannel
	ctx       context.Context
	cancel    context.CancelFunc
	wg        sync.WaitGroup
	mu        sync.RWMutex
	newPicker func([]string) Picker
	pickers   map[string]Picker
	allUps    map[string]*endpoints.Update
}

type configSpec struct {
	Endpoints            []string
	DialTimeout          int64
	DialKeepAliveTime    int64
	DialKeepAliveTimeout int64
	TTL                  int64
	RegisterPrefix       string
	Namespace            string
	Picker               string
}
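The Init method shown further down calls a parseConfig helper to turn the raw Configuration value from the name-resolution metadata into this struct. That helper is not part of the excerpt, so here is a minimal sketch, assuming the configuration arrives as a JSON-compatible value; the default values are illustrative only and the real implementation in the repo may differ. It only needs encoding/json, errors, and fmt from the standard library.

// parseConfig converts the raw Configuration value from the Dapr
// name-resolution metadata into a configSpec by round-tripping it
// through JSON. Defaults here are illustrative, not the repo's.
func parseConfig(raw interface{}) (*configSpec, error) {
	conf := &configSpec{
		DialTimeout:          5,
		DialKeepAliveTime:    30,
		DialKeepAliveTimeout: 10,
		TTL:                  15,
		RegisterPrefix:       "/dapr/service",
		Picker:               roundRobinPicker,
	}
	if raw == nil {
		return nil, errors.New("configuration is required")
	}
	data, err := json.Marshal(raw)
	if err != nil {
		return nil, fmt.Errorf("error serializing etcd config: %w", err)
	}
	if err := json.Unmarshal(data, conf); err != nil {
		return nil, fmt.Errorf("error deserializing etcd config: %w", err)
	}
	if len(conf.Endpoints) == 0 {
		return nil, errors.New("endpoints are required")
	}
	return conf, nil
}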
Then define a constructor:
// NewResolver creates etcd name resolver.
func NewResolver(logger logger.Logger) nameresolution.Resolver {
	return &resolver{
		logger:  logger,
		pickers: make(map[string]Picker),
		allUps:  make(map[string]*endpoints.Update),
	}
}
Init
The Init method does the actual initialization work when the runtime starts:
// Init initializes the etcd name resolver.
func (k *resolver) Init(metadata nameresolution.Metadata) error {
	k.ctx, k.cancel = context.WithCancel(context.Background())
	conf, err := parseConfig(metadata.Configuration)
	if err != nil {
		return err
	}
	k.conf = conf
	cli, err := clientv3.New(clientv3.Config{
		Context:              k.ctx,
		Endpoints:            k.conf.Endpoints,
		DialTimeout:          time.Duration(k.conf.DialTimeout) * time.Second,
		DialKeepAliveTime:    time.Duration(k.conf.DialKeepAliveTime) * time.Second,
		DialKeepAliveTimeout: time.Duration(k.conf.DialKeepAliveTimeout) * time.Second,
		PermitWithoutStream:  true,
		DialOptions: []grpc.DialOption{
			grpc.WithBlock(),
			grpc.WithConnectParams(grpc.ConnectParams{
				Backoff: backoff.DefaultConfig,
			}),
		},
	})
	if err != nil {
		return fmt.Errorf("failed to init etcd client: %w", err)
	}
	k.cli = cli
	resp, err := cli.Grant(k.ctx, k.conf.TTL)
	if err != nil {
		return err
	}
	k.leaseId = resp.ID
	manager, err := endpoints.NewManager(cli, k.conf.RegisterPrefix)
	if err != nil {
		return err
	}
	var (
		appID    string
		host     string
		httpPort string
		ok       bool
		daprPort string
	)
	if daprPort, ok = metadata.Properties[nameresolution.DaprPort]; !ok || daprPort == "" {
		return fmt.Errorf("metadata property missing: %s", nameresolution.DaprPort)
	}
	if appID, ok = metadata.Properties[nameresolution.AppID]; !ok {
		return fmt.Errorf("metadata property missing: %s", nameresolution.AppID)
	}
	if host, ok = metadata.Properties[nameresolution.HostAddress]; !ok {
		return fmt.Errorf("metadata property missing: %s", nameresolution.HostAddress)
	}
	if httpPort, ok = metadata.Properties[nameresolution.DaprHTTPPort]; !ok {
		return fmt.Errorf("metadata property missing: %s", nameresolution.DaprHTTPPort)
	} else if _, err = strconv.ParseUint(httpPort, 10, 0); err != nil {
		return fmt.Errorf("error parsing %s: %w", nameresolution.DaprHTTPPort, err)
	}
	var servicename string
	if k.conf.Namespace != "" {
		servicename = fmt.Sprintf("%s.%s", appID, k.conf.Namespace)
	} else {
		servicename = appID
	}
	key := fmt.Sprintf("%s/%s/%s:%s", k.conf.RegisterPrefix, servicename, host, daprPort)
	if err = manager.Update(k.ctx, []*endpoints.UpdateWithOpts{
		endpoints.NewAddUpdateOpts(key, endpoints.Endpoint{
			Addr: fmt.Sprintf("%s:%s", host, daprPort),
			Metadata: map[string]interface{}{
				"servicename": servicename,
			},
		}, clientv3.WithLease(k.leaseId)),
	}); err != nil {
		return err
	}
	k.logger.Info("ETCD register success. key:", key)
	k.keepAlive, err = cli.KeepAlive(k.ctx, k.leaseId)
	if err != nil {
		return err
	}
	go k.keepalive()
	k.wch, err = manager.NewWatchChannel(k.ctx)
	if err != nil {
		return err
	}
	// init picker
	switch k.conf.Picker {
	case randomPicker:
		k.newPicker = func(i []string) Picker {
			return newRPicker(i)
		}
	default:
		k.newPicker = func(i []string) Picker {
			return newRRPicker(i)
		}
	}
	k.watchOnce()
	k.wg.Add(1)
	go k.watch()
	return nil
}
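Init also launches a keepalive goroutine that drains the lease keep-alive channel. That method is not shown in the excerpt, so here is a minimal sketch of what it can look like; the version in the repo may do more, e.g. re-register when the lease is lost.

// keepalive drains the keep-alive responses so the etcd client keeps
// renewing the lease. If the channel closes, the lease can no longer be
// renewed and the registration will expire after TTL seconds.
func (k *resolver) keepalive() {
	for {
		select {
		case <-k.ctx.Done():
			return
		case resp, ok := <-k.keepAlive:
			if !ok {
				k.logger.Warn("etcd lease keep-alive channel closed; registration will expire")
				return
			}
			k.logger.Debugf("etcd lease %x renewed, TTL=%d", resp.ID, resp.TTL)
		}
	}
}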
watchOnce does a one-shot, full fetch of the address list from etcd when the service first starts.
watch then keeps reading the watch channel for subsequent address changes.
func (k *resolver) watchOnce() {
	select {
	case <-k.ctx.Done():
		return
	case ups, ok := <-k.wch:
		if !ok {
			return
		}
		for key, addrs := range k.watchHandler(ups) {
			k.pickers[key] = k.newPicker(addrs)
		}
	}
}
func (k *resolver) watch() {
	defer k.wg.Done()
	for {
		select {
		case <-k.ctx.Done():
			return
		case ups, ok := <-k.wch:
			if !ok {
				return
			}
			ans := k.watchHandler(ups)
			k.mu.Lock()
			for key, addrs := range ans {
				k.pickers[key] = k.newPicker(addrs)
			}
			k.mu.Unlock()
		}
	}
}
watchHandler processes the endpoint changes observed from etcd. Note that delete events carry no endpoint payload, so the service name is recovered from the entry stored when the endpoint was added:
func (k *resolver) watchHandler(ups []*endpoints.Update) map[string][]string {
	upAppIDMap := make(map[string]bool)
	for _, up := range ups {
		switch up.Op {
		case endpoints.Add:
			k.allUps[up.Key] = up
			metadata := up.Endpoint.Metadata.(map[string]interface{})
			servicename := metadata["servicename"].(string)
			upAppIDMap[servicename] = true
		case endpoints.Delete:
			// Delete events carry no endpoint payload, so recover the
			// service name from the entry stored when it was added.
			if old, ok := k.allUps[up.Key]; ok {
				metadata := old.Endpoint.Metadata.(map[string]interface{})
				servicename := metadata["servicename"].(string)
				upAppIDMap[servicename] = true
				delete(k.allUps, up.Key)
			}
		}
	}
	ans := make(map[string][]string, len(upAppIDMap))
	for _, up := range k.allUps {
		metadata := up.Endpoint.Metadata.(map[string]interface{})
		servicename := metadata["servicename"].(string)
		if !upAppIDMap[servicename] {
			continue
		}
		ans[servicename] = append(ans[servicename], up.Endpoint.Addr)
	}
	return ans
}
ResolveID
During service discovery, this resolves a service name to a concrete address:
func (k *resolver) ResolveID(req nameresolution.ResolveRequest) (string, error) {
	var servicename string
	if req.Namespace != "" {
		servicename = fmt.Sprintf("%s.%s", req.ID, req.Namespace)
	} else {
		servicename = req.ID
	}
	k.mu.RLock()
	defer k.mu.RUnlock()
	picker, ok := k.pickers[servicename]
	if !ok {
		return "", fmt.Errorf("no healthy services found with servicename '%s'", servicename)
	}
	return picker.Pick(), nil
}
Note the pickers map used above: a Picker encapsulates the load-balancing strategy.
Two strategies are provided, random and round-robin:
type Picker interface {
	Pick() string
}

const (
	randomPicker     = "random"
	roundRobinPicker = "roundrobin"
)

type rPicker struct {
	addrs []string
}

func newRPicker(addrs []string) *rPicker {
	return &rPicker{
		addrs: addrs,
	}
}

func (p *rPicker) Pick() string {
	return p.addrs[rand.Int()%len(p.addrs)]
}

type rrPicker struct {
	addrs []string
	next  uint32
}

func newRRPicker(addrs []string) *rrPicker {
	return &rrPicker{
		addrs: addrs,
	}
}

func (p *rrPicker) Pick() string {
	addrsLen := uint32(len(p.addrs))
	nextIndex := atomic.AddUint32(&p.next, 1)
	return p.addrs[nextIndex%addrsLen]
}
You may have noticed that a service running inside the k8s cluster can end up with two entries in etcd, one with the namespace suffix and one without. This is how calls from within the same k8s cluster are distinguished from calls arriving over the edge network.
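For illustration only (the prefix, app ID, address, and port below are made up): with RegisterPrefix set to /dapr/service, an app order in the default namespace whose Dapr sidecar listens on 10.0.0.5:50002 would show up under keys shaped like

/dapr/service/order.default/10.0.0.5:50002
/dapr/service/order/10.0.0.5:50002

so callers inside the cluster resolve order.default while edge callers resolve plain order.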
Finally, the component has to be registered in github.com/dapr/dapr/cmd/daprd/components. Create a new file, nameresolution_etcd.go:
func init() {
	nrLoader.DefaultRegistry.RegisterComponent(etcd.NewResolver, "etcd/v1")
}
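For context, the whole file might look like the sketch below. The import path for the etcd resolver is an assumption (github.com/gongshen/dapr-components/nameresolution/etcd) and should be adjusted to wherever the package actually lives; the nrLoader alias points at Dapr's name-resolution registry package, the same one the sibling nameresolution_*.go files in that directory import.

package components

import (
	nrLoader "github.com/dapr/dapr/pkg/components/nameresolution"

	// Assumed import path for the custom resolver; adjust as needed.
	etcd "github.com/gongshen/dapr-components/nameresolution/etcd"
)

func init() {
	// Register the custom etcd resolver so daprd can instantiate it by name.
	nrLoader.DefaultRegistry.RegisterComponent(etcd.NewResolver, "etcd/v1")
}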
If you're interested in Dapr, feel free to leave a comment O(∩_∩)O