数据对象
ConfigStore
ConfigStore描述了基础平台必须支持的一组平台无关的API,用于存储和检索Istio配置。配置key定义为配置对象的类型、名称和命名空间的组合,并保证在存储中唯一。此处的存储接口假定基础存储层支持Get(list)、Update(update)、Create(create)和Delete语义,但不保证任何事务语义。Update、Create和Delete是变更(mutation)操作,并且是异步的,效果可能不会立即可见(例如,在对存储进行更改后,Get可能不会立即通过key返回该对象)。即使操作成功,也可能出现间歇性错误,因此即使变更操作返回错误,也应始终检查对象存储是否已被修改。应该使用_Create_操作创建对象并使用_Update_操作更新对象。资源版本(resource version)记录每个对象上最后一次变更操作;如果变更所针对的对象版本与基础存储当前期望的版本(按纯等式比较)不同,则该操作会被拒绝。此接口的客户端不应假设版本标识符的结构或顺序。从此接口传入和返回的对象引用应视为只读,修改它们会破坏线程安全性。
ConfigStoreCache
ConfigStoreCache是配置存储的本地完全复制的缓存。缓存主动将其本地状态与远程存储同步,并提供通知机制以接收更新事件。因此,通知处理程序必须在调用Run之前注册,并且缓存在调用Run之后需要一个初始同步宽限期。
更新通知要求以下一致性保证:通知到达时,缓存中的视图必须至少与该通知同样新,但也可能更新(例如,Delete事件可能取消先前的_Add_事件)。
处理程序按照注册的顺序在单个工作队列上依次执行。处理程序接收通知事件和关联的对象。请注意,在启动缓存控制器之前,必须注册所有处理程序。
ConfigStoreCache 相较于ConfigStore多了三个方法
// ConfigStoreCache is a local, fully-replicated cache of the config store.
// It embeds ConfigStore and adds three methods: event-handler registration,
// lifecycle control, and initial-sync status.
type ConfigStoreCache interface {
ConfigStore
// RegisterEventHandler adds a handler to receive update events for the given config kind.
RegisterEventHandler(kind config.GroupVersionKind, handler func(config.Config, config.Config, Event))
// Run starts the cache controller; it runs until the stop channel is closed.
Run(stop <-chan struct{})
// HasSynced returns true after the initial cache synchronization is complete.
HasSynced() bool
}
handler
当Config发生变化,处理event事件
handler func(config.Config, config.Config, Event)
ConfigStoreCache 类型
memory
istio中有两处用到了memory ConfigStore
- 监听registry配置文件变化,写入到mem store
初始化一个map
store := memory.Make(collections.Pilot)
构造为ConfigStoreCache
configController := memory.NewController(store)
监听文件变化来处理事件
err := s.makeFileMonitor(args.RegistryOptions.FileDir, args.RegistryOptions.KubeOptions.DomainSuffix, configController)
if err != nil {
return err
}
s.ConfigStores = append(s.ConfigStores, configController)
- 监听xds数据变化,写入到mem store
// Dial the remote xDS/MCP config source and mirror its configuration into a
// local in-memory store, which is then registered with the aggregate stores.
// NOTE(review): this is an excerpt from a larger function — `s`, `srcAddress`
// and `configSource` are defined by the enclosing scope.
xdsMCP, err := adsc.New(srcAddress.Host, &adsc.Config{
Meta: model.NodeMetadata{
Generator: "api",
}.ToStruct(),
InitialDiscoveryRequests: adsc.ConfigInitialRequests(),
})
if err != nil {
return fmt.Errorf("failed to dial XDS %s %v", configSource.Address, err)
}
// Backing map for the Pilot schema set, wrapped as a ConfigStoreCache controller.
store := memory.Make(collections.Pilot)
configController := memory.NewController(store)
// The ADS client writes received config into this store.
xdsMCP.Store = model.MakeIstioStore(configController)
err = xdsMCP.Run()
if err != nil {
return fmt.Errorf("MCP: failed running %v", err)
}
s.ConfigStores = append(s.ConfigStores, configController)
log.Warn("Started XDS config ", s.ConfigStores)
ingress
监听k8s ingress资源变化
ingress.NewController(s.kubeClient, s.environment.Watcher, args.RegistryOptions.KubeOptions)
Kubernetes
监听k8s资源变化
// Create a CRD controller that watches Kubernetes config resources.
configController, err := s.makeKubeConfigController(args)
if err != nil {
return err
}
s.ConfigStores = append(s.ConfigStores, configController)
// If service-apis support is enabled, also watch the service-apis (gateway) resources.
if features.EnableServiceApis {
s.ConfigStores = append(s.ConfigStores, gateway.NewController(s.kubeClient, configController, args.RegistryOptions.KubeOptions))
}
configaggregate
将所有的 []model.ConfigStoreCache转化为一个,从而统一管理,注册对应的EventHandler
configaggregate.MakeCache(s.ConfigStores)
eventhandler
configHandler
用来监听以下crd obj的事件变化
// Pilot is the set of CRD schemas watched by pilot: the networking.istio.io
// v1alpha3 resources plus the security.istio.io v1beta1 policy resources.
Pilot = collection.NewSchemasBuilder().
MustAdd(IstioNetworkingV1Alpha3Destinationrules).
MustAdd(IstioNetworkingV1Alpha3Envoyfilters).
MustAdd(IstioNetworkingV1Alpha3Gateways).
MustAdd(IstioNetworkingV1Alpha3Serviceentries).
MustAdd(IstioNetworkingV1Alpha3Sidecars).
MustAdd(IstioNetworkingV1Alpha3Virtualservices).
MustAdd(IstioNetworkingV1Alpha3Workloadentries).
MustAdd(IstioNetworkingV1Alpha3Workloadgroups).
MustAdd(IstioSecurityV1Beta1Authorizationpolicies).
MustAdd(IstioSecurityV1Beta1Peerauthentications).
MustAdd(IstioSecurityV1Beta1Requestauthentications).
Build()
confighandler的具体实现
// configHandler triggers a full xDS push scoped to the changed config object,
// and maintains the distribution-status bookkeeping: the resource is marked
// in-progress on add/update and removed on delete. The previous config
// (first parameter) is ignored.
configHandler := func(_, curr config.Config, event model.Event) {
pushReq := &model.PushRequest{
Full: true,
ConfigsUpdated: map[model.ConfigKey]struct{}{{
Kind: curr.GroupVersionKind,
Name: curr.Name,
Namespace: curr.Namespace,
}: {}},
Reason: []model.TriggerReason{model.ConfigUpdate},
}
s.XDSServer.ConfigUpdate(pushReq)
// Track distribution status: add on create/update, remove on delete.
if event != model.EventDelete {
s.statusReporter.AddInProgressResource(curr)
} else {
s.statusReporter.DeleteInProgressResource(curr)
}
}
workloadentryhandler
WorkloadEntryHandler定义WorkloadEntry的handler
// workloadEntryHandler is the config handler for WorkloadEntry resources.
// It fires the registered workload handlers, then recomputes service
// instances for every ServiceEntry in the same namespace whose
// workloadSelector matches (or previously matched) this WorkloadEntry.
// It finishes with an EDS-only update, unless a DNS-resolution ServiceEntry
// is affected, in which case a full push is requested.
func (s *ServiceEntryStore) workloadEntryHandler(old, curr config.Config, event model.Event) {
var oldWle *networking.WorkloadEntry
// old.Spec is nil for Add events; it is set only on Update/Delete.
if old.Spec != nil {
oldWle = old.Spec.(*networking.WorkloadEntry)
}
wle := curr.Spec.(*networking.WorkloadEntry)
key := configKey{
kind: workloadEntryConfigType,
name: curr.Name,
namespace: curr.Namespace,
}
// An unhealthy entry is treated as a delete so its endpoints are withdrawn.
if features.WorkloadEntryHealthChecks && !isHealthy(curr) {
event = model.EventDelete
}
// fire off the k8s handlers
if len(s.workloadHandlers) > 0 {
si := convertWorkloadEntryToWorkloadInstance(curr)
if si != nil {
for _, h := range s.workloadHandlers {
h(si, event)
}
}
}
s.storeMutex.RLock()
// ServiceEntries with a workloadSelector in the same namespace.
entries := s.seWithSelectorByNamespace[curr.Namespace]
s.storeMutex.RUnlock()
// if there are no service entries, return now to avoid taking unnecessary locks
if len(entries) == 0 {
return
}
log.Debugf("Handle event %s for workload entry %s in namespace %s", event, curr.Name, curr.Namespace)
instancesUpdated := []*model.ServiceInstance{}
instancesDeleted := []*model.ServiceInstance{}
workloadLabels := labels.Collection{wle.Labels}
fullPush := false
configsUpdated := map[model.ConfigKey]struct{}{}
for _, se := range entries {
selected := false
// The current WorkloadEntry no longer matches this ServiceEntry's label selector.
if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
// Update case: the previous version of the entry may still have matched.
if oldWle != nil {
oldWorkloadLabels := labels.Collection{oldWle.Labels}
// The old WorkloadEntry matched, so its existing endpoints must be removed.
if oldWorkloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
selected = true
instance := convertWorkloadEntryToServiceInstances(oldWle, se.services, se.entry, &key)
instancesDeleted = append(instancesDeleted, instance...)
}
}
} else {
// Selector matches: rebuild the instances from the current entry.
selected = true
instance := convertWorkloadEntryToServiceInstances(wle, se.services, se.entry, &key)
instancesUpdated = append(instancesUpdated, instance...)
}
if selected {
// DNS-resolution ServiceEntries require a full push, not just EDS.
if se.entry.Resolution == networking.ServiceEntry_DNS {
fullPush = true
for key, value := range getUpdatedConfigs(se.services) {
configsUpdated[key] = value
}
}
}
}
if len(instancesDeleted) > 0 {
s.deleteExistingInstances(key, instancesDeleted)
}
if event != model.EventDelete {
s.updateExistingInstances(key, instancesUpdated)
} else {
s.deleteExistingInstances(key, instancesUpdated)
}
// Not a full push: only update EDS.
if !fullPush {
s.edsUpdate(append(instancesUpdated, instancesDeleted...), true)
// trigger full xds push to the related sidecar proxy
if event == model.EventAdd {
s.XdsUpdater.ProxyUpdate(s.Cluster(), wle.Address)
}
return
}
// Update the EDS cache first, then request the full push below.
s.edsUpdate(append(instancesUpdated, instancesDeleted...), false)
pushReq := &model.PushRequest{
Full: true,
ConfigsUpdated: configsUpdated,
Reason: []model.TriggerReason{model.EndpointUpdate},
}
// Full ADS push covering all affected configs.
s.XdsUpdater.ConfigUpdate(pushReq)
}
serviceEntryHandler
serviceEntryHandler 为 service entries定义handler
// serviceEntryHandler is the config handler for ServiceEntry resources.
// Add/Delete events always trigger a full push. Update events trigger a full
// push only when the derived services changed; otherwise only an EDS
// (endpoint) update is pushed.
func (s *ServiceEntryStore) serviceEntryHandler(old, curr config.Config, event model.Event) {
// Convert the ServiceEntry into its list of derived services.
cs := convertServices(curr)
configsUpdated := map[model.ConfigKey]struct{}{}
// For add/delete events always do a full push. For update events do a full
// push only when services changed - otherwise just push endpoint updates.
var addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs []*model.Service
switch event {
case model.EventUpdate:
// Derived services of the previous version.
os := convertServices(old)
// If the workloadSelector changed between old and current, all derived
// services must be treated as updated (full push).
if selectorChanged(old, curr) {
// Mark every service (union of old and current) as updated.
mark := make(map[host.Name]*model.Service, len(cs))
for _, svc := range cs {
mark[svc.Hostname] = svc
updatedSvcs = append(updatedSvcs, svc)
}
for _, svc := range os {
if _, f := mark[svc.Hostname]; !f {
updatedSvcs = append(updatedSvcs, svc)
}
}
} else {
// Diff old vs. current derived services.
addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs = servicesDiff(os, cs)
}
case model.EventDelete:
deletedSvcs = cs
case model.EventAdd:
addedSvcs = cs
default:
unchangedSvcs = cs
}
for _, svc := range addedSvcs {
s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventAdd)
configsUpdated[makeConfigKey(svc)] = struct{}{}
}
for _, svc := range updatedSvcs {
s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventUpdate)
configsUpdated[makeConfigKey(svc)] = struct{}{}
}
// Deleted services: SvcUpdate with EventDelete cleans up the endpoint shards.
for _, svc := range deletedSvcs {
s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventDelete)
configsUpdated[makeConfigKey(svc)] = struct{}{}
}
if len(unchangedSvcs) > 0 {
currentServiceEntry := curr.Spec.(*networking.ServiceEntry)
oldServiceEntry := old.Spec.(*networking.ServiceEntry)
// For DNS resolution, changed endpoints force a full push for the
// otherwise-unchanged services.
if currentServiceEntry.Resolution == networking.ServiceEntry_DNS {
if !reflect.DeepEqual(currentServiceEntry.Endpoints, oldServiceEntry.Endpoints) {
// fqdn endpoints have changed. Need full push
for _, svc := range unchangedSvcs {
configsUpdated[makeConfigKey(svc)] = struct{}{}
}
}
}
}
fullPush := len(configsUpdated) > 0
// No service changes: this was an update event that touched no services.
if !fullPush {
// The IP endpoints in a STATIC service entry changed. An EDS update for
// the unchanged services is sufficient; the full-push path below handles
// everything else.
instances := convertServiceEntryToInstances(curr, unchangedSvcs)
key := configKey{
kind: serviceEntryConfigType,
name: curr.Name,
namespace: curr.Namespace,
}
// Refresh the instance indexes for the changed backends.
s.updateExistingInstances(key, instances)
s.edsUpdate(instances, true)
return
}
// Recomputing the indexes here is too expensive - rebuild them lazily when
// needed. Indexes are only recomputed if services changed.
s.refreshIndexes.Store(true)
// When doing a full push, non-DNS added/updated/unchanged services trigger
// an EDS update so that the endpoint shards are refreshed.
allServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
nonDNSServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
allServices = append(allServices, addedSvcs...)
allServices = append(allServices, updatedSvcs...)
allServices = append(allServices, unchangedSvcs...)
for _, svc := range allServices {
if svc.Resolution != model.DNSLB {
nonDNSServices = append(nonDNSServices, svc)
}
}
// Collect instance keys for the non-DNS services.
keys := map[instancesKey]struct{}{}
for _, svc := range nonDNSServices {
keys[instancesKey{hostname: svc.Hostname, namespace: curr.Namespace}] = struct{}{}
}
// Update the EDS endpoint shards.
s.edsUpdateByKeys(keys, false)
pushReq := &model.PushRequest{
Full: true,
ConfigsUpdated: configsUpdated,
Reason: []model.TriggerReason{model.ServiceUpdate},
}
// Full ADS push.
s.XdsUpdater.ConfigUpdate(pushReq)
}