debug: false
dexcloud:
  rpc:
    circuitbreakerconf:
      isOn: false
  configclient:
    enabled: true
  base:
    microservice:
      name: configuration
      version: v1
      organization: ranoss
    metaInfo:
      scope: test
  serviceinfo:
    serviceName: uedm-configuration # application service name
  web:
    swagger:
      beanConfig:
        title: 'UEDM configuration API Documentation'
        version: '1.0'
  discovery:
    msb:
      enabled: true
      server:
        address: ${msb_svrIp}
        port: ${msb_svrPort}
        namespace: ${msb_nameSpace}
      client:
        cache:
          enabled: false
        registry:
          enabled: false
  ftpservice:
    type: sftp
    pasv: true
    # userName: ${ftpservice_userName}
    # userPassword: ${ftpservice_ftpServiceConfig_userPassword}
    # ftpServerAddress: ${ftpservice_ftpServerAddress}
    # ftpServerPort: ${ftpservice_ftpServerPort}
    # ftpsServerAddress: ${ftpservice_ftpsServerAddress}
    # ftpsServerPort: ${ftpservice_ftpsServerPort}
    sftpServerAddress: 172.20.1.21
    sftpServerPort: ${ftpservice_sftpServerPort}
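    # sftpServerAddress is a fixed address here; in a PaaS deployment a placeholder would
    # presumably be used instead, e.g. (assumed variable name):
    # sftpServerAddress: ${ftpservice_sftpServerAddress}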
redis:
  redisson:
    host: ${redis_host} # [single-node, cluster modes] Redis server address. In a PaaS environment, redis_host is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_ADDRESS
    port: ${redis_port} # [single-node, cluster modes] Redis server port. In a PaaS environment, redis_port is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_PORT
    password: ${redis_password} # [all modes] Redis server password. In a PaaS environment, redis_password is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_PASSWORD and decrypted automatically
    sentinelHost: ${redis_sentinel_host:} # [HA mode] Redis Sentinel address. In a PaaS environment, redis_sentinel_host is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_SENTINEL_ADDRESS
    sentinelPort: ${redis_sentinel_port:} # [HA mode] Redis Sentinel port. In a PaaS environment, redis_sentinel_port is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_SENTINEL_PORT
    masterName: ${redis_sentinel_mastername:} # [HA mode] Name of the Redis master node. In a PaaS environment, redis_sentinel_mastername is automatically replaced with the value of the environment variable OPENPALETTE_REDIS_SENTINEL_MASTERNAME
    poolSize: 32 # [all modes] [default 64] Maximum connection pool size per master node. Tune as needed
    poolMinIdleSize: 4 # [all modes] [default 32] Minimum number of keep-alive connections per master node. Tune as needed
    slavePoolSize: 2 # [cluster, HA modes] [default 64] Maximum connection pool size per slave node for regular (non pub/sub) operations. Tune as needed
    slavePoolMinIdleSize: 1 # [cluster, HA modes] [default 32] Minimum number of keep-alive connections per slave node for regular (non pub/sub) operations. Tune as needed
    dnsMonitoringInterval: 5000 # [all modes] [default 5000] Interval in milliseconds for checking node DNS changes
    readMode: MASTER # [cluster, HA modes] [default SLAVE] (must be upper case) Node selection mode for read operations. Valid values: SLAVE - read from slave nodes only; MASTER - read from the master node only; MASTER_SLAVE - read from both master and slave nodes
    scanInterval: 1000 # [cluster mode] [default 1000] Interval in milliseconds between scans of Redis cluster node state
    timeout: 3000 # [all modes] [default 3000] Time in milliseconds to wait for a node to reply to a command, counted from the moment the command is sent successfully
    connectTimeout: 10000 # [all modes] [default 10000] Timeout in milliseconds when establishing a connection to any node
    idleConnectionTimeout: 10000 # [all modes] [default 10000] If the pool holds more connections than the minimum idle size, connections idle longer than this value (milliseconds) are closed and removed from the pool
    retryAttempts: 3 # [all modes] [default 3] If a command still cannot be sent to a given node after retryAttempts retries, an error is thrown. If it is sent successfully within this limit, the timeout (command wait timeout) starts counting
    retryInterval: 1500 # [all modes] [default 1500] Interval in milliseconds to wait before retrying after a command fails to send
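    # Illustrative HA (sentinel) variant using the same keys as above (example values only):
    # sentinelHost: 172.20.1.30
    # sentinelPort: 26379
    # masterName: mymaster
    # readMode: SLAVE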
server:
  port: 29101
  jetty:
    acceptors: 2
    selectors: 4
    threadpool:
      maxThreads: 128
      minThreads: 16
spring:
  application:
    name: uedm-configuration-configuration
  datasource:
    url: jdbc:postgresql://${rdb_ip}:${rdb_port}/${rdb_dbname}
    username: ${rdb_user}
    password: ${rdb_password}
    driver-class-name: org.postgresql.Driver
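    # Example of a fully resolved URL (illustrative values only):
    # url: jdbc:postgresql://127.0.0.1:5432/uedm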
  cache:
    type: caffeine
  main:
    allow-bean-definition-overriding: true
mybatis:
  configuration:
    map-underscore-to-camel-case: true
pagehelper:
  helperDialect: postgresql
  reasonable: false
  supportMethodsArguments: true
  params: count=countSql
kafkaclientconf:
  zkServers: ${kafka_zk_port}
  bootstrapServers: ${kafka_port}
  kafkaServiceName: kafka
  kafkaServiceVersion: v1
  consumerConf:
    properties:
      value.deserializer: org.apache.kafka.common.serialization.StringDeserializer
  producerConf:
    properties:
      key.serializer: org.apache.kafka.common.serialization.StringSerializer
      value.serializer: org.apache.kafka.common.serialization.StringSerializer
  zkClientConf:
    sessionTimeout: 15000
    connectionTimeout: 10000
    zkSerializer: org.dexcloud.springboot.kafka.serializer.StringZkSerializer
  zkUtilsConf:
    secure: false
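  # consumerConf sets only value.deserializer; the remaining consumer properties are assumed to
  # come from framework defaults. Explicit equivalents could be added the same way (illustrative):
  # consumerConf:
  #   properties:
  #     key.deserializer: org.apache.kafka.common.serialization.StringDeserializer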
uedm:
  dependens:
    module: configuration
    times: 100 # number of request retries
    services: # list of dependent services
      - backup-service
  mocPublish:
    waitTime: 3000
  thread:
    pool:
      core-pool-size: 10
      max-pool-size: 100
      queue-capacity: 1000
      keep-alive-time: 60
      name-prefix: cfg-executor-
  db:
    init:
      forced-install: false
      module: configuration
      current-version: @revision@
      base-version: v1
      install-files:
      update-files:
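      # install-files and update-files are left empty here; entries would presumably be listed as a
      # YAML sequence (hypothetical file names):
      # install-files:
      #   - configuration_install.sql
      # update-files:
      #   - configuration_update.sql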
  cache:
    caches:
      - name: ALL_OBJECT # cache of all objects (including groups, sites, data centers, buildings, floors, rooms, micro-modules, models, monitored objects and monitored object types)
        maxSize: 5000000
      - name: FORMULA # formula configuration
        maxSize: 8000000
      - name: EFFICIENCY_MODEL # energy-efficiency models
        maxSize: 2000000
      - name: POINT_MAPPING # measurement point mapping
        maxSize: 8000000
      - name: UP_DOWNLOAD_TASK # import/export task cache
        maxSize: 100000
      - name: LOGIC_GROUP # logical groups
        maxSize: 1000000
      - name: TEMP_OBJECT # temporary monitored-object names used during monitored-object import
        maxSize: 8000000
      - name: ALARM_CFG_CRATE_INSERT # alarm code inserts
        maxSize: 100000
      - name: ALARM_CFG_CRATE_UPDATE # alarm code updates
        maxSize: 100000
      - name: DOWNLOAD_ERROR_FILE # error message files
        maxSize: 100000
      - name: USER
        maxSize: 8000000
      - name: ROLE
        maxSize: 8000000
      - name: USER_AUTHORITY
        maxSize: 8000000
      - name: LOGIC_GROUP_USER
        maxSize: 8000000
        expirationTime: 86400
      - name: ALARM_CONFIG_OBJECT # alarm instance cache
        maxSize: 100000
      - name: TEMP_ALARM_CONFIG_OBJECT # alarm template cache
        maxSize: 100000
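      # A new cache follows the same pattern; expirationTime (seconds) appears optional, since only
      # LOGIC_GROUP_USER sets it above. Illustrative entry (hypothetical name):
      # - name: MY_NEW_CACHE
      #   maxSize: 100000
      #   expirationTime: 3600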
  kafka:
    consumer:
      topics:
        - cma_backup_start_notify
        - zenap_sm_user_change_list
        - zenap_sm_role_change_list
        - zenap_sm_opset_change_list
        - zenap_sm_user_change
        - zenap_sm_role_change
        - alarm_code_model_change
        - alarm_cfg_change
        - template_alarm_code_mapping_change
        - uedm_south_bcua_data
contentlengthlimitconf:
  defaultlimit: -1
postgresql:
  ip: ${rdb_ip}
  port: ${rdb_port}
  db-name: ${rdb_dbname}
  username: ${rdb_user}
  password: ${rdb_password}
# special handling for MOCs
icon2d:
  mocList:
    - r32.uedm.cam
    - r32.uedm.udvalue
# resource_key to icon
mocMap:
  BOXCAM: boxCamera
  BALLCAM: ballCamera
  VALVEH: udValveH
  VALVEL: udValveL
# resource_key to icon height
iconHeightMap:
  BOXCAM: 1200
  BALLCAM: 1200
  VALVEH: 1400
  VALVEL: 1400
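# Mapping an additional resource key would presumably touch both maps above (hypothetical key):
# mocMap:
#   NEWCAM: newCamera
# iconHeightMap:
#   NEWCAM: 1200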