〇、常用地址



一、配置文件

1.1 bootstrap.yml

#Spring配置
spring:
  #应用名
  application:
    name: data-xx-platform
  #启动环境
  profiles:
    active: @spring.profiles.active@
  cloud:
    nacos:
      #注册中心
      discovery:
        server-addr: http://nacos.eimos.com/
        namespace: boulder-@spring.profiles.active@
        group: ${NACOS_GROUP:DEFAULT_GROUP}
      config:
        server-addr: http://nacos.eimos.com/
        file-extension: yaml   # 文件后缀名
        namespace: boulder-@spring.profiles.active@
        group: ${NACOS_GROUP:DEFAULT_GROUP}
        extension-configs[0]:
          data-id: healthCheck.yaml
          group: ${NACOS_GROUP:DEFAULT_GROUP}
  servlet:
    multipart:
      max-file-size: 100MB

#feign 配置必须位于顶层,不能嵌套在 spring 下,否则不会生效
feign:
  client:
    config:
      default:
        #不设置connectTimeout会导致readTimeout设置不生效
        connectTimeout: 60000
        readTimeout: 60000

1.2 logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <property name="springAppName" value="data-xx-platform" />
    <!-- 日志存放路径 -->
    <property name="log.path" value="logs" />
    <property name="info.fileName" value="info" />
    <property name="debug.fileName" value="debug" />
    <property name="warn.fileName" value="warn" />
    <property name="error.fileName" value="error" />
    <!-- 日志输出格式 -->
    <property name="log.pattern" value="%d{HH:mm:ss.SSS} [%thread] %-5level %logger{20} - [%method,%line] - %msg%n" />

    <!-- 控制台输出 -->
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>info</level>
        </filter>
        <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
            <layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout">
                <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n</Pattern>
            </layout>
        </encoder>
    </appender>

    <!-- 系统日志输出 -->
    <appender name="file_info" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/info.log</file>
        <!-- 循环策略:基于时间创建日志文件 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- 日志文件名格式 -->
            <!--<fileNamePattern>${log.path}/info.%d{yyyy-MM-dd}.log</fileNamePattern>-->
            <fileNamePattern>${log.path}/${info.fileName}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>10MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- 日志最大的历史 10天 -->
            <maxHistory>10</maxHistory>
        </rollingPolicy>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <!-- 过滤的级别 -->
            <level>INFO</level>
            <!-- 匹配时的操作:接收(记录) -->
            <onMatch>ACCEPT</onMatch>
            <!-- 不匹配时的操作:拒绝(不记录) -->
            <onMismatch>DENY</onMismatch>
        </filter>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <pattern>
                    <pattern>
                        {
                        "app_name":"${springAppName}",
                        "time": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "level": "%level",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%msg",
                        "stack_trace": "%exception{10}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <appender name="file_error" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/error.log</file>
        <!-- 循环策略:基于时间创建日志文件 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- 日志文件名格式 -->
            <fileNamePattern>${log.path}/error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <!-- 日志最大的历史 60天 -->
            <maxHistory>60</maxHistory>
        </rollingPolicy>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <!-- 过滤的级别 -->
            <level>ERROR</level>
            <!-- 匹配时的操作:接收(记录) -->
            <onMatch>ACCEPT</onMatch>
            <!-- 不匹配时的操作:拒绝(不记录) -->
            <onMismatch>DENY</onMismatch>
        </filter>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <pattern>
                    <pattern>
                        {
                        "app_name":"${springAppName}",
                        "time": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "level": "%level",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%msg",
                        "stack_trace": "%exception{10}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <appender name="grpc_log" class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender">
        <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
            <layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout">
                <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n</Pattern>
            </layout>
        </encoder>
    </appender>

    <!--部分类的包下的日志,可以单独处理-->
    <!-- 系统模块日志级别控制  -->
    <logger name="com.gemenyaofei" level="info" />
    <!-- Spring日志级别控制  -->
    <logger name="org.springframework" level="warn" />
    <!--mybatis的SQL语句输出-->
    <logger name="com.gemenyaofei.xx.infrastructure.repository.mapper" level="debug"/>

    <!--其他的默认级别-->
    <!--指定最基础的日志输出级别,用于指定日志输出的根节点。这个元素指定了所有日志事件都会流经的节点,也就是说,所有没有被其他 Appender 匹配的日志事件都会由这个根节点处理。-->
    <root level="INFO">
        <appender-ref ref="grpc_log" />
        <appender-ref ref="console" />
        <appender-ref ref="file_info" />
        <appender-ref ref="file_error" />
    </root>

    <!--开发环境:打印控制台-->
    <springProfile name="dev">
        <root level="info">
            <appender-ref ref="console"/>
        </root>
    </springProfile>

    <springProfile name="test">
        <root level="info">
            <appender-ref ref="console"/>
        </root>
    </springProfile>

    <!--生产环境:输出到文件-->
    <springProfile name="prod">
        <root level="info">
            <appender-ref ref="console"/>
            <appender-ref ref="file_info"/>
            <appender-ref ref="grpc_log"/>
            <appender-ref ref="file_error"/>
        </root>
    </springProfile>

    <springProfile name="uat">
        <root level="info">
            <appender-ref ref="console"/>
            <appender-ref ref="file_info"/>
            <appender-ref ref="grpc_log"/>
            <appender-ref ref="file_error"/>
        </root>
    </springProfile>

</configuration>

1.3 pom.xml


1.4 Dockerfile

FROM swr.cn-east-3.myhuaweicloud.com/release/jdk:1.8_py

# copy arthas
COPY --from=hengyunabc/arthas:latest /opt/arthas /opt/arthas

ENV TZ=CST-8
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ADD data-xx-platform-start/target/data-xx-platform.jar /app.jar

ENTRYPOINT ["sh","-c","java -javaagent:/javaagent.jar=7050:/config.yml -jar -Dfile.encoding=UTF-8 -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap /app.jar "]


二、配置类

2.1 WebConfig

package com.gemenyaofei.integration.config;

import com.alibaba.cloud.commons.io.Charsets;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.converter.*;
import org.springframework.http.converter.json.Jackson2ObjectMapperBuilder;
import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurationSupport;

import java.text.SimpleDateFormat;
import java.util.List;
import java.util.TimeZone;

/**
 * Web MVC configuration: JSON message conversion, static resource mapping,
 * interceptor registration and CORS policy.
 *
 * @author ljh
 * @date 2023/12/21 下午 6:09
 **/
@Configuration
public class WebConfig extends WebMvcConfigurationSupport {

    /**
     * Registers HTTP message converters backed by a customized Jackson
     * {@link ObjectMapper}: nulls are omitted, Longs are serialized as
     * Strings (avoids JS precision loss), transient fields are skipped,
     * unknown JSON properties are tolerated, and dates use the GMT+8
     * time zone with the {@code yyyy-MM-dd HH:mm:ss} pattern.
     *
     * @param converters the converter list to populate
     */
    @Override
    public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
        Jackson2ObjectMapperBuilder builder = new Jackson2ObjectMapperBuilder();
        builder.serializationInclusion(JsonInclude.Include.NON_NULL);
        ObjectMapper objectMapper = builder.build();
        SimpleModule simpleModule = new SimpleModule();
        // Serialize Long as String to avoid precision loss in JavaScript clients
        simpleModule.addSerializer(Long.class, ToStringSerializer.instance);
        objectMapper.registerModule(simpleModule);
        // Honor the transient modifier during (de)serialization
        objectMapper.configure(MapperFeature.PROPAGATE_TRANSIENT_MARKER, true);
        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        // Use China Standard Time (GMT+8) for date rendering
        objectMapper.setTimeZone(TimeZone.getTimeZone("GMT+8"));
        objectMapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        converters.add(new MappingJackson2HttpMessageConverter(objectMapper));
        converters.add(new StringHttpMessageConverter(Charsets.UTF_8));
        converters.add(new ByteArrayHttpMessageConverter());
        converters.add(new ResourceHttpMessageConverter());
        converters.add(new ResourceRegionHttpMessageConverter());
        super.configureMessageConverters(converters);
    }

    /**
     * Maps static resources for the Swagger / knife4j UI pages.
     *
     * @param registry the resource handler registry
     */
    @Override
    public void addResourceHandlers(ResourceHandlerRegistry registry) {
        registry.addResourceHandler("swagger-ui.html")
                .addResourceLocations("classpath:/META-INF/resources/");
        registry.addResourceHandler("/webjars/**")
                .addResourceLocations("classpath:/META-INF/resources/webjars/");
        registry.addResourceHandler("doc.html")
                .addResourceLocations("classpath:/META-INF/resources/");
    }

    /**
     * Interceptor registration hook — currently none registered.
     *
     * @param registry the interceptor registry
     */
    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        // no interceptors registered yet
    }

    /**
     * Enables CORS for all paths with credentials support.
     *
     * @param registry the CORS registry
     */
    @Override
    public void addCorsMappings(CorsRegistry registry) {
        registry.addMapping("/**")
                .allowCredentials(true)
                .allowedHeaders("*")
                // Since Spring Framework 5.3, allowedOrigins("*") combined with
                // allowCredentials(true) is rejected at startup; the supported
                // wildcard form is allowedOriginPatterns("*").
                .allowedOriginPatterns("*")
                .allowedMethods("GET", "POST", "PUT", "DELETE", "OPTIONS")
                .maxAge(3600);
    }
}

2.2 Nacos

package com.gemenyaofei.integration.infrastructure.config;

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

import java.util.Properties;

/**
 * @description:
 * @author: ljh
 * @date: 2022/11/24 19:48
 */
@Slf4j
@Configuration
public class NacosConfig {
    @Value("${spring.cloud.nacos.config.server-addr}")
    private String serverAddr;
    @Value("${spring.cloud.nacos.config.namespace}")
    private String namespace;

    @Bean
    @Primary
    public ConfigService configService() {
        Properties properties = new Properties();
        properties.put("serverAddr", serverAddr);
        properties.put("namespace", namespace);
        try {
            return NacosFactory.createConfigService(properties);
        } catch (NacosException e) {
            log.error(e.toString(), e);
        }

        return null;
    }
}

2.3 调度对接配置

package com.gemenyaofei.integration.infrastructure.config;

import com.gemenyaofei.integration.infrastructure.convertor.enums.JudgeEnum;
import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.cloud.context.config.annotation.RefreshScope;
import org.springframework.context.annotation.Configuration;

/**
 * @description: Scheduling-platform integration settings, refreshed from
 *               Nacos at runtime via @RefreshScope.
 * @author: ljh
 * @date: 2022/10/26 11:50
 */
@Configuration
@RefreshScope
@Data
public class ScheduleConfig {

    /**
     * Project code of this integration platform as registered on the
     * scheduling platform
     */
    @Value("${scheduleConfig.projectCode}")
    private Long projectCode;

    /**
     * Authentication token for the scheduling platform
     */
    @Value("${scheduleConfig.token}")
    private String token;

    /**
     * Scheduling platform base address: protocol://host:port
     */
    @Value("${scheduleConfig.scheduleIpPort}")
    private String scheduleIpPort;

    /**
     * Job record-rate limit (throttling)
     */
    @Value("${scheduleConfig.jobSpeedRecord}")
    private Integer jobSpeedRecord;

    /**
     * Maximum number of concurrently running scheduled jobs
     */
    @Value("${scheduleConfig.jobRunningNum}")
    private Integer jobRunningNum;

    /**
     * Serial-task flag: 1 means serial execution, any other value means
     * parallel
     */
    @Value("${scheduleConfig.judgeSerialTask:1}")
    private Integer judgeSerialTask;

    /**
     * Whether a data-governance node is required in the job flow
     */
    @Value("${scheduleConfig.needGovernanceNode:true}")
    private Boolean needGovernanceNode;

    /**
     * Data-governance service URL
     */
    @Value("${scheduleConfig.dataGovernanceUrl}")
    private String dataGovernanceUrl;
    /**
     * Connect timeout (ms) for calls to the data-governance URL
     */
    @Value("${scheduleConfig.dataGovernanceUrlConnectTimeout}")
    private Integer dataGovernanceUrlConnectTimeout;
    /**
     * Socket (read) timeout (ms) for calls to the data-governance URL
     */
    @Value("${scheduleConfig.dataGovernanceUrlSocketTimeout}")
    private Integer dataGovernanceUrlSocketTimeout;

    /**
     * Whether the ODS-to-TP link uses incremental collection
     */
    @Value("${scheduleConfig.incrementIntegrationWay4Ods2Tp}")
    private Boolean incrementIntegrationWay4Ods2TpConfig;

    /**
     * Filter-flag list applied during TP data integration
     */
    @Value("${scheduleConfig.integrationFilterFlagList}")
    private String integrationFilterFlagList;

    /**
     * Redundancy interval (minutes) subtracted from the last-update-time
     * batch cutoff
     * NOTE(review): typed as String although it holds a number of minutes —
     * presumably parsed by the consumer; confirm before changing the type
     */
    @Value("${scheduleConfig.lastTimeIntervalMinutes:5}")
    private String lastTimeIntervalMinutes;

    /**
     * ODS->TP link: whether to use the DataX plugin
     * true: DataX
     * false: SQL
     */
    @Value("${scheduleConfig.ods2tpUseDataXPlugin:false}")
    private Boolean ods2tpUseDataXPlugin;

    /**
     * URL invoked to refresh the scheduling-platform token
     */
    @Value("${scheduleConfig.refreshTokenUrl}")
    private String refreshTokenUrl;

    /**
     * Builds a full URL by appending the given path to the scheduling
     * platform base address.
     * @param urlKey path (with leading slash expected by callers) to append
     * @return the concatenated URL
     */
    public String getHttpUrl(String urlKey) {
        return scheduleIpPort + urlKey;
    }

    // True when judgeSerialTask equals the "yes" key of JudgeEnum (i.e. serial mode)
    public Boolean isSerialConfig(){
        return JudgeEnum.JUDGE_YES.getKey().equals(this.judgeSerialTask);
    }

    // Accessor kept for readability at call sites; mirrors needGovernanceNode
    public Boolean isNeedGovernanceNode(){
        return this.needGovernanceNode;
    }
}

2.4 线程池配置

1、参数配置

package com.gemenyaofei.integration.infrastructure.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.concurrent.ThreadPoolExecutor;

/**
 * @Author ljh
 * @Date 2022/11/28
 * @Desc Thread pool configuration for async task execution.
 */
@Configuration
public class ThreadPoolConfig {

    /**
     * Shared {@link ThreadPoolTaskExecutor} bean used by async utilities.
     * CallerRunsPolicy degrades gracefully under saturation: the submitting
     * thread runs the task itself instead of dropping it.
     *
     * @return an initialized task executor
     */
    @Bean(name = "threadPoolTaskExecutor")
    public ThreadPoolTaskExecutor getThreadPoolTaskExecutor() {
        ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
        // Core pool size
        taskExecutor.setCorePoolSize(2);
        // Maximum pool size
        taskExecutor.setMaxPoolSize(5);
        // Bounded work queue length
        taskExecutor.setQueueCapacity(100);
        // Maximum idle keep-alive for non-core threads (seconds)
        taskExecutor.setKeepAliveSeconds(200);
        // Named threads make logs and thread dumps attributable to this pool
        taskExecutor.setThreadNamePrefix("async-task-");
        // Rejection policy: run in the caller's thread when saturated
        taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        taskExecutor.initialize();
        return taskExecutor;
    }
}

2、工具类

package com.gemenyaofei.integration.domain.common.utils;

import com.gemenyaofei.integration.domain.common.helper.SpringContextHelper;
import com.gemenyaofei.integration.domain.model.dto.AsyncTask;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.locks.ReentrantLock;

/**
 * @Author ljh
 * @Date 2022/11/28
 * @Desc Static helpers for submitting work to the shared
 *       "threadPoolTaskExecutor" bean, resolved lazily from the Spring
 *       context via double-checked locking.
 */
public class AsyncTaskUtil {

    /** Lazily-resolved shared executor; volatile for safe double-checked locking. */
    private volatile static ThreadPoolTaskExecutor threadPoolTaskExecutor;

    private static final ReentrantLock LOCK = new ReentrantLock();

    /** Utility class — not meant to be instantiated. */
    private AsyncTaskUtil() {
    }

    /**
     * Resolves the shared executor bean once and caches it.
     *
     * @return the application's ThreadPoolTaskExecutor
     */
    private static ThreadPoolTaskExecutor getThreadPoolTaskExecutor() {
        if (threadPoolTaskExecutor == null) {
            LOCK.lock();
            try {
                if (threadPoolTaskExecutor == null) {
                    threadPoolTaskExecutor = (ThreadPoolTaskExecutor) SpringContextHelper.getBean("threadPoolTaskExecutor");
                }
            } finally {
                LOCK.unlock();
            }
        }
        return threadPoolTaskExecutor;
    }

    /**
     * Submits a reflective invocation (target object + method name + args)
     * as an async task.
     *
     * @param object     target instance
     * @param methodName method to invoke on the target
     * @param args       invocation arguments
     */
    public static void asyncTask(Object object, String methodName, Object[] args) {
        AsyncTask asyncTask = new AsyncTask(object, methodName, args);
        asyncTask(asyncTask);
    }

    /**
     * Executes the given task asynchronously with no result.
     *
     * @param asyncTask task to run
     */
    public static void asyncTask(Runnable asyncTask) {
        getThreadPoolTaskExecutor().execute(asyncTask);
    }

    /**
     * Submits a result-bearing task asynchronously.
     *
     * @param callableTask task to run
     * @param <T>          result type
     * @return a Future resolving to the task's result
     */
    public static <T> Future<T> asyncTask(Callable<T> callableTask) {
        return getThreadPoolTaskExecutor().submit(callableTask);
    }

}