Published: 2024-01-04 09:30
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
spring:
  application:
    name: product
  cloud:
    nacos:
      discovery:
        server-addr: 127.0.0.1:8848
(1) Register an instance
(2) Get instances
(3) Listen for (subscribe to) a service
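These three operations map directly onto the Nacos naming API. A minimal sketch, assuming the com.alibaba.nacos Java client is on the classpath; the service name "product" and server address come from the configuration above, the port is illustrative:
import com.alibaba.nacos.api.exception.NacosException;
import com.alibaba.nacos.api.naming.NamingFactory;
import com.alibaba.nacos.api.naming.NamingService;
import com.alibaba.nacos.api.naming.listener.NamingEvent;
import com.alibaba.nacos.api.naming.pojo.Instance;
import java.util.List;

public class NacosNamingDemo {
    public static void main(String[] args) throws NacosException {
        NamingService naming = NamingFactory.createNamingService("127.0.0.1:8848");
        // (1) register an instance of the "product" service
        naming.registerInstance("product", "127.0.0.1", 8080);
        // (2) get all instances of the service
        List<Instance> instances = naming.getAllInstances("product");
        instances.forEach(i -> System.out.println(i.getIp() + ":" + i.getPort()));
        // (3) subscribe to changes of the service
        naming.subscribe("product", event -> {
            if (event instanceof NamingEvent) {
                System.out.println("instances changed: " + ((NamingEvent) event).getInstances());
            }
        });
    }
}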
【Dubbo】
dubbo.application.name=spring-boot-dubbo-nacos-sample
dubbo.registry.address=nacos://127.0.0.1:8848
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
</dependency>
spring.cloud.nacos.config.server-addr=127.0.0.1:8848
spring.cloud.nacos.config.namespace=6ca49aac-50ec-4e6f-8686-70bb11922fdd
@NacosPropertySource(dataId = "example", autoRefreshed = true)
@RestController
public class NacosConfigController {

    @NacosValue(value = "${info:Local Hello World}", autoRefreshed = true)
    private String info;

    @GetMapping("/config")
    public String get() {
        return info;
    }
}
(1) @NacosPropertySource: loads the configuration source whose dataId is "example"; autoRefreshed enables automatic refresh.
(2) @NacosValue: injects the property; the part after the colon ("Local Hello World") is the default value used when the key is missing.
【Service Provider】
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.dubbo</groupId>
    <artifactId>dubbo-spring-boot-starter</artifactId>
    <version>2.7.5</version>
</dependency>
spring.application.name=spring-dubbo-demo
dubbo.application.name=springboot-provider
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
dubbo.registry.address=N/A
@Service
public class HelloServiceImpl implements HelloService {

    @Value("${dubbo.application.name}")
    private String serviceName;

    @Override
    public String sayHello(String name) {
        return serviceName;
    }
}
@DubboComponentScan
@SpringBootApplication
public class ProviderApplication {
    public static void main(String[] args) {
        SpringApplication.run(ProviderApplication.class, args);
    }
}
【Service Consumer】
<dependency>
    <groupId>org.apache.dubbo</groupId>
    <artifactId>dubbo-spring-boot-starter</artifactId>
    <version>2.7.5</version>
</dependency>
dubbo.application.name=springboot-consumer
@Reference(url = "dubbo://192.168.13.1:20880/com.gupaoedu.book.dubbo.helloService")
private HelloService helloService;
【Advanced Configuration】
@Service(cluster = "failfast")
(1) Failover Cluster: automatic failover. On failure, the call is switched to another server in the cluster; retries twice by default.
(2) Failfast Cluster: fail fast. On failure, an error is reported immediately; only one call is made.
(3) Failsafe Cluster: fail safe. Exceptions are simply ignored.
(4) Failback Cluster: automatic recovery after failure. Failed requests are recorded in the background and resent periodically.
(5) Forking Cluster: invokes several providers in the cluster in parallel and returns as soon as one of them succeeds.
(6) Broadcast Cluster: broadcasts the call to all providers; if any one of them fails, the call is considered failed.
@Service(cluster = "failfast", loadbalance = "roundrobin")
(1) Random LoadBalance: random selection; better-performing servers can be given larger weights.
(2) RoundRobin LoadBalance: round-robin; requests are distributed in proportion to the normalized weights.
(3) LeastActive LoadBalance: least active calls; slower nodes receive fewer requests.
(4) ConsistentHash LoadBalance: consistent hashing; requests with the same parameters always go to the same provider.
public class MockHelloService implements HelloService {
    @Override
    public String sayHello(String s) {
        return "Sorry, the service is unavailable; returning fallback data";
    }
}
@Reference(mock = "com.gupaoedu.book.springcloud.springclouddubboconsumer.MockHelloService", cluster = "failfast")
private HelloService helloService;
Why the call may fail: Dubbo resolves the local hostname to an IP address and registers that address. If the resolved IP is wrong, the provider still registers with ZooKeeper and starts normally, but consumers cannot call it.
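One common workaround (an assumption on my part, not from the original note) is to pin the address the provider registers via its protocol configuration:
# force the IP/port that the provider registers with the registry
dubbo.protocol.host=192.168.13.1
dubbo.protocol.port=20880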
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-openfeign</artifactId>
</dependency>
Each method of the interface declares which request of which remote service it calls:
@FeignClient("whalemall-coupon")
public interface CouponFeignService {
@PostMapping("/coupon/spubounds/save")
R saveSpuBounds(@RequestBody SpuBoundTo spuBoundTo);
@PostMapping("/coupon/skufullreduction/saveinfo")
R saveSkuReduction(@RequestBody SkuReductionTo skuReductionTo);
}
@EnableFeignClients(basePackages = "com.island.whalemall.product.feign")
@EnableDiscoveryClient
@MapperScan("com.island.whalemall.product.dao")
@SpringBootApplication
public class WhalemallProductApplication {
    public static void main(String[] args) {
        SpringApplication.run(WhalemallProductApplication.class, args);
    }
}
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-gateway</artifactId>
</dependency>
spring:
  cloud:
    gateway:
      routes:
        # product service
        - id: product_route
          uri: lb://whalemall-product
          predicates:
            - Path=/api/product/**
          filters:
            - RewritePath=/api/(?<segment>.*),/$\{segment}
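With this route, a request such as /api/product/list (an illustrative path) is load-balanced to the whalemall-product service and the path is rewritten to /product/list before forwarding.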
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.10.2</version>
</dependency>
// Set endpoint to the endpoint of the bucket's region, e.g. https://oss-cn-hangzhou.aliyuncs.com for China (Hangzhou).
String endpoint = "yourEndpoint";
// The Alibaba Cloud account AccessKey has full API access and is high risk; it is strongly recommended to create and use a RAM user for API access and daily operations (created in the RAM console).
String accessKeyId = "yourAccessKeyId";
String accessKeySecret = "yourAccessKeySecret";
// Create an OSSClient instance.
OSS ossClient = new OSSClientBuilder().build(endpoint, accessKeyId, accessKeySecret);
// Shut down the OSSClient.
ossClient.shutdown();
// Set endpoint to the endpoint of the bucket's region, e.g. https://oss-cn-hangzhou.aliyuncs.com for China (Hangzhou).
String endpoint = "yourEndpoint";
// Use a RAM user's AccessKey rather than the Alibaba Cloud account AccessKey, which has full API access and is high risk.
String accessKeyId = "yourAccessKeyId";
String accessKeySecret = "yourAccessKeySecret";
// Bucket name, e.g. examplebucket.
String bucketName = "examplebucket";
OSS ossClient = null;
try {
    // Create an OSSClient instance.
    ossClient = new OSSClientBuilder().build(endpoint, accessKeyId, accessKeySecret);
    // Create the bucket.
    ossClient.createBucket(bucketName);
} catch (OSSException e) {
    e.printStackTrace();
} finally {
    // Shut down the OSSClient.
    if (ossClient != null) {
        ossClient.shutdown();
    }
}
// Set endpoint to the endpoint of the bucket's region, e.g. https://oss-cn-hangzhou.aliyuncs.com for China (Hangzhou).
String endpoint = "yourEndpoint";
// Use a RAM user's AccessKey rather than the Alibaba Cloud account AccessKey, which has full API access and is high risk.
String accessKeyId = "yourAccessKeyId";
String accessKeySecret = "yourAccessKeySecret";
// Bucket name, e.g. examplebucket.
String bucketName = "examplebucket";
// Object name: the full path inside the bucket, without the bucket name, e.g. exampledir/exampleobject.txt.
String objectName = "exampledir/exampleobject.txt";
OSS ossClient = null;
try {
    // Create an OSSClient instance.
    ossClient = new OSSClientBuilder().build(endpoint, accessKeyId, accessKeySecret);
    String content = "Hello OSS";
    // Upload the string as the object content.
    ossClient.putObject(bucketName, objectName, new ByteArrayInputStream(content.getBytes()));
} catch (OSSException e) {
    e.printStackTrace();
} finally {
    // Shut down the OSSClient.
    if (ossClient != null) {
        ossClient.shutdown();
    }
}
// Set endpoint to the endpoint of the bucket's region, e.g. https://oss-cn-hangzhou.aliyuncs.com for China (Hangzhou).
String endpoint = "yourEndpoint";
// Use a RAM user's AccessKey rather than the Alibaba Cloud account AccessKey, which has full API access and is high risk.
String accessKeyId = "yourAccessKeyId";
String accessKeySecret = "yourAccessKeySecret";
// Bucket name, e.g. examplebucket.
String bucketName = "examplebucket";
// Object name: the full path inside the bucket, without the bucket name, e.g. exampledir/exampleobject.txt.
String objectName = "exampledir/exampleobject.txt";
OSS ossClient = null;
try {
    // Create an OSSClient instance.
    ossClient = new OSSClientBuilder().build(endpoint, accessKeyId, accessKeySecret);
    // ossClient.getObject returns an OSSObject containing the object content and its metadata.
    OSSObject ossObject = ossClient.getObject(bucketName, objectName);
    // ossObject.getObjectContent returns an input stream from which the content can be read.
    InputStream content = ossObject.getObjectContent();
    if (content != null) {
        BufferedReader reader = new BufferedReader(new InputStreamReader(content));
        while (true) {
            String line = reader.readLine();
            if (line == null) break;
            System.out.println("\n" + line);
        }
        // Always close the stream after reading; otherwise connections leak and eventually no connection is available.
        content.close();
    }
} catch (OSSException e) {
    e.printStackTrace();
} catch (IOException e) {
    e.printStackTrace();
} finally {
    // Shut down the OSSClient.
    if (ossClient != null) {
        ossClient.shutdown();
    }
}
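In a Spring Boot project like the ones in these notes, the OSS client is usually exposed as a bean instead of being built and shut down inline; a rough sketch, where the aliyun.oss.* property names are made up for illustration:
@Configuration
public class OssConfig {

    @Value("${aliyun.oss.endpoint}")
    private String endpoint;
    @Value("${aliyun.oss.access-key-id}")
    private String accessKeyId;
    @Value("${aliyun.oss.access-key-secret}")
    private String accessKeySecret;

    // shut the client down when the application context closes
    @Bean(destroyMethod = "shutdown")
    public OSS ossClient() {
        return new OSSClientBuilder().build(endpoint, accessKeyId, accessKeySecret);
    }
}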
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-amqp</artifactId>
</dependency>
# RabbitMQ configuration
# RabbitMQ server address
spring.rabbitmq.host=localhost
# RabbitMQ port
spring.rabbitmq.port=5672
# RabbitMQ user
spring.rabbitmq.username=admin
# RabbitMQ password
spring.rabbitmq.password=123456
# whether to confirm that published messages were received by the broker (publisher confirms)
spring.rabbitmq.publisher-confirms=true
# RabbitMQ queue name used for sending strings
rabbitmq.queue.msg=spring-boot-queue-msg
# RabbitMQ queue name used for sending user objects
rabbitmq.queue.user=spring-boot-queue-user
// 1. Autowire the admin component
@Autowired
AmqpAdmin amqpAdmin;
// 2. Create an exchange: DirectExchange(name, durable, autoDelete, arguments)
DirectExchange directExchange = new DirectExchange(name, durable, autoDelete, arguments);
// 3. Declare the exchange through the admin component
amqpAdmin.declareExchange(directExchange);
// 4. Create a queue: Queue(name, durable, exclusive, autoDelete, arguments)
Queue queue = new Queue(name, durable, exclusive, autoDelete, arguments);
// 5. Declare the queue through the admin component
amqpAdmin.declareQueue(queue);
// 6. Create a binding: Binding(destination, destinationType (Binding.DestinationType.QUEUE or EXCHANGE), exchange, routingKey, arguments (may be null))
Binding binding = new Binding(destination, destinationType, exchange, routingKey, arguments);
// 7. Declare the binding through the admin component
amqpAdmin.declareBinding(binding);
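Putting the seven steps together, a minimal sketch with made-up exchange/queue/routing-key names:
@Autowired
AmqpAdmin amqpAdmin;

public void declare() {
    // exchange: name, durable, autoDelete, arguments
    DirectExchange exchange = new DirectExchange("hello-exchange", true, false, null);
    amqpAdmin.declareExchange(exchange);
    // queue: name, durable, exclusive, autoDelete, arguments
    Queue queue = new Queue("hello-queue", true, false, false, null);
    amqpAdmin.declareQueue(queue);
    // binding: destination, destination type, exchange, routing key, arguments
    Binding binding = new Binding("hello-queue", Binding.DestinationType.QUEUE,
            "hello-exchange", "hello.route", null);
    amqpAdmin.declareBinding(binding);
}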
// 1. Autowire RabbitTemplate
@Autowired
RabbitTemplate rabbitTemplate;
// 2. Send a message (if the payload is an object, it is serialized, so the object must implement Serializable)
rabbitTemplate.convertAndSend(exchange, routingKey, object /* the message payload */);
// 3. To send objects as JSON instead, define your own RabbitConfig.java
//    and register a message converter in the container:
@Bean
public MessageConverter messageConverter() {
    return new Jackson2JsonMessageConverter();
}
// 1. Get the message body (listener parameters: Message message, T content (the sent type, e.g. OrderReturnReasonEntity), Channel channel (the channel the data arrived on))
byte[] body = message.getBody();
// e.g. JSON.parseObject(new String(body), OrderReturnReasonEntity.class);
// 2. Get the message header properties
MessageProperties properties = message.getMessageProperties();
// 3. A message in a queue is delivered to only one consumer, and messages are consumed in order: the next message is delivered only after the current one has been processed
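A listener with that three-parameter signature might look like the sketch below; the queue name reuses spring-boot-queue-msg from the properties above, and OrderReturnReasonEntity is the entity type mentioned in the note:
@Service
public class OrderListener {

    @RabbitListener(queues = "spring-boot-queue-msg")
    public void receive(Message message, OrderReturnReasonEntity content, Channel channel) {
        // raw body and headers
        byte[] body = message.getBody();
        MessageProperties properties = message.getMessageProperties();
        // 'content' is the payload already converted by the configured MessageConverter
        System.out.println("received: " + content);
    }
}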
1) Enable publisher confirms in application.properties
spring.rabbitmq.publisher-confirms=true
2) Customize the RabbitTemplate and set the confirm callback
@Autowired
RabbitTemplate rabbitTemplate;
// Runs after Spring Boot has created the config object (@PostConstruct)
@PostConstruct
public void initRabbitTemplate() {
    // set the confirm callback
    rabbitTemplate.setConfirmCallback(new RabbitTemplate.ConfirmCallback() {
        @Override
        public void confirm(CorrelationData correlationData /* unique id */, boolean ack /* confirmed */, String cause /* reason */) {
        }
    });
}
1) Enable returned-message confirmation (message reached a queue) in application.properties
spring.rabbitmq.publisher-returns=true
# make unroutable messages trigger the returns callback (processed asynchronously, with priority)
spring.rabbitmq.template.mandatory=true
2) Customize the RabbitTemplate: the return callback fires when a message could not be delivered to any queue
@Autowired
RabbitTemplate rabbitTemplate;
// Runs after Spring Boot has created the config object (@PostConstruct)
@PostConstruct
public void initRabbitTemplate() {
    // set the return callback
    rabbitTemplate.setReturnCallback(new RabbitTemplate.ReturnCallback() {
        @Override
        public void returnedMessage(Message message /* details of the undeliverable message */, int replyCode /* reply status code */, String replyText /* reply text */, String exchange /* exchange used */, String routingKey /* routing key of the message */) {
        }
    });
}
Option 1: automatic acknowledgement (the default). As soon as a message is delivered, the server removes it from the queue.
Problem: if many messages are delivered and the consumer crashes after successfully processing only one of them, all of them are still acknowledged and deleted from the queue, so messages are lost.
Option 2: manual acknowledgement
1) Enable manual acknowledgement in application.properties
spring.rabbitmq.listener.simple.acknowledge-mode=manual
2) Acknowledge (sign for) a message
long deliveryTag = message.getMessageProperties().getDeliveryTag();
channel.basicAck(deliveryTag, false /* no batch ack */);
3) Reject a message
channel.basicNack(deliveryTag, false /* no batch reject */, true /* requeue */);
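A sketch tying the manual-ack calls into a listener, assuming manual acknowledgement is enabled as above (queue name is illustrative):
@RabbitListener(queues = "spring-boot-queue-msg")
public void receive(Message message, Channel channel) throws IOException {
    long deliveryTag = message.getMessageProperties().getDeliveryTag();
    try {
        // ... process the message ...
        channel.basicAck(deliveryTag, false);          // ack this message only
    } catch (Exception e) {
        channel.basicNack(deliveryTag, false, true);   // reject and requeue
    }
}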
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-sentinel</artifactId>
    <version>2.1.1.RELEASE</version>
</dependency>
@RestController
public class HelloController {

    @SentinelResource(value = "hello", blockHandler = "blockHandlerHello")
    @GetMapping("/say")
    public String hello() {
        return "Hello World";
    }

    // block handler that protects the controller resource when it is rate-limited
    public String blockHandlerHello(BlockException e) {
        return "Blocked by flow control";
    }
}
// Rules you can configure: flow control rules, circuit-breaking (degrade) rules, system protection rules, origin access-control rules, and hot-parameter rules
public class FlowRuleInitFunc implements InitFunc {
    @Override
    public void init() throws Exception {
        List<FlowRule> rules = new ArrayList<>();
        FlowRule rule = new FlowRule();
        // flow-control threshold
        rule.setCount(1);
        // the resource to protect
        rule.setResource("hello");
        // flow-control grade (QPS)
        rule.setGrade(RuleConstant.FLOW_GRADE_QPS);
        // limit by call origin
        rule.setLimitApp("default");
        rules.add(rule);
        FlowRuleManager.loadRules(rules);
    }
}
【Rate Limiting】
The main purpose of rate limiting is to protect the system by limiting concurrent access or the number of requests allowed within a time window; once the limit is reached, further requests are handled by the configured rejection strategy.
private static void initFlowRules() {
    List<FlowRule> rules = new ArrayList<>();
    FlowRule rule = new FlowRule();
    rule.setResource("doSomething");
    rule.setCount(20);
    rule.setGrade(RuleConstant.FLOW_GRADE_QPS);
    rule.setLimitApp("default");
    rule.setStrategy(RuleConstant.STRATEGY_CHAIN);
    rule.setControlBehavior(RuleConstant.CONTROL_BEHAVIOR_DEFAULT);
    rule.setClusterMode(false);
    rules.add(rule);
    FlowRuleManager.loadRules(rules);
}
【Circuit Breaking】
Circuit breaking applies when a service provider can no longer serve its callers normally, for example because requests time out or the service throws errors. To prevent a cascading (avalanche) failure across the whole system, the faulty interface is temporarily isolated and cut off from external calls; once the circuit breaker is triggered, the caller's requests fail fast for a period of time until the target service recovers.
private static void initDegradeRule() {
    List<DegradeRule> rules = new ArrayList<>();
    DegradeRule degradeRule = new DegradeRule();
    degradeRule.setResource("KEY");
    degradeRule.setCount(10);
    degradeRule.setGrade(RuleConstant.DEGRADE_GRADE_RT);
    degradeRule.setTimeWindow(10);
    degradeRule.setMinRequestAmount(5);
    degradeRule.setRtSlowRequestAmount(5);
    rules.add(degradeRule);
    DegradeRuleManager.loadRules(rules);
}
spring:
  application:
    name: spring-cloud-sentinel-sample
  cloud:
    sentinel:
      transport:
        dashboard: 192.168.216.128:7777
@RestController
public class DashboardController {
    @GetMapping("/dash")
    public String dash() {
        return "Hello Dash";
    }
}
@Service
public class CustomUrlBlockHandler implements UrlBlockHandler {
    @Override
    public void blocked(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, BlockException e) throws IOException {
        httpServletResponse.setHeader("Content-Type", "application/json;charset=UTF-8");
        String message = "{\"code\":999,\"msg\":\"Too many requests\"}";
        httpServletResponse.getWriter().write(message);
    }
}
Custom block (fallback) page:
spring.cloud.sentinel.servlet.block-page={url}
@RestController
public class UrlCleanController {
    @GetMapping("/clean/{id}")
    public String clean(@PathVariable("id") int id) {
        return "Hello,Cleaner";
    }
}
@Service
public class CustomerUrlCleaner implements UrlCleaner {
    @Override
    public String clean(String originUrl) {
        if (StringUtils.isEmpty(originUrl)) {
            return originUrl;
        }
        if (originUrl.startsWith("/clean/")) {
            return "/clean/*";
        }
        return originUrl;
    }
}
<dependency>
    <groupId>com.alibaba.csp</groupId>
    <artifactId>sentinel-apache-dubbo-adapter</artifactId>
    <version>1.7.1</version>
</dependency>
1) Add the sentinel-transport-simple-http dependency
<dependency>
    <groupId>com.alibaba.csp</groupId>
    <artifactId>sentinel-transport-simple-http</artifactId>
    <version>1.7.1</version>
</dependency>
2) Add the JVM startup arguments
-Djava.net.preferIPv4Stack=true
-Dcsp.sentinel.api.port=8720
-Dcsp.sentinel.dashboard.server=192.168.216.128:7777
-Dproject.name=spring-cloud.sentinel-dubbo.provider
3) Log in to the Sentinel Dashboard and manage the resources on the cluster point link page
1) Add the sentinel-datasource-nacos dependency
<dependency>
    <groupId>com.alibaba.csp</groupId>
    <artifactId>sentinel-datasource-nacos</artifactId>
    <version>1.7.1</version>
</dependency>
2) Implement the Nacos data source configuration through Sentinel's InitFunc extension point
public class NacosDataSourceInitFunc implements InitFunc {
    private String serverAddr = "192.168.216.128:8848";
    private String groupId = "DEFAULT_GROUP";
    private String dataId = "spring-cloud.sentinel-dubbo.provider-sentinel-flow";

    @Override
    public void init() throws Exception {
        loadNacosData();
    }

    private void loadNacosData() {
        ReadableDataSource<String, List<FlowRule>> flowRuleDataSource = new NacosDataSource<>(serverAddr, groupId, dataId,
                source -> JSON.parseObject(source, new TypeReference<List<FlowRule>>() {
                }));
        FlowRuleManager.register2Property(flowRuleDataSource.getProperty());
    }
}
3) Open the Sentinel Dashboard to verify the rules
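For reference, the content stored in Nacos under that dataId is a JSON array of flow rules; a sketch with illustrative values (grade 1 means QPS):
[
  {
    "resource": "hello",
    "limitApp": "default",
    "grade": 1,
    "count": 10,
    "strategy": 0,
    "controlBehavior": 0,
    "clusterMode": false
  }
]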
Java can operate Elasticsearch in two ways: over TCP via port 9300, or over HTTP via port 9200.
spring-data-elasticsearch (transport-api.jar): the transport-api.jar varies with the Spring Boot version and cannot reliably match the ES version; it is discouraged since 7.x and will be dropped after 8.
JestClient: unofficial and updated slowly.
RestTemplate: sends raw HTTP requests; many ES operations have to be wrapped by hand, which is tedious.
HttpClient: same as above.
Elasticsearch-Rest-Client: the official RestClient; it wraps ES operations, the API is clearly layered, and it is easy to get started with.
In summary, Elasticsearch-Rest-Client (elasticsearch-rest-high-level-client) is the best choice; the integration steps are recorded below.
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch</artifactId>
    <version>6.3.2</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>6.3.2</version>
</dependency>
elasticsearch:
  ip: localhost:9200
@Configuration
public class ElasticsearchRestClient {

    /**
     * ES address, ip:port
     */
    @Value("${elasticsearch.ip}")
    String ipPort;

    @Bean
    public RestClientBuilder restClientBuilder() {
        return RestClient.builder(makeHttpHost(ipPort));
    }

    @Bean(name = "highLevelClient")
    public RestHighLevelClient highLevelClient(@Autowired RestClientBuilder restClientBuilder) {
        restClientBuilder.setMaxRetryTimeoutMillis(60000);
        return new RestHighLevelClient(restClientBuilder);
    }

    private HttpHost makeHttpHost(String s) {
        String[] address = s.split(":");
        String ip = address[0];
        int port = Integer.parseInt(address[1]);
        return new HttpHost(ip, port, "http");
    }
}
localhost:9200/customer/_doc/1?pretty
{
  "city": "北京",
  "useragent": "Mobile Safari",
  "sys_version": "Linux armv8l",
  "province": "北京",
  "event_id": "",
  "log_time": 1559191912,
  "session": "343730"
}
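The same kind of document can also be indexed from Java through the high-level client; a rough sketch against the 6.3.2 API (the JSON body abbreviates the document above):
private void indexDoc(RestHighLevelClient highLevelClient) throws IOException {
    IndexRequest request = new IndexRequest("customer", "_doc", "1");
    // document body; fields abbreviated from the JSON above
    request.source("{\"city\":\"北京\",\"province\":\"北京\",\"log_time\":1559191912}", XContentType.JSON);
    IndexResponse response = highLevelClient.index(request);
    System.out.println(response.getResult());
}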
@Service
public class TestService {

    @Autowired
    RestHighLevelClient highLevelClient;

    private void search(RestHighLevelClient highLevelClient) throws IOException {
        SearchRequest searchRequest = new SearchRequest();
        searchRequest.indices("customer");
        searchRequest.types("_doc");
        // equality conditions
        MatchQueryBuilder matchQuery = QueryBuilders.matchQuery("city", "北京");
        TermQueryBuilder termQuery = QueryBuilders.termQuery("province", "福建");
        // range query
        RangeQueryBuilder timeFilter = QueryBuilders.rangeQuery("log_time").gt(12345).lt(343750);
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        QueryBuilder totalFilter = QueryBuilders.boolQuery()
                .filter(matchQuery)
                .filter(timeFilter)
                .mustNot(termQuery);
        int size = 200;
        int from = 0;
        long total = 0;
        do {
            try {
                sourceBuilder.query(totalFilter).from(from).size(size);
                sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
                searchRequest.source(sourceBuilder);
                SearchResponse response = highLevelClient.search(searchRequest);
                SearchHit[] hits = response.getHits().getHits();
                for (SearchHit hit : hits) {
                    System.out.println(hit.getSourceAsString());
                }
                total = response.getHits().totalHits;
                System.out.println("test: [" + total + "][" + from + "-" + (from + hits.length) + ")");
                from += hits.length;
                // from + size must be less than or equal to: [10000]
                if (from >= 10000) {
                    System.out.println("test: more than 10000 hits, stopping");
                    break;
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        } while (from < total);
    }
}
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-jdbc</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-dbcp2</artifactId>
</dependency>
spring.datasource.url=jdbc:mysql://localhost:3306/spring_boot_chapter5
spring.datasource.username=root
spring.datasource.password=123456
# Even with the driver class commented out, Spring Boot infers the driver from the datasource URL and applies a default
# spring.datasource.driver-class-name=com.mysql.jdbc.Driver
# connection-pool type
spring.datasource.type=org.apache.commons.dbcp2.BasicDataSource
# maximum number of idle connections; 0 means no limit
spring.datasource.dbcp2.max-idle=10
# maximum number of active connections
spring.datasource.dbcp2.max-total=50
# maximum wait time in milliseconds
spring.datasource.dbcp2.max-wait-millis=10000
# initial number of connections in the pool
spring.datasource.dbcp2.initial-size=5
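Since spring-boot-starter-jdbc is on the classpath, the dbcp2-backed DataSource can be used straight away through JdbcTemplate; a minimal sketch (the table name t_user is made up):
@Service
public class JdbcQueryService {

    @Autowired
    private JdbcTemplate jdbcTemplate;

    public Long countUsers() {
        // runs against the dbcp2-backed DataSource configured above
        return jdbcTemplate.queryForObject("select count(*) from t_user", Long.class);
    }
}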
<dependency>
    <groupId>org.mybatis.spring.boot</groupId>
    <artifactId>mybatis-spring-boot-starter</artifactId>
    <version>1.3.1</version>
</dependency>
# MyBatis mapper XML locations
mybatis.mapper-locations=classpath:com/springboot/chapter/mapper/*.xml
# package scanned for type aliases, used together with the @Alias annotation
mybatis.type-aliases-package=com.springboot.chapter.pojo
# package scanned for typeHandlers
mybatis.type-handlers-package=com.springboot.chapter.typehandler
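To illustrate how the alias and mapper settings fit together (class, table, and column names below are made up), a POJO in the alias package and an annotated mapper might look like:
// picked up from mybatis.type-aliases-package
@Alias("user")
public class User {
    private Long id;
    private String userName;
    // getters and setters omitted
}

@Mapper
public interface UserDao {
    @Select("select id, user_name as userName from t_user where id = #{id}")
    User getUser(@Param("id") Long id);
}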
Configurable properties
<dependency>
    <groupId>redis.clients</groupId>
    <artifactId>jedis</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-redis</artifactId>
</dependency>
<dependency>
    <groupId>io.lettuce</groupId>
    <artifactId>lettuce-core</artifactId>
</dependency>
# connection-pool settings
spring.redis.jedis.pool.min-idle=5
spring.redis.jedis.pool.max-idle=10
spring.redis.jedis.pool.max-active=10
spring.redis.jedis.pool.max-wait=2000
# Redis server settings
spring.redis.port=6379
spring.redis.host=192.168.11.131
spring.redis.password=123456
# Redis connection timeout in ms
spring.redis.timeout=1000
ValueOperations<String, String> ops = stringRedisTemplate.opsForValue();
// save
ops.set("hello", "world_" + UUID.randomUUID().toString());
// read
String hello = ops.get("hello");
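For completeness, a sketch of where that snippet would typically live (the class name is illustrative):
@Service
public class RedisDemoService {

    @Autowired
    private StringRedisTemplate stringRedisTemplate;

    public String writeAndRead() {
        ValueOperations<String, String> ops = stringRedisTemplate.opsForValue();
        ops.set("hello", "world_" + UUID.randomUUID().toString());
        return ops.get("hello");
    }
}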