From cff264916b3e1b76c3bb786b7cebe5d6f6b9bdb5 Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Mon, 18 Sep 2023 20:16:50 +0800
Subject: [PATCH 01/18] =?UTF-8?q?=E5=AE=8C=E5=96=84RocketMQ=20NameServer?=
=?UTF-8?q?=E8=B7=AF=E7=94=B1=E5=8A=9F=E8=83=BD=E3=80=81=E8=B7=AF=E7=94=B1?=
=?UTF-8?q?=E5=85=83=E6=95=B0=E6=8D=AE=E3=80=81=E8=B7=AF=E7=94=B1=E6=B3=A8?=
=?UTF-8?q?=E5=86=8C=E4=B8=8E=E5=8F=91=E7=8E=B0=E6=9C=BA=E5=88=B6=E4=BB=A3?=
=?UTF-8?q?=E7=A0=81=E6=B3=A8=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../rocketmq/broker/BrokerController.java | 8 ++++
.../rocketmq/broker/out/BrokerOuterAPI.java | 5 +++
.../common/protocol/route/BrokerData.java | 1 +
.../common/protocol/route/TopicRouteData.java | 13 +++++++
.../rocketmq/example/quickstart/Consumer.java | 2 +-
.../rocketmq/example/quickstart/Producer.java | 1 +
.../rocketmq/namesrv/NamesrvController.java | 4 ++
.../rocketmq/namesrv/NamesrvStartup.java | 4 ++
.../processor/DefaultRequestProcessor.java | 4 ++
.../namesrv/routeinfo/RouteInfoManager.java | 37 +++++++++++++++++++
.../remoting/netty/NettyServerConfig.java | 23 ++++++++++++
11 files changed, 101 insertions(+), 1 deletion(-)
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java b/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java
index 85009d620f5..c4a326f04a3 100644
--- a/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java
+++ b/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java
@@ -887,6 +887,8 @@ public void start() throws Exception {
this.registerBrokerAll(true, false, true);
}
+ // 定时任务,每30秒向所有NameServer发送心跳,NameServer中会记录每一个Broker最近心跳时间,
+ // NameServer每10秒会扫描所有Broker的心跳时间,如果NameServer超过120秒未收到心跳,则会将Broker剔除。
this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
@Override
@@ -928,6 +930,12 @@ public synchronized void registerIncrementBrokerData(TopicConfig topicConfig, Da
doRegisterBrokerAll(true, false, topicConfigSerializeWrapper);
}
+ /**
+ * 该方法遍历NameServer列表,Broker消息服务器依次向NameServer发送心跳包,如代码清单2-9所示
+ * @param checkOrderConfig
+ * @param oneway
+ * @param forceRegister
+ */
public synchronized void registerBrokerAll(final boolean checkOrderConfig, boolean oneway, boolean forceRegister) {
TopicConfigSerializeWrapper topicConfigWrapper = this.getTopicConfigManager().buildTopicConfigSerializeWrapper();
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/out/BrokerOuterAPI.java b/broker/src/main/java/org/apache/rocketmq/broker/out/BrokerOuterAPI.java
index 6caa2358e32..c95f861e635 100644
--- a/broker/src/main/java/org/apache/rocketmq/broker/out/BrokerOuterAPI.java
+++ b/broker/src/main/java/org/apache/rocketmq/broker/out/BrokerOuterAPI.java
@@ -129,14 +129,17 @@ public List registerBrokerAll(
final RegisterBrokerRequestHeader requestHeader = new RegisterBrokerRequestHeader();
requestHeader.setBrokerAddr(brokerAddr);
+ // brokerId=0 代表Master Broker,brokerId>0代表Slave Broker
requestHeader.setBrokerId(brokerId);
requestHeader.setBrokerName(brokerName);
requestHeader.setClusterName(clusterName);
+ // 主节点地址,初次请求时为空,从节点向NameServer注册后返回
requestHeader.setHaServerAddr(haServerAddr);
requestHeader.setCompressed(compressed);
RegisterBrokerBody requestBody = new RegisterBrokerBody();
requestBody.setTopicConfigSerializeWrapper(topicConfigWrapper);
+ // 消息过滤服务器列表
requestBody.setFilterServerList(filterServerList);
final byte[] body = requestBody.encode(compressed);
final int bodyCrc32 = UtilAll.crc32(body);
@@ -147,6 +150,7 @@ public List registerBrokerAll(
@Override
public void run() {
try {
+ // 向NameServer注册
RegisterBrokerResult result = registerBroker(namesrvAddr,oneway, timeoutMills,requestHeader,body);
if (result != null) {
registerBrokerResultList.add(result);
@@ -179,6 +183,7 @@ private RegisterBrokerResult registerBroker(
final byte[] body
) throws RemotingCommandException, MQBrokerException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException,
InterruptedException {
+ // RequestCode.REGISTER_BROKER 代表发送的是一个Broker注册请求
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.REGISTER_BROKER, requestHeader);
request.setBody(body);
diff --git a/common/src/main/java/org/apache/rocketmq/common/protocol/route/BrokerData.java b/common/src/main/java/org/apache/rocketmq/common/protocol/route/BrokerData.java
index 36599fbc874..68be0ab76ab 100644
--- a/common/src/main/java/org/apache/rocketmq/common/protocol/route/BrokerData.java
+++ b/common/src/main/java/org/apache/rocketmq/common/protocol/route/BrokerData.java
@@ -26,6 +26,7 @@
public class BrokerData implements Comparable {
private String cluster;
private String brokerName;
+ // brokerId=0代表主Master,大于0表示从Slave
private HashMap brokerAddrs;
private final Random random = new Random();
diff --git a/common/src/main/java/org/apache/rocketmq/common/protocol/route/TopicRouteData.java b/common/src/main/java/org/apache/rocketmq/common/protocol/route/TopicRouteData.java
index e8f54b8d73e..32b6940ef09 100644
--- a/common/src/main/java/org/apache/rocketmq/common/protocol/route/TopicRouteData.java
+++ b/common/src/main/java/org/apache/rocketmq/common/protocol/route/TopicRouteData.java
@@ -26,9 +26,22 @@
import org.apache.rocketmq.remoting.protocol.RemotingSerializable;
public class TopicRouteData extends RemotingSerializable {
+
+ /**
+ * 顺序消息的配置内容,来自kvConfig
+ */
private String orderTopicConf;
+ /**
+ * topic队列元数据
+ */
private List queueDatas;
+ /**
+ * topic分布的broker元数据
+ */
private List brokerDatas;
+ /**
+ * Broker上过滤服务器的地址列表
+ */
private HashMap/* Filter Server */> filterServerTable;
public TopicRouteData cloneTopicRouteData() {
diff --git a/example/src/main/java/org/apache/rocketmq/example/quickstart/Consumer.java b/example/src/main/java/org/apache/rocketmq/example/quickstart/Consumer.java
index 6d3b936507e..9eba15934da 100644
--- a/example/src/main/java/org/apache/rocketmq/example/quickstart/Consumer.java
+++ b/example/src/main/java/org/apache/rocketmq/example/quickstart/Consumer.java
@@ -48,7 +48,7 @@ public static void main(String[] args) throws InterruptedException, MQClientExce
* }
*
*/
-
+ consumer.setNamesrvAddr("127.0.0.1:9876");
/*
* Specify where to start in case the specified consumer group is a brand new one.
*/
diff --git a/example/src/main/java/org/apache/rocketmq/example/quickstart/Producer.java b/example/src/main/java/org/apache/rocketmq/example/quickstart/Producer.java
index 53a1d4dd64a..eebfd0da1ae 100644
--- a/example/src/main/java/org/apache/rocketmq/example/quickstart/Producer.java
+++ b/example/src/main/java/org/apache/rocketmq/example/quickstart/Producer.java
@@ -44,6 +44,7 @@ public static void main(String[] args) throws MQClientException, InterruptedExce
* }
*
*/
+ producer.setNamesrvAddr("127.0.0.1:9876");
/*
* Launch the instance.
diff --git a/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvController.java b/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvController.java
index a6654f271c3..fadf123a8c9 100644
--- a/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvController.java
+++ b/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvController.java
@@ -77,6 +77,7 @@ public boolean initialize() {
this.kvConfigManager.load();
+ // 初始化Netty信息,会在后续流程启动Netty
this.remotingServer = new NettyRemotingServer(this.nettyServerConfig, this.brokerHousekeepingService);
this.remotingExecutor =
@@ -84,6 +85,8 @@ public boolean initialize() {
this.registerProcessor();
+ // 定时任务,每隔10s会扫描一次brokerLiveTable(存放心跳包的时间戳信息),如果在120s内没有收到心跳包,
+ // 则认为Broker失效,更新topic的路由信息,将失效的Broker信息移除
this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
@Override
@@ -92,6 +95,7 @@ public void run() {
}
}, 5, 10, TimeUnit.SECONDS);
+ // 每隔10s打印一次KV配置
this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
@Override
diff --git a/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvStartup.java b/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvStartup.java
index 9b49567f322..ae5e253cdd7 100644
--- a/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvStartup.java
+++ b/namesrv/src/main/java/org/apache/rocketmq/namesrv/NamesrvStartup.java
@@ -83,6 +83,8 @@ public static NamesrvController createNamesrvController(String[] args) throws IO
final NettyServerConfig nettyServerConfig = new NettyServerConfig();
nettyServerConfig.setListenPort(9876);
if (commandLine.hasOption('c')) {
+ // 判断是否有 -c 启动参数,指定配置文件位置
+ // 获取配置的文件地址
String file = commandLine.getOptionValue('c');
if (file != null) {
InputStream in = new BufferedInputStream(new FileInputStream(file));
@@ -143,9 +145,11 @@ public static NamesrvController start(final NamesrvController controller) throws
System.exit(-3);
}
+ // 注册 JVM钩子函数,在JVM进程关闭之前,会执行钩子函数
Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable() {
@Override
public Void call() throws Exception {
+ // 关闭线程池、Netty服务等
controller.shutdown();
return null;
}
diff --git a/namesrv/src/main/java/org/apache/rocketmq/namesrv/processor/DefaultRequestProcessor.java b/namesrv/src/main/java/org/apache/rocketmq/namesrv/processor/DefaultRequestProcessor.java
index 467078c44f8..becf75dc104 100644
--- a/namesrv/src/main/java/org/apache/rocketmq/namesrv/processor/DefaultRequestProcessor.java
+++ b/namesrv/src/main/java/org/apache/rocketmq/namesrv/processor/DefaultRequestProcessor.java
@@ -88,6 +88,7 @@ public RemotingCommand processRequest(ChannelHandlerContext ctx,
case RequestCode.QUERY_DATA_VERSION:
return queryBrokerTopicConfig(ctx, request);
case RequestCode.REGISTER_BROKER:
+ // Broker注册心跳
Version brokerVersion = MQVersion.value2Version(request.getVersion());
if (brokerVersion.ordinal() >= MQVersion.Version.V3_0_11.ordinal()) {
return this.registerBrokerWithFilterServer(ctx, request);
@@ -216,6 +217,7 @@ public RemotingCommand registerBrokerWithFilterServer(ChannelHandlerContext ctx,
registerBrokerBody.getTopicConfigSerializeWrapper().getDataVersion().setTimestamp(0);
}
+ // 注册Broker信息
RegisterBrokerResult result = this.namesrvController.getRouteInfoManager().registerBroker(
requestHeader.getClusterName(),
requestHeader.getBrokerAddr(),
@@ -340,10 +342,12 @@ public RemotingCommand getRouteInfoByTopic(ChannelHandlerContext ctx,
final GetRouteInfoRequestHeader requestHeader =
(GetRouteInfoRequestHeader) request.decodeCommandCustomHeader(GetRouteInfoRequestHeader.class);
+ // 获取topic的路由信息
TopicRouteData topicRouteData = this.namesrvController.getRouteInfoManager().pickupTopicRouteData(requestHeader.getTopic());
if (topicRouteData != null) {
if (this.namesrvController.getNamesrvConfig().isOrderMessageEnable()) {
+ // 如果开启了顺序消息,则从kvConfig获取顺序消息配置
String orderTopicConf =
this.namesrvController.getKvConfigManager().getKVConfig(NamesrvUtil.NAMESPACE_ORDER_TOPIC_CONFIG,
requestHeader.getTopic());
diff --git a/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java b/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
index ecd057a29ac..3c3f15cf41b 100644
--- a/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
+++ b/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
@@ -49,10 +49,25 @@ public class RouteInfoManager {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME);
private final static long BROKER_CHANNEL_EXPIRED_TIME = 1000 * 60 * 2;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
+ /**
+ * topic消息队列的路由信息,消息发送时根据路由表进行负载均衡
+ */
private final HashMap> topicQueueTable;
+ /**
+ * Broker基础信息,包含brokerName、所属集群名称、主备Broker地址。
+ */
private final HashMap brokerAddrTable;
+ /**
+ * Broker集群信息,存储集群中所有Broker的名称
+ */
private final HashMap> clusterAddrTable;
+ /**
+ * Broker状态信息,NameServer每次收到心跳包时会替换该信息
+ */
private final HashMap brokerLiveTable;
+ /**
+ * Broker上的FilterServer列表,用于类模式消息过滤。类模式过滤机制在4.4及以后版本被废弃。
+ */
private final HashMap/* Filter Server */> filterServerTable;
public RouteInfoManager() {
@@ -111,26 +126,35 @@ public RegisterBrokerResult registerBroker(
RegisterBrokerResult result = new RegisterBrokerResult();
try {
try {
+ // 注册操作加锁,防止并发修改RouteInfoManager中的路由表
this.lock.writeLock().lockInterruptibly();
Set brokerNames = this.clusterAddrTable.get(clusterName);
+ // 判断所属集群是否存在
if (null == brokerNames) {
brokerNames = new HashSet();
+ // 如果不存在,则创建集群
this.clusterAddrTable.put(clusterName, brokerNames);
}
+ // 将broker名称加入集群的broker集合
brokerNames.add(brokerName);
boolean registerFirst = false;
+ // 维护 brokerData信息
BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (null == brokerData) {
+ // 该broker是否是新注册的
registerFirst = true;
+ // 如果该broker是新注册的,NameServer内存中不会存在broker信息,则新创建一个
brokerData = new BrokerData(clusterName, brokerName, new HashMap());
this.brokerAddrTable.put(brokerName, brokerData);
}
Map brokerAddrsMap = brokerData.getBrokerAddrs();
//Switch slave to master: first remove <1, IP:PORT> in namesrv, then add <0, IP:PORT>
//The same IP:PORT must only have one record in brokerAddrTable
+ // brokerId=0代表 Master broker, brokerId>0代表Slave broker
+ // 如果brokerId发生变更,则更新内存中的Broker信息
Iterator> it = brokerAddrsMap.entrySet().iterator();
while (it.hasNext()) {
Entry item = it.next();
@@ -146,16 +170,20 @@ public RegisterBrokerResult registerBroker(
&& MixAll.MASTER_ID == brokerId) {
if (this.isBrokerTopicConfigChanged(brokerAddr, topicConfigWrapper.getDataVersion())
|| registerFirst) {
+ // 如果Broker为主节点,并且Broker的topic配置信息发生变化或者是初次注册,
+ // 则需要创建或更新topic路由元数据,并填充topicQueueTable,
ConcurrentMap tcTable =
topicConfigWrapper.getTopicConfigTable();
if (tcTable != null) {
for (Map.Entry entry : tcTable.entrySet()) {
+ // 创建或更新topic路由元数据,并填充topicQueueTable,
this.createAndUpdateQueueData(brokerName, entry.getValue());
}
}
}
}
+ // 更新BrokerLiveInfo,存储状态正常的Broker信息表,BrokerLiveInfo是执行路由删除操作的重要依据
BrokerLiveInfo prevBrokerLiveInfo = this.brokerLiveTable.put(brokerAddr,
new BrokerLiveInfo(
System.currentTimeMillis(),
@@ -179,6 +207,7 @@ public RegisterBrokerResult registerBroker(
if (masterAddr != null) {
BrokerLiveInfo brokerLiveInfo = this.brokerLiveTable.get(masterAddr);
if (brokerLiveInfo != null) {
+ // 主节点地址,初次请求时为空,从节点向NameServer注册后返回
result.setHaServerAddr(brokerLiveInfo.getHaServerAddr());
result.setMasterAddr(masterAddr);
}
@@ -371,6 +400,11 @@ private void removeTopicByBrokerName(final String brokerName) {
}
}
+ /**
+ * 根据已缓存的broker信息,组装Topic路由所需的信息并返回
+ * @param topic
+ * @return
+ */
public TopicRouteData pickupTopicRouteData(final String topic) {
TopicRouteData topicRouteData = new TopicRouteData();
boolean foundQueueData = false;
@@ -473,6 +507,7 @@ public void onChannelDestroy(String remoteAddr, Channel channel) {
try {
try {
+ // 注册的Broker信息变更,需要加互斥锁
this.lock.writeLock().lockInterruptibly();
this.brokerLiveTable.remove(brokerAddrFound);
this.filterServerTable.remove(brokerAddrFound);
@@ -505,6 +540,7 @@ public void onChannelDestroy(String remoteAddr, Channel channel) {
}
}
+ // 删除集群信息中的 broker
if (brokerNameFound != null && removeBrokerName) {
Iterator>> it = this.clusterAddrTable.entrySet().iterator();
while (it.hasNext()) {
@@ -527,6 +563,7 @@ public void onChannelDestroy(String remoteAddr, Channel channel) {
}
}
+ // 删除topic信息中的broker
if (removeBrokerName) {
Iterator>> itTopicQueueTable =
this.topicQueueTable.entrySet().iterator();
diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java
index a5e2a232dd5..a631e764087 100644
--- a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java
+++ b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java
@@ -18,14 +18,36 @@
public class NettyServerConfig implements Cloneable {
private int listenPort = 8888;
+ /**
+ * 业务线程数
+ */
private int serverWorkerThreads = 8;
private int serverCallbackExecutorThreads = 0;
+ /**
+ * NIO线程池线程个数,这类线程主要用于处理网络请求,先解析请求包,然后转发到各个业务线程池完成具体的业务操作,最后将结果返回给调用方。
+ */
private int serverSelectorThreads = 3;
+ /**
+ * send oneway消息请求的并发度(Broker端参数)
+ */
private int serverOnewaySemaphoreValue = 256;
+ /**
+ * 异步消息发送的最大并发度(Broker端参数)
+ */
private int serverAsyncSemaphoreValue = 64;
+ /**
+ * 网络连接最大空闲时间,默认为120s。如果连接空闲时间超过该参数设置的值,连接将被关闭。
+ * 正常情况下,客户端连接会通过心跳保持连接,如果客户端长时间未发送心跳,则认为已断开连接,服务端将关闭连接。
+ */
private int serverChannelMaxIdleTimeSeconds = 120;
+ /**
+ * 网络socket发送缓存区大小,默认为64KB。
+ */
private int serverSocketSndBufSize = NettySystemConfig.socketSndbufSize;
+ /**
+ * 网络socket接收缓存区大小,默认为64KB。
+ */
private int serverSocketRcvBufSize = NettySystemConfig.socketRcvbufSize;
private boolean serverPooledByteBufAllocatorEnable = true;
@@ -36,6 +58,7 @@ public class NettyServerConfig implements Cloneable {
* ../glibc-2.10.1/configure \ --prefix=/usr \ --with-headers=/usr/include \
* --host=x86_64-linux-gnu \ --build=x86_64-pc-linux-gnu \ --without-gd
*/
+ // 是否启用Epoll I/O模型,Linux环境下建议开启。
private boolean useEpollNativeSelector = false;
public int getListenPort() {
From b5d5b0da4c17577740a4ccfcf2bd404bd5f225b2 Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Sun, 8 Oct 2023 16:53:47 +0800
Subject: [PATCH 02/18] =?UTF-8?q?=E5=AE=8C=E5=96=84=E6=B6=88=E6=81=AF?=
=?UTF-8?q?=E5=8F=91=E9=80=81=E6=B5=81=E7=A8=8B=E4=BB=A3=E7=A0=81=E6=B3=A8?=
=?UTF-8?q?=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../AbstractSendMessageProcessor.java | 3 +
.../apache/rocketmq/client/ClientConfig.java | 13 ++
.../org/apache/rocketmq/client/MQAdmin.java | 7 +
.../apache/rocketmq/client/Validators.java | 4 +
.../rocketmq/client/impl/MQClientAPIImpl.java | 6 +
.../rocketmq/client/impl/MQClientManager.java | 7 +
.../client/impl/factory/MQClientInstance.java | 12 +-
.../impl/producer/DefaultMQProducerImpl.java | 47 ++++-
.../impl/producer/TopicPublishInfo.java | 24 +++
.../client/latency/LatencyFaultTolerance.java | 13 ++
.../latency/LatencyFaultToleranceImpl.java | 17 ++
.../client/latency/MQFaultStrategy.java | 18 +-
.../client/producer/DefaultMQProducer.java | 8 +
.../rocketmq/client/producer/MQProducer.java | 160 ++++++++++++++++++
.../rocketmq/common/message/Message.java | 19 +++
15 files changed, 355 insertions(+), 3 deletions(-)
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/AbstractSendMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/AbstractSendMessageProcessor.java
index b0668d49f87..08769e52c9c 100644
--- a/broker/src/main/java/org/apache/rocketmq/broker/processor/AbstractSendMessageProcessor.java
+++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/AbstractSendMessageProcessor.java
@@ -164,6 +164,7 @@ protected RemotingCommand msgContentCheck(final ChannelHandlerContext ctx,
protected RemotingCommand msgCheck(final ChannelHandlerContext ctx,
final SendMessageRequestHeader requestHeader, final RemotingCommand response) {
+ // 检查broker是否有写权限
if (!PermName.isWriteable(this.brokerController.getBrokerConfig().getBrokerPermission())
&& this.brokerController.getTopicConfigManager().isOrderTopic(requestHeader.getTopic())) {
response.setCode(ResponseCode.NO_PERMISSION);
@@ -171,6 +172,7 @@ protected RemotingCommand msgCheck(final ChannelHandlerContext ctx,
+ "] sending message is forbidden");
return response;
}
+ // 检查topic是否可以进行消息发送。主要针对默认主题,默认主题不能发送消息,仅供路由查找。
if (!this.brokerController.getTopicConfigManager().isTopicCanSendMessage(requestHeader.getTopic())) {
String errorMsg = "the topic[" + requestHeader.getTopic() + "] is conflict with system reserved words.";
log.warn(errorMsg);
@@ -215,6 +217,7 @@ protected RemotingCommand msgCheck(final ChannelHandlerContext ctx,
}
}
+ // 校验队列ID是否合法
int queueIdInt = requestHeader.getQueueId();
int idValid = Math.max(topicConfig.getWriteQueueNums(), topicConfig.getReadQueueNums());
if (queueIdInt >= idValid) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/ClientConfig.java b/client/src/main/java/org/apache/rocketmq/client/ClientConfig.java
index d0ae5e1b831..d7edd6fd353 100644
--- a/client/src/main/java/org/apache/rocketmq/client/ClientConfig.java
+++ b/client/src/main/java/org/apache/rocketmq/client/ClientConfig.java
@@ -62,6 +62,18 @@ public class ClientConfig {
private LanguageCode language = LanguageCode.JAVA;
+ /**
+ * clientId为客户端IP+instance+unitname(可选),如果在同一
+ * 台物理服务器部署两个应用程序,应用程序的clientId岂不是相同,
+ * 这样是不是会造成混乱?
+ *
+ * 为了避免出现这个问题,如果instance为默认值DEFAULT,
+ * RocketMQ会自动将instance设置为进程ID,这样就避免了不同进程相
+ * 互影响,但同一个JVM中相同clientId的消费者和生产者在启动时获取
+ * 的MQClientInstance实例都是同一个。
+ *
+ * @return
+ */
public String buildMQClientId() {
StringBuilder sb = new StringBuilder();
sb.append(this.getClientIP());
@@ -94,6 +106,7 @@ public void setInstanceName(String instanceName) {
public void changeInstanceNameToPID() {
if (this.instanceName.equals("DEFAULT")) {
+ // 如果 instanceName为 DEFAULT, 则将其设置为PID
this.instanceName = String.valueOf(UtilAll.getPid());
}
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/MQAdmin.java b/client/src/main/java/org/apache/rocketmq/client/MQAdmin.java
index 63b2d14531d..1e011d78be1 100644
--- a/client/src/main/java/org/apache/rocketmq/client/MQAdmin.java
+++ b/client/src/main/java/org/apache/rocketmq/client/MQAdmin.java
@@ -27,6 +27,7 @@
*/
public interface MQAdmin {
/**
+ * 创建主题
* Creates an topic
*
* @param key accesskey
@@ -48,6 +49,7 @@ void createTopic(String key, String newTopic, int queueNum, int topicSysFlag)
throws MQClientException;
/**
+ * 根据时间戳从队列中查找其偏移量
* Gets the message queue offset according to some time in milliseconds
* be cautious to call because of more IO overhead
*
@@ -58,6 +60,7 @@ void createTopic(String key, String newTopic, int queueNum, int topicSysFlag)
long searchOffset(final MessageQueue mq, final long timestamp) throws MQClientException;
/**
+ * 查找该消息队列中最大的物理偏移量
* Gets the max offset
*
* @param mq Instance of MessageQueue
@@ -66,6 +69,7 @@ void createTopic(String key, String newTopic, int queueNum, int topicSysFlag)
long maxOffset(final MessageQueue mq) throws MQClientException;
/**
+ * 查找该消息队列中的最小物理偏移量。
* Gets the minimum offset
*
* @param mq Instance of MessageQueue
@@ -82,6 +86,7 @@ void createTopic(String key, String newTopic, int queueNum, int topicSysFlag)
long earliestMsgStoreTime(final MessageQueue mq) throws MQClientException;
/**
+ * 根据消息偏移量查找消息。
* Query message according to message id
*
* @param offsetMsgId message id
@@ -91,6 +96,7 @@ MessageExt viewMessage(final String offsetMsgId) throws RemotingException, MQBro
InterruptedException, MQClientException;
/**
+ * 根据条件查询消息
* Query messages
*
* @param topic message topic
@@ -104,6 +110,7 @@ QueryResult queryMessage(final String topic, final String key, final int maxNum,
final long end) throws MQClientException, InterruptedException;
/**
+ * 根据主题与消息ID查找消息。
* @return The {@code MessageExt} of given msgId
*/
MessageExt viewMessage(String topic,
diff --git a/client/src/main/java/org/apache/rocketmq/client/Validators.java b/client/src/main/java/org/apache/rocketmq/client/Validators.java
index 1b96cd058ff..63e063a4ef8 100644
--- a/client/src/main/java/org/apache/rocketmq/client/Validators.java
+++ b/client/src/main/java/org/apache/rocketmq/client/Validators.java
@@ -78,6 +78,8 @@ public static boolean regularExpressionMatcher(String origin, Pattern pattern) {
}
/**
+ * 验证消息是否符合相应的规范。具体的规范要求是主题名称、消息体不能为空,
+ * 消息长度不能等于0且默认不能超过允许发送消息的最大长度4MB(maxMessageSize=1024×1024×4)
* Validate message
*/
public static void checkMessage(Message msg, DefaultMQProducer defaultMQProducer)
@@ -89,10 +91,12 @@ public static void checkMessage(Message msg, DefaultMQProducer defaultMQProducer
Validators.checkTopic(msg.getTopic());
// body
+ // 消息不能为空
if (null == msg.getBody()) {
throw new MQClientException(ResponseCode.MESSAGE_ILLEGAL, "the message body is null");
}
+ // 校验消息长度
if (0 == msg.getBody().length) {
throw new MQClientException(ResponseCode.MESSAGE_ILLEGAL, "the message body length is zero");
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
index 1ad5fbfe6fd..116bc4d5610 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
@@ -461,6 +461,7 @@ public SendResult sendMessage(
SendMessageRequestHeaderV2 requestHeaderV2 = SendMessageRequestHeaderV2.createSendMessageRequestHeaderV2(requestHeader);
request = RemotingCommand.createRequestCommand(msg instanceof MessageBatch ? RequestCode.SEND_BATCH_MESSAGE : RequestCode.SEND_MESSAGE_V2, requestHeaderV2);
} else {
+ // 构建远程调用命令, SEND_MESSAGE代表单条消息发送
request = RemotingCommand.createRequestCommand(RequestCode.SEND_MESSAGE, requestHeader);
}
}
@@ -476,6 +477,7 @@ public SendResult sendMessage(
if (timeoutMillis < costTimeAsync) {
throw new RemotingTooMuchRequestException("sendMessage call timeout");
}
+ // 使用Netty API,发送异步请求
this.sendMessageAsync(addr, brokerName, msg, timeoutMillis - costTimeAsync, request, sendCallback, topicPublishInfo, instance,
retryTimesWhenSendFailed, times, context, producer);
return null;
@@ -484,6 +486,7 @@ public SendResult sendMessage(
if (timeoutMillis < costTimeSync) {
throw new RemotingTooMuchRequestException("sendMessage call timeout");
}
+ // 使用Netty API,发送同步请求
return this.sendMessageSync(addr, brokerName, msg, timeoutMillis - costTimeSync, request);
default:
assert false;
@@ -1364,8 +1367,11 @@ public TopicRouteData getTopicRouteInfoFromNameServer(final String topic, final
GetRouteInfoRequestHeader requestHeader = new GetRouteInfoRequestHeader();
requestHeader.setTopic(topic);
+ // 创建与NameServer通讯的 消息内容,GET_ROUTEINTO_BY_TOPIC 代表查询 TOPIC 路由信息,
+ // NameServer中 org.apache.rocketmq.namesrv.processor.DefaultRequestProcessor.processRequest 会处理该请求
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ROUTEINTO_BY_TOPIC, requestHeader);
+ // 底层使用 netty连接NameServer,并发送查询请求
RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientManager.java b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientManager.java
index 053c049c9cd..4046e6c954b 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientManager.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientManager.java
@@ -44,7 +44,14 @@ public MQClientInstance getOrCreateMQClientInstance(final ClientConfig clientCon
return getOrCreateMQClientInstance(clientConfig, null);
}
+ /**
+ * 创建MQClientInstance实例。同一个clientId只会创建一个MQClientInstance实例
+ * @param clientConfig
+ * @param rpcHook
+ * @return
+ */
public MQClientInstance getOrCreateMQClientInstance(final ClientConfig clientConfig, RPCHook rpcHook) {
+ // 生成clientId
String clientId = clientConfig.buildMQClientId();
MQClientInstance instance = this.factoryTable.get(clientId);
if (null == instance) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
index bbd2eecb1c3..0089df09382 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
@@ -85,6 +85,9 @@
import org.apache.rocketmq.remoting.netty.NettyClientConfig;
import org.apache.rocketmq.remoting.protocol.RemotingCommand;
+/**
+ * MQClientInstance封装了RocketMQ的网络处理API,是消息生产者、消息消费者与NameServer、Broker打交道的网络通道。
+ */
public class MQClientInstance {
private final static long LOCK_TIMEOUT_MILLIS = 3000;
private final InternalLogger log = ClientLogger.getLog();
@@ -611,6 +614,7 @@ public boolean updateTopicRouteInfoFromNameServer(final String topic, boolean is
try {
TopicRouteData topicRouteData;
if (isDefault && defaultMQProducer != null) {
+ // 封装网络请求,连接NameServer查询 默认TOPIC 对应的路由信息
topicRouteData = this.mQClientAPIImpl.getDefaultTopicRouteInfoFromNameServer(defaultMQProducer.getCreateTopicKey(),
1000 * 3);
if (topicRouteData != null) {
@@ -621,6 +625,7 @@ public boolean updateTopicRouteInfoFromNameServer(final String topic, boolean is
}
}
} else {
+ // 封装网络请求,连接NameServer查询 TOPIC 对应的路由信息
topicRouteData = this.mQClientAPIImpl.getTopicRouteInfoFromNameServer(topic, 1000 * 3);
}
if (topicRouteData != null) {
@@ -633,14 +638,18 @@ public boolean updateTopicRouteInfoFromNameServer(final String topic, boolean is
}
if (changed) {
- TopicRouteData cloneTopicRouteData = topicRouteData.cloneTopicRouteData();
+ // 如果topic路由信息发生变更
+ TopicRouteData cloneTopicRouteData = topicRouteData.cloneTopicRouteData();
for (BrokerData bd : topicRouteData.getBrokerDatas()) {
+ // 更新Broker地址缓存表
this.brokerAddrTable.put(bd.getBrokerName(), bd.getBrokerAddrs());
}
// Update Pub info
+ // 更新生产端缓存
{
+ // 将topicRouteData中的List 转换成 topicPublishInfo的List 列表
TopicPublishInfo publishInfo = topicRouteData2TopicPublishInfo(topic, topicRouteData);
publishInfo.setHaveTopicRouterInfo(true);
Iterator> it = this.producerTable.entrySet().iterator();
@@ -654,6 +663,7 @@ public boolean updateTopicRouteInfoFromNameServer(final String topic, boolean is
}
// Update sub info
+ // 更新消费端缓存
{
Set subscribeInfo = topicRouteData2TopicSubscribeInfo(topic, topicRouteData);
Iterator> it = this.consumerTable.entrySet().iterator();
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
index fca50cc565c..1bdb535b7e2 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
@@ -183,11 +183,14 @@ public void start(final boolean startFactory) throws MQClientException {
this.checkConfig();
if (!this.defaultMQProducer.getProducerGroup().equals(MixAll.CLIENT_INNER_PRODUCER_GROUP)) {
+ // 检查producerGroup是否符合要求,改变生产者的instanceName为进程ID
this.defaultMQProducer.changeInstanceNameToPID();
}
+ // 生成或者获取 MQClientInstance 实例,它是RocketMQ的网络处理API,是消息生产者、消息消费者与NameServer、Broker打交道的网络通道
this.mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(this.defaultMQProducer, rpcHook);
+ // 向MQClientInstance注册服务,将当前生产者加入MQClientInstance管理,方便后续调用网络请求、进行心跳检测等。
boolean registerOK = mQClientFactory.registerProducer(this.defaultMQProducer.getProducerGroup(), this);
if (!registerOK) {
this.serviceState = ServiceState.CREATE_JUST;
@@ -199,6 +202,7 @@ public void start(final boolean startFactory) throws MQClientException {
this.topicPublishInfoTable.put(this.defaultMQProducer.getCreateTopicKey(), new TopicPublishInfo());
if (startFactory) {
+ // 启动MQClientInstance,如果MQClientInstance已经启动,则本次启动不会真正执行。
mQClientFactory.start();
}
@@ -553,6 +557,7 @@ private SendResult sendDefaultImpl(
long beginTimestampFirst = System.currentTimeMillis();
long beginTimestampPrev = beginTimestampFirst;
long endTimestamp = beginTimestampFirst;
+ // 查询topic对应的路由信息
TopicPublishInfo topicPublishInfo = this.tryToFindTopicPublishInfo(msg.getTopic());
if (topicPublishInfo != null && topicPublishInfo.ok()) {
boolean callTimeout = false;
@@ -564,6 +569,7 @@ private SendResult sendDefaultImpl(
String[] brokersSent = new String[timesTotal];
for (; times < timesTotal; times++) {
String lastBrokerName = null == mq ? null : mq.getBrokerName();
+ // 选择一个消息队列
MessageQueue mqSelected = this.selectOneMessageQueue(topicPublishInfo, lastBrokerName);
if (mqSelected != null) {
mq = mqSelected;
@@ -579,9 +585,10 @@ private SendResult sendDefaultImpl(
callTimeout = true;
break;
}
-
+ // 发送消息
sendResult = this.sendKernelImpl(msg, mq, communicationMode, sendCallback, topicPublishInfo, timeout - costTime);
endTimestamp = System.currentTimeMillis();
+ // 使用本次消息发送延迟时间来计算Broker故障规避时长
this.updateFaultItem(mq.getBrokerName(), endTimestamp - beginTimestampPrev, false);
switch (communicationMode) {
case ASYNC:
@@ -601,6 +608,7 @@ private SendResult sendDefaultImpl(
}
} catch (RemotingException e) {
endTimestamp = System.currentTimeMillis();
+ // 若消息发送失败,则更新失败条目,使用默认时长30s来计算Broker故障规避时长
this.updateFaultItem(mq.getBrokerName(), endTimestamp - beginTimestampPrev, true);
log.warn(String.format("sendKernelImpl exception, resend at once, InvokeID: %s, RT: %sms, Broker: %s", invokeID, endTimestamp - beginTimestampPrev, mq), e);
log.warn(msg.toString());
@@ -685,10 +693,20 @@ private SendResult sendDefaultImpl(
null).setResponseCode(ClientErrorCode.NOT_FOUND_TOPIC_EXCEPTION);
}
+ /**
+ * 如果生产者中缓存了topic的路由信息,且该路由信息包含消息队列,则直
+ * 接返回该路由信息。如果没有缓存或没有包含消息队列,则向
+ * NameServer查询该topic的路由信息。如果最终未找到路由信息,则抛
+ * 出异常,表示无法找到主题相关路由信息异常。
+ * @param topic
+ * @return
+ */
private TopicPublishInfo tryToFindTopicPublishInfo(final String topic) {
+ // 先查本地缓存
TopicPublishInfo topicPublishInfo = this.topicPublishInfoTable.get(topic);
if (null == topicPublishInfo || !topicPublishInfo.ok()) {
this.topicPublishInfoTable.putIfAbsent(topic, new TopicPublishInfo());
+ // 如果本地缓存没有该topic路由信息,则查询NameServer并更新本地缓存
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic);
topicPublishInfo = this.topicPublishInfoTable.get(topic);
}
@@ -696,12 +714,27 @@ private TopicPublishInfo tryToFindTopicPublishInfo(final String topic) {
if (topicPublishInfo.isHaveTopicRouterInfo() || topicPublishInfo.ok()) {
return topicPublishInfo;
} else {
+ // 如果是新创建的 Topic,NameServer中不会有Topic信息,则会加载 默认的Topic路由信息
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic, true, this.defaultMQProducer);
topicPublishInfo = this.topicPublishInfoTable.get(topic);
return topicPublishInfo;
}
}
+ /**
+ *
+ * @param msg 待发送消息
+ * @param mq 消息将发送到该消息队列上
+ * @param communicationMode 消息发送模式,包括SYNC、ASYNC、ONEWAY
+ * @param sendCallback 异步消息回调函数
+ * @param topicPublishInfo 主题路由信息
+ * @param timeout 消息发送超时时间
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
private SendResult sendKernelImpl(final Message msg,
final MessageQueue mq,
final CommunicationMode communicationMode,
@@ -709,8 +742,11 @@ private SendResult sendKernelImpl(final Message msg,
final TopicPublishInfo topicPublishInfo,
final long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
long beginStartTime = System.currentTimeMillis();
+
+ // 从本地缓存中,获取broker的网络地址
String brokerAddr = this.mQClientFactory.findBrokerAddressInPublish(mq.getBrokerName());
if (null == brokerAddr) {
+ // 去NameServer中加载Topic路由信息,然后获取broker网络地址
tryToFindTopicPublishInfo(mq.getTopic());
brokerAddr = this.mQClientFactory.findBrokerAddressInPublish(mq.getBrokerName());
}
@@ -722,6 +758,7 @@ private SendResult sendKernelImpl(final Message msg,
byte[] prevBody = msg.getBody();
try {
//for MessageBatch,ID has been set in the generating process
+ // 为消息分配全局唯一ID
if (!(msg instanceof MessageBatch)) {
MessageClientIDSetter.setUniqID(msg);
}
@@ -734,6 +771,7 @@ private SendResult sendKernelImpl(final Message msg,
int sysFlag = 0;
boolean msgBodyCompressed = false;
+ // 如果消息体超过默认的4KB阈值(compressMsgBodyOverHowmuch),则对消息体采用zip压缩,并置消息的系统标记为MessageSysFlag.COMPRESSED_FLAG。
if (this.tryToCompressMessage(msg)) {
sysFlag |= MessageSysFlag.COMPRESSED_FLAG;
msgBodyCompressed = true;
@@ -757,6 +795,8 @@ private SendResult sendKernelImpl(final Message msg,
}
if (this.hasSendMessageHook()) {
+ // 如果注册了消息发送钩子函数,则执行消息发送之前的增强逻辑
+ // 通过DefaultMQProducerImpl#registerSendMessageHook注册钩子处理类,并且可以注册多个
context = new SendMessageContext();
context.setProducer(this);
context.setProducerGroup(this.defaultMQProducer.getProducerGroup());
@@ -777,11 +817,14 @@ private SendResult sendKernelImpl(final Message msg,
this.executeSendMessageHookBefore(context);
}
+ /**构建消息发送请求包**/
SendMessageRequestHeader requestHeader = new SendMessageRequestHeader();
+ // 生产者组
requestHeader.setProducerGroup(this.defaultMQProducer.getProducerGroup());
requestHeader.setTopic(msg.getTopic());
requestHeader.setDefaultTopic(this.defaultMQProducer.getCreateTopicKey());
requestHeader.setDefaultTopicQueueNums(this.defaultMQProducer.getDefaultTopicQueueNums());
+ // 队列ID
requestHeader.setQueueId(mq.getQueueId());
requestHeader.setSysFlag(sysFlag);
requestHeader.setBornTimestamp(System.currentTimeMillis());
@@ -850,6 +893,7 @@ private SendResult sendKernelImpl(final Message msg,
if (timeout < costTimeSync) {
throw new RemotingTooMuchRequestException("sendKernelImpl call timeout");
}
+ // 发送消息
sendResult = this.mQClientFactory.getMQClientAPIImpl().sendMessage(
brokerAddr,
mq.getBrokerName(),
@@ -866,6 +910,7 @@ private SendResult sendKernelImpl(final Message msg,
}
if (this.hasSendMessageHook()) {
+ // 如果注册消息发送钩子函数,则在发送完毕后,执行后置操作
context.setSendResult(sendResult);
this.executeSendMessageHookAfter(context);
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java b/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
index deb02cff285..94f0d5ebc51 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
@@ -24,10 +24,23 @@
import org.apache.rocketmq.common.protocol.route.TopicRouteData;
public class TopicPublishInfo {
+
+ /**
+ * 是否是顺序消息
+ */
private boolean orderTopic = false;
private boolean haveTopicRouterInfo = false;
+ /**
+ * 该Topic的队列信息
+ */
private List messageQueueList = new ArrayList();
+ /**
+ * 每选择一次消息队列,该值会自增1,如果超过Integer.MAX_VALUE,则重置为0,用于选择消息队列。
+ */
private volatile ThreadLocalIndex sendWhichQueue = new ThreadLocalIndex();
+ /**
+ * Topic路由相关信息
+ */
private TopicRouteData topicRouteData;
public boolean isOrderTopic() {
@@ -68,8 +81,14 @@ public void setHaveTopicRouterInfo(boolean haveTopicRouterInfo) {
public MessageQueue selectOneMessageQueue(final String lastBrokerName) {
if (lastBrokerName == null) {
+ // 在消息发送过程中,可能会多次执行选择消息队列这个方法,
+ //lastBrokerName就是上一次选择的执行发送消息失败的Broker。第一
+ //次执行消息队列选择时,lastBrokerName为null,此时直接用
+ //sendWhichQueue自增再获取值,与当前路由表中消息队列的个数取
+ //模,返回该位置的MessageQueue(selectOneMessageQueue()方法
return selectOneMessageQueue();
} else {
+ //如果消息发送失败,下次进行消息队列选择时规避上次MessageQueue所在的Broker,否则有可能再次失败。
int index = this.sendWhichQueue.getAndIncrement();
for (int i = 0; i < this.messageQueueList.size(); i++) {
int pos = Math.abs(index++) % this.messageQueueList.size();
@@ -80,10 +99,15 @@ public MessageQueue selectOneMessageQueue(final String lastBrokerName) {
return mq;
}
}
+ // 如果没有找到其它broker上的队列,则降级为默认逻辑,轮询获取下一个队列信息。
return selectOneMessageQueue();
}
}
+ /**
+ * 轮询选择消息队列
+ * @return
+ */
public MessageQueue selectOneMessageQueue() {
int index = this.sendWhichQueue.getAndIncrement();
int pos = Math.abs(index) % this.messageQueueList.size();
diff --git a/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultTolerance.java b/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultTolerance.java
index 09a8aa46189..53275a1ff88 100644
--- a/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultTolerance.java
+++ b/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultTolerance.java
@@ -20,9 +20,22 @@
public interface LatencyFaultTolerance {
void updateFaultItem(final T name, final long currentLatency, final long notAvailableDuration);
+ /**
+ * 判断Broker是否可用
+ * @param name
+ * @return
+ */
boolean isAvailable(final T name);
+ /**
+ * 移除失败条目,意味着Broker重新参与路由计算
+ * @param name
+ */
void remove(final T name);
+ /**
+ * 尝试从规避的Broker中选择一个可用的Broker,如果没有找到,则返回null。
+ * @return
+ */
T pickOneAtLeast();
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultToleranceImpl.java b/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultToleranceImpl.java
index 72d43476f83..d95a42c4bac 100644
--- a/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultToleranceImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/latency/LatencyFaultToleranceImpl.java
@@ -25,6 +25,10 @@
import org.apache.rocketmq.client.common.ThreadLocalIndex;
public class LatencyFaultToleranceImpl implements LatencyFaultTolerance {
+
+ /**
+ * 失败条目(规避规则条目)
+ */
private final ConcurrentHashMap faultItemTable = new ConcurrentHashMap(16);
private final ThreadLocalIndex whichItemWorst = new ThreadLocalIndex();
@@ -96,9 +100,22 @@ public String toString() {
'}';
}
+ /**
+ * 失败条目(规避规则条目)
+ */
class FaultItem implements Comparable {
+
+ /**
+ * 条目唯一键,这里为brokerName
+ */
private final String name;
+ /**
+ * 本次消息发送的延迟时间
+ */
private volatile long currentLatency;
+ /**
+ * 故障恢复的开始时间
+ */
private volatile long startTimestamp;
public FaultItem(final String name) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java b/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
index 7854fcb7d10..f397118b03a 100644
--- a/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
+++ b/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
@@ -26,6 +26,18 @@ public class MQFaultStrategy {
private final static InternalLogger log = ClientLogger.getLog();
private final LatencyFaultTolerance latencyFaultTolerance = new LatencyFaultToleranceImpl();
+ /**
+ * 是否启用Broker故障延迟机制
+ * 开启与不开启sendLatencyFaultEnable机制在消息发送时都能规避故
+ * 障的Broker,那么这两种机制有何区别呢?
+ *
+ * 开启所谓的故障延迟机制,即设置sendLatencyFaultEnable为true,
+ * 其实是一种较为悲观的做法。当消息发送者遇到一次消息发送失败
+ * 后,就会悲观地认为Broker不可用,在接下来的一段时间内就不再向
+ * 其发送消息,直接避开该Broker。而不开启延迟规避机制,就只会在
+ * 本次消息发送的重试过程中规避该Broker,下一次消息发送还是会继
+ * 续尝试。
+ */
private boolean sendLatencyFaultEnable = false;
private long[] latencyMax = {50L, 100L, 550L, 1000L, 2000L, 3000L, 15000L};
@@ -57,13 +69,16 @@ public void setSendLatencyFaultEnable(final boolean sendLatencyFaultEnable) {
public MessageQueue selectOneMessageQueue(final TopicPublishInfo tpInfo, final String lastBrokerName) {
if (this.sendLatencyFaultEnable) {
+ // 如果开启了故障延迟机制
try {
int index = tpInfo.getSendWhichQueue().getAndIncrement();
for (int i = 0; i < tpInfo.getMessageQueueList().size(); i++) {
int pos = Math.abs(index++) % tpInfo.getMessageQueueList().size();
if (pos < 0)
pos = 0;
+ // 获取一个消息队列
MessageQueue mq = tpInfo.getMessageQueueList().get(pos);
+ // 验证该消息队列是否可用
if (latencyFaultTolerance.isAvailable(mq.getBrokerName())) {
if (null == lastBrokerName || mq.getBrokerName().equals(lastBrokerName))
return mq;
@@ -80,6 +95,7 @@ public MessageQueue selectOneMessageQueue(final TopicPublishInfo tpInfo, final S
}
return mq;
} else {
+ // 移除失败条目,意味着Broker重新参与路由计算
latencyFaultTolerance.remove(notBestBroker);
}
} catch (Exception e) {
@@ -88,7 +104,7 @@ public MessageQueue selectOneMessageQueue(final TopicPublishInfo tpInfo, final S
return tpInfo.selectOneMessageQueue();
}
-
+ // 未开启故障延迟机制
return tpInfo.selectOneMessageQueue(lastBrokerName);
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java b/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java
index faa79f54c93..62ca705d2ea 100644
--- a/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java
+++ b/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java
@@ -62,6 +62,7 @@ public class DefaultMQProducer extends ClientConfig implements MQProducer {
protected final transient DefaultMQProducerImpl defaultMQProducerImpl;
private final InternalLogger log = ClientLogger.getLog();
/**
+ * 生产者所属的组
* Producer group conceptually aggregates all producer instances of exactly same role, which is particularly
* important when transactional messages are involved.
*
@@ -77,21 +78,25 @@ public class DefaultMQProducer extends ClientConfig implements MQProducer {
private String createTopicKey = MixAll.AUTO_CREATE_TOPIC_KEY_TOPIC;
/**
+ * 默认主题在每一个Broker队列的数量。
* Number of queues to create per default topic.
*/
private volatile int defaultTopicQueueNums = 4;
/**
+ * 发送消息的超时时间,默认为3s。
* Timeout for sending messages.
*/
private int sendMsgTimeout = 3000;
/**
+ * 消息体超过该值则启用压缩,默认为4KB。
* Compress message body threshold, namely, message body larger than 4k will be compressed on default.
*/
private int compressMsgBodyOverHowmuch = 1024 * 4;
/**
+ * 同步方式发送消息重试次数,默认为2,总共执行3次。
* Maximum number of retry to perform internally before claiming sending failure in synchronous mode.
*
* This may potentially cause message duplication which is up to application developers to resolve.
@@ -99,6 +104,7 @@ public class DefaultMQProducer extends ClientConfig implements MQProducer {
private int retryTimesWhenSendFailed = 2;
/**
+ * 异步方式发送消息的重试次数,默认为2。
* Maximum number of retry to perform internally before claiming sending failure in asynchronous mode.
*
* This may potentially cause message duplication which is up to application developers to resolve.
@@ -106,11 +112,13 @@ public class DefaultMQProducer extends ClientConfig implements MQProducer {
private int retryTimesWhenSendAsyncFailed = 2;
/**
+ * 消息发送失败(Broker端存储未成功)时,是否选择另外一个Broker重试,默认为false。
* Indicate whether to retry another broker on sending failure internally.
*/
private boolean retryAnotherBrokerWhenNotStoreOK = false;
/**
+ * 允许发送的最大消息长度,默认为4MB,该值为int类型,理论最大值为2^31-1。
* Maximum allowed message size in bytes.
*/
private int maxMessageSize = 1024 * 1024 * 4; // 4M
diff --git a/client/src/main/java/org/apache/rocketmq/client/producer/MQProducer.java b/client/src/main/java/org/apache/rocketmq/client/producer/MQProducer.java
index c6cf4c93596..70222b243b4 100644
--- a/client/src/main/java/org/apache/rocketmq/client/producer/MQProducer.java
+++ b/client/src/main/java/org/apache/rocketmq/client/producer/MQProducer.java
@@ -31,53 +31,204 @@ public interface MQProducer extends MQAdmin {
void shutdown();
+ /**
+ * 查找该主题下所有的消息队列。
+ * @param topic
+ * @return
+ * @throws MQClientException
+ */
List fetchPublishMessageQueues(final String topic) throws MQClientException;
+
+ /**
+ * 同步发送消息,具体发送到主题中的哪个消息队列由负载算法决定
+ * @param msg
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg) throws MQClientException, RemotingException, MQBrokerException,
InterruptedException;
+ /**
+ * 同步发送消息,如果发送超过timeout则抛出超时异常。
+ * @param msg
+ * @param timeout
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg, final long timeout) throws MQClientException,
RemotingException, MQBrokerException, InterruptedException;
+ /**
+ * 异步发送消息,sendCallback参数是消息发送成功后的回调方法。
+ * @param msg
+ * @param sendCallback
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final SendCallback sendCallback) throws MQClientException,
RemotingException, InterruptedException;
+ /**
+ * 异步发送消息,如果发送超过timeout则抛出超时异常。
+ * @param msg
+ * @param sendCallback
+ * @param timeout
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final SendCallback sendCallback, final long timeout)
throws MQClientException, RemotingException, InterruptedException;
+ /**
+ * 单向消息发送,即不在乎发送结果,消息发送出去后该方法立即返回。
+ * @param msg
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void sendOneway(final Message msg) throws MQClientException, RemotingException,
InterruptedException;
+ /**
+ * 同步方式发送消息,且发送到指定的消息队列。
+ * @param msg
+ * @param mq
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg, final MessageQueue mq) throws MQClientException,
RemotingException, MQBrokerException, InterruptedException;
+ /**
+ * 同步方式发送消息,且发送到指定的消息队列,超时抛异常。
+ * @param msg
+ * @param mq
+ * @param timeout
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg, final MessageQueue mq, final long timeout)
throws MQClientException, RemotingException, MQBrokerException, InterruptedException;
+ /**
+ * 异步方式发送消息,且发送到指定的消息队列。
+ * @param msg
+ * @param mq
+ * @param sendCallback
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final MessageQueue mq, final SendCallback sendCallback)
throws MQClientException, RemotingException, InterruptedException;
+ /**
+ * 异步方式发送消息,且发送到指定的消息队列,超时抛异常。
+ * @param msg
+ * @param mq
+ * @param sendCallback
+ * @param timeout
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final MessageQueue mq, final SendCallback sendCallback, long timeout)
throws MQClientException, RemotingException, InterruptedException;
+ /**
+ * 单向方式发送消息,且发送到指定的消息队列。
+ * @param msg
+ * @param mq
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void sendOneway(final Message msg, final MessageQueue mq) throws MQClientException,
RemotingException, InterruptedException;
+ /**
+ * 同步消息发送,指定消息选择算法,覆盖消息生产者默认的消息队列负载。
+ * @param msg
+ * @param selector
+ * @param arg
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg, final MessageQueueSelector selector, final Object arg)
throws MQClientException, RemotingException, MQBrokerException, InterruptedException;
+ /**
+ * 同步消息发送,指定消息选择算法,覆盖消息生产者默认的消息队列负载,超时抛出异常。
+ * @param msg
+ * @param selector
+ * @param arg
+ * @param timeout
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Message msg, final MessageQueueSelector selector, final Object arg,
final long timeout) throws MQClientException, RemotingException, MQBrokerException,
InterruptedException;
+ /**
+ * 异步消息发送,指定消息选择算法,覆盖消息生产者默认的消息队列负载。
+ * @param msg
+ * @param selector
+ * @param arg
+ * @param sendCallback
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final MessageQueueSelector selector, final Object arg,
final SendCallback sendCallback) throws MQClientException, RemotingException,
InterruptedException;
+ /**
+ * 异步消息发送,指定消息选择算法,覆盖消息生产者默认的消息队列负载,超时抛出异常。
+ * @param msg
+ * @param selector
+ * @param arg
+ * @param sendCallback
+ * @param timeout
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void send(final Message msg, final MessageQueueSelector selector, final Object arg,
final SendCallback sendCallback, final long timeout) throws MQClientException, RemotingException,
InterruptedException;
+ /**
+ * 同步单向方式发送消息,指定消息选择算法。
+ * @param msg
+ * @param selector
+ * @param arg
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws InterruptedException
+ */
void sendOneway(final Message msg, final MessageQueueSelector selector, final Object arg)
throws MQClientException, RemotingException, InterruptedException;
@@ -88,6 +239,15 @@ TransactionSendResult sendMessageInTransaction(final Message msg,
final Object arg) throws MQClientException;
//for batch
+ /**
+ * 同步批量发送消息
+ * @param msgs
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
SendResult send(final Collection msgs) throws MQClientException, RemotingException, MQBrokerException,
InterruptedException;
diff --git a/common/src/main/java/org/apache/rocketmq/common/message/Message.java b/common/src/main/java/org/apache/rocketmq/common/message/Message.java
index c9a133b4d0c..19b8b212608 100644
--- a/common/src/main/java/org/apache/rocketmq/common/message/Message.java
+++ b/common/src/main/java/org/apache/rocketmq/common/message/Message.java
@@ -22,13 +22,29 @@
import java.util.HashMap;
import java.util.Map;
+/**
+ * RocketMQ消息封装类
+ */
public class Message implements Serializable {
private static final long serialVersionUID = 8445773977080406428L;
private String topic;
+ /**
+ * 消息标记(RocketMQ不做处理)
+ */
private int flag;
+ /**
+ * 消息扩展信息
+ * 消息tag、消息检索key都存在该字段里
+ */
private Map properties;
+ /**
+ * 消息体
+ */
private byte[] body;
+ /**
+ * 事务ID
+ */
private String transactionId;
public Message() {
@@ -43,12 +59,15 @@ public Message(String topic, String tags, String keys, int flag, byte[] body, bo
this.flag = flag;
this.body = body;
+ // 消息tag,用于消息过滤
if (tags != null && tags.length() > 0)
this.setTags(tags);
+ // 消息索引键,用空格隔开,RocketMQ可以根据这些 key 快速检索消息
if (keys != null && keys.length() > 0)
this.setKeys(keys);
+ // 消息发送时是否等消息存储完成后再返回
this.setWaitStoreMsgOK(waitStoreMsgOK);
}
From 1942f2f2c9c58753d0cc1998c674dab52436e8be Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Tue, 10 Oct 2023 19:48:08 +0800
Subject: [PATCH 03/18] =?UTF-8?q?=E5=AE=8C=E5=96=84=E6=B6=88=E6=81=AF?=
=?UTF-8?q?=E5=AD=98=E5=82=A8=E6=B5=81=E7=A8=8B=E4=BB=A3=E7=A0=81=E6=B3=A8?=
=?UTF-8?q?=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../namesrv/routeinfo/RouteInfoManager.java | 4 +
.../rocketmq/store/AppendMessageResult.java | 23 ++-
.../org/apache/rocketmq/store/CommitLog.java | 102 +++++++++---
.../apache/rocketmq/store/ConsumeQueue.java | 45 +++++-
.../rocketmq/store/DefaultMessageStore.java | 148 ++++++++++++++++++
.../rocketmq/store/DispatchRequest.java | 44 ++++++
.../org/apache/rocketmq/store/MappedFile.java | 86 +++++++++-
.../rocketmq/store/MappedFileQueue.java | 78 ++++++++-
.../rocketmq/store/StoreCheckpoint.java | 14 ++
.../rocketmq/store/TransientStorePool.java | 17 ++
.../store/config/MessageStoreConfig.java | 3 +
.../rocketmq/store/index/IndexFile.java | 70 ++++++++-
.../rocketmq/store/index/IndexHeader.java | 19 ++-
.../rocketmq/store/index/IndexService.java | 12 ++
14 files changed, 633 insertions(+), 32 deletions(-)
diff --git a/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java b/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
index 3c3f15cf41b..6e021ce8e7e 100644
--- a/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
+++ b/namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java
@@ -790,6 +790,10 @@ public byte[] getHasUnitSubUnUnitTopicList() {
}
class BrokerLiveInfo {
+
+ /**
+ * 最近一次心跳上报的时间
+ */
private long lastUpdateTimestamp;
private DataVersion dataVersion;
private Channel channel;
diff --git a/store/src/main/java/org/apache/rocketmq/store/AppendMessageResult.java b/store/src/main/java/org/apache/rocketmq/store/AppendMessageResult.java
index d6d1aa6a31c..00c78a12a82 100644
--- a/store/src/main/java/org/apache/rocketmq/store/AppendMessageResult.java
+++ b/store/src/main/java/org/apache/rocketmq/store/AppendMessageResult.java
@@ -21,19 +21,40 @@
*/
public class AppendMessageResult {
// Return code
+ /**
+ * 消息追加结果。取值为PUT_OK则代表追加成功、
+ * END_OF_FILE则代表超过文件大小、
+ * MESSAGE_SIZE_EXCEEDED则代表消息长度超过最大允许长度、
+ * PROPERTIES_SIZE_EXCEEDED则代表消息属性超过最大允许长度、
+ * UNKNOWN_ERROR则代表未知异常。
+ */
private AppendMessageStatus status;
// Where to start writing
+ /**
+ * 消息的物理偏移量。
+ */
private long wroteOffset;
// Write Bytes
private int wroteBytes;
// Message ID
private String msgId;
// Message storage timestamp
+ /**
+ * 消息存储时间戳
+ */
private long storeTimestamp;
// Consume queue's offset(step by one)
+ /**
+ * 消息消费队列的逻辑偏移量,类似于数组下标
+ */
private long logicsOffset;
+ /**
+ * 写入页缓存的响应时间
+ */
private long pagecacheRT = 0;
-
+ /**
+ * 批量发送消息时的消息条数
+ */
private int msgNum = 1;
public AppendMessageResult(AppendMessageStatus status) {
diff --git a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
index 3d89fe4c519..581ff40afa7 100644
--- a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
+++ b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
@@ -159,31 +159,40 @@ public SelectMappedBufferResult getData(final long offset, final boolean returnF
/**
* When the normal exit, data recovery, all memory data have been flush
+ *
*/
public void recoverNormally(long maxPhyOffsetOfConsumeQueue) {
+ // checkCRCOnRecover参数用于在进行文件恢复时查找消息是否验证CRC
boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
final List mappedFiles = this.mappedFileQueue.getMappedFiles();
if (!mappedFiles.isEmpty()) {
// Began to recover from the last third file
+ // Broker正常停止再重启时,从倒数第3个文件开始恢复,如果不足3个文件,则从第一个文件开始恢复
int index = mappedFiles.size() - 3;
if (index < 0)
index = 0;
MappedFile mappedFile = mappedFiles.get(index);
ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
+ // processOffset为CommitLog文件已确认的物理偏移量,等于mappedFile.getFileFromOffset加上mappedFileOffset
long processOffset = mappedFile.getFileFromOffset();
+ // mappedFileOffset为当前文件已校验通过的物理偏移量
long mappedFileOffset = 0;
+ // 遍历CommitLog文件,每次取出一条信息
while (true) {
DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
int size = dispatchRequest.getMsgSize();
// Normal data
if (dispatchRequest.isSuccess() && size > 0) {
+ // 如果查找结果为true并且消息的长度大于0,表示消息正确,mappedFileOffset指针向前移动本条消息的长度
mappedFileOffset += size;
}
// Come the end of the file, switch to the next file Since the
// return 0 representatives met last hole,
// this can not be included in truncate offset
else if (dispatchRequest.isSuccess() && size == 0) {
+ //如果查找结果为true并且消息的长度等于0,表示已到该文件的末尾,如果还有下一个文件,则重置
+ //processOffset、mappedFileOffset并重复上述步骤,否则跳出循环
index++;
if (index >= mappedFiles.size()) {
// Current branch can not happen
@@ -199,11 +208,13 @@ else if (dispatchRequest.isSuccess() && size == 0) {
}
// Intermediate file read error
else if (!dispatchRequest.isSuccess()) {
+ // 如果查找结果为false,表明该文件未填满所有消息,则跳出循环
log.info("recover physics file end, " + mappedFile.getFileName());
break;
}
}
+
processOffset += mappedFileOffset;
this.mappedFileQueue.setFlushedWhere(processOffset);
this.mappedFileQueue.setCommittedWhere(processOffset);
@@ -384,26 +395,34 @@ public DispatchRequest checkMessageAndReturnSize(java.nio.ByteBuffer byteBuffer,
return new DispatchRequest(-1, false /* success */);
}
+ /**
+ * CommitLog条目是不定长的,每一个条目的长度存储在前4个字节中。
+ * @param sysFlag
+ * @param bodyLength 消息body长度
+ * @param topicLength topic长度
+ * @param propertiesLength properties长度
+ * @return
+ */
protected static int calMsgLength(int sysFlag, int bodyLength, int topicLength, int propertiesLength) {
int bornhostLength = (sysFlag & MessageSysFlag.BORNHOST_V6_FLAG) == 0 ? 8 : 20;
int storehostAddressLength = (sysFlag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0 ? 8 : 20;
- final int msgLen = 4 //TOTALSIZE
+ final int msgLen = 4 //TOTALSIZE 消息头部4字节,记录消息条目总长度
+ 4 //MAGICCODE
- + 4 //BODYCRC
- + 4 //QUEUEID
- + 4 //FLAG
- + 8 //QUEUEOFFSET
- + 8 //PHYSICALOFFSET
- + 4 //SYSFLAG
- + 8 //BORNTIMESTAMP
- + bornhostLength //BORNHOST
- + 8 //STORETIMESTAMP
- + storehostAddressLength //STOREHOSTADDRESS
- + 4 //RECONSUMETIMES
- + 8 //Prepared Transaction Offset
- + 4 + (bodyLength > 0 ? bodyLength : 0) //BODY
- + 1 + topicLength //TOPIC
- + 2 + (propertiesLength > 0 ? propertiesLength : 0) //propertiesLength
+ + 4 //BODYCRC 消息体的crc校验码
+ + 4 //QUEUEID 消息消费队列ID
+ + 4 //FLAG 消息标记,RocketMQ对其不做处理,供应用程序使用, 默认4字节。
+ + 8 //QUEUEOFFSET 消息在ConsumeQueue文件中的偏移量,8字节
+ + 8 //PHYSICALOFFSET 消息在CommitLog文件中的物理偏移量,8字 节。
+ + 4 //SYSFLAG 消息系统标记,例如是否压缩、是否是事务消息 等,4字节。
+ + 8 //BORNTIMESTAMP 消息生产者调用消息发送API的时间戳,8字 节。
+ + bornhostLength //BORNHOST 消息发送者IP、端口号,8字节。
+ + 8 //STORETIMESTAMP 消息存储时间戳,8字节。
+ + storehostAddressLength //STOREHOSTADDRESS Broker服务器IP+端口号,8字节。
+ + 4 //RECONSUMETIMES 消息重试次数,4字节。
+ + 8 //Prepared Transaction Offset 事务消息的物理偏移量,8字节
+ + 4 + (bodyLength > 0 ? bodyLength : 0) //BODY 消息体长度,4字节 + 消息体内容,长度为bodyLenth中存储的值。
+ + 1 + topicLength //TOPIC 主题存储长度,1字节,表示主题名称不能超 过255个字符 + Topic内容,长度为 topicLength
+ + 2 + (propertiesLength > 0 ? propertiesLength : 0) //propertiesLength 消息属性长度,2字节,表示消息属性长度不能超过65536个字符 + 消息属性内容
+ 0;
return msgLen;
}
@@ -552,6 +571,7 @@ public long getBeginTimeInLock() {
public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
// Set the storage time
+ // 记录消息写入时间
msg.setStoreTimestamp(System.currentTimeMillis());
// Set the message body BODY CRC (consider the most appropriate setting
// on the client)
@@ -573,10 +593,13 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
msg.setDelayTimeLevel(this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel());
}
+
topic = ScheduleMessageService.SCHEDULE_TOPIC;
queueId = ScheduleMessageService.delayLevel2QueueId(msg.getDelayTimeLevel());
// Backup real topic, queueId
+ // 如果消息的延迟级别大于0,将消息的原主题名称与原消息队列ID存入消息属性中,用延迟消息主题SCHEDULE_TOPIC_XXXX、消
+ //息队列ID更新原先消息的主题与队列,这是并发消息重试关键的异步
MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_TOPIC, msg.getTopic());
MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_QUEUE_ID, String.valueOf(msg.getQueueId()));
msg.setPropertiesString(MessageDecoder.messageProperties2String(msg.getProperties()));
@@ -599,8 +622,9 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
long eclipsedTimeInLock = 0;
MappedFile unlockMappedFile = null;
+ // 获取当前可以写入的CommitLog文件,对应 ${ROCKET_HOME}/store/commitlog 文件夹下的文件
MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile();
-
+ // 在将消息写入CommitLog之前,先申请putMessageLock
putMessageLock.lock(); //spin or ReentrantLock ,depending on store config
try {
long beginLockTimestamp = this.defaultMessageStore.getSystemClock().now();
@@ -611,19 +635,24 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
msg.setStoreTimestamp(beginLockTimestamp);
if (null == mappedFile || mappedFile.isFull()) {
+ //如果mappedFile为空,表明 ${ROCKET_HOME}/store/commitlog目录下不存在任何文件,说明本次
+ //消息是第一次发送,用偏移量0创建第一个CommitLog文件,文件名为00000000000000000000,
mappedFile = this.mappedFileQueue.getLastMappedFile(0); // Mark: NewFile may be cause noise
}
if (null == mappedFile) {
log.error("create mapped file1 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
beginTimeInLock = 0;
+ // 如果文件创建失败,抛出CREATE_MAPEDFILE_FAILED,这很有可能是磁盘空间不足或权限不够导致的,
return new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, null);
}
+ // 将消息追加到 CommitLog 中
result = mappedFile.appendMessage(msg, this.appendMessageCallback);
switch (result.getStatus()) {
case PUT_OK:
break;
case END_OF_FILE:
+ // 当前CommitLog文件不够写入此条消息 (CommitLog定长1GB)
unlockMappedFile = mappedFile;
// Create a new file, re-write the message
mappedFile = this.mappedFileQueue.getLastMappedFile(0);
@@ -650,6 +679,7 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
eclipsedTimeInLock = this.defaultMessageStore.getSystemClock().now() - beginLockTimestamp;
beginTimeInLock = 0;
} finally {
+ // 追加完毕,释放锁
putMessageLock.unlock();
}
@@ -667,7 +697,9 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
storeStatsService.getSinglePutMessageTopicTimesTotal(msg.getTopic()).incrementAndGet();
storeStatsService.getSinglePutMessageTopicSizeTotal(topic).addAndGet(result.getWroteBytes());
+ // appendMessage只是将消息追加到内存中,需要根据采取的是同步刷盘方式还是异步刷盘方式,将内存中的数据持久化到磁盘中。
handleDiskFlush(result, putMessageResult, msg);
+ // HA主从同步复制
handleHA(result, putMessageResult, msg);
return putMessageResult;
@@ -855,9 +887,16 @@ public long pickupStoreTimestamp(final long offset, final int size) {
return -1;
}
+ /**
+ * 获取当前CommitLog目录的最小偏移量
+ * 由于RocketMQ会定时删除commitLog,所以CommitLog目录的最小偏移量并不一定是0,而是需要拿到第一个文件,获取该文件的起始偏移量
+ * @return
+ */
public long getMinOffset() {
+ // 获取第一个 CommitLog 文件
MappedFile mappedFile = this.mappedFileQueue.getFirstMappedFile();
if (mappedFile != null) {
+ // 如果该文件可用,则返回该文件的起始偏移量,否则返回下一个文件的起始偏移量
if (mappedFile.isAvailable()) {
return mappedFile.getFileFromOffset();
} else {
@@ -868,6 +907,16 @@ public long getMinOffset() {
return -1;
}
+ /**
+ * 根据偏移量与消息长度查找消息。首先根据偏移找到文件所在的
+ * 物理偏移量,然后用offset与文件长度取余,得到在文件内的偏移
+ * 量,从该偏移量读取size长度的内容并返回。如果只根据消息偏移量
+ * 查找消息,则首先找到文件内的偏移量,然后尝试读取4字节,获取消
+ * 息的实际长度,最后读取指定字节。
+ * @param offset
+ * @param size
+ * @return
+ */
public SelectMappedBufferResult getMessage(final long offset, final int size) {
int mappedFileSize = this.defaultMessageStore.getMessageStoreConfig().getMappedFileSizeCommitLog();
MappedFile mappedFile = this.mappedFileQueue.findMappedFileByOffset(offset, offset == 0);
@@ -878,6 +927,13 @@ public SelectMappedBufferResult getMessage(final long offset, final int size) {
return null;
}
+ /**
+ * 根据offset返回下一个文件的起始偏移量。获取一个文件的大
+ * 小,减去offset % mappedFileSize,回到下一文件的起始偏移量,
+ * 如代码清单4-32所示。
+ * @param offset
+ * @return
+ */
public long rollNextFile(final long offset) {
int mappedFileSize = this.defaultMessageStore.getMessageStoreConfig().getMappedFileSizeCommitLog();
return offset + mappedFileSize - offset % mappedFileSize;
@@ -1209,6 +1265,9 @@ public long getJointime() {
class DefaultAppendMessageCallback implements AppendMessageCallback {
// File at the end of the minimum fixed length empty
+ /**
+ * 每个CommitLog文件最少空闲8字节,高4字节存储当前文件的剩余空间,低4字节存储魔数 CommitLog.BLANK_MAGIC_CODE
+ */
private static final int END_FILE_MIN_BLANK_LENGTH = 4 + 4;
private final ByteBuffer msgIdMemory;
private final ByteBuffer msgIdV6Memory;
@@ -1248,6 +1307,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
this.resetByteBuffer(storeHostHolder, storeHostLength);
String msgId;
+ // 创建全局唯一的消息ID
if ((sysflag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0) {
msgId = MessageDecoder.createMessageId(this.msgIdMemory, msgInner.getStoreHostBytes(storeHostHolder), wroteOffset);
} else {
@@ -1255,6 +1315,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
}
// Record ConsumeQueue information
+ // 获取该消息在消息队列的物理偏移量。CommitLog中保存了当前所有消息队列的待写入物理偏移量
keyBuilder.setLength(0);
keyBuilder.append(msgInner.getTopic());
keyBuilder.append('-');
@@ -1286,7 +1347,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
*/
final byte[] propertiesData =
msgInner.getPropertiesString() == null ? null : msgInner.getPropertiesString().getBytes(MessageDecoder.CHARSET_UTF8);
-
+ // properties 长度
final int propertiesLength = propertiesData == null ? 0 : propertiesData.length;
if (propertiesLength > Short.MAX_VALUE) {
@@ -1299,6 +1360,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
final int bodyLength = msgInner.getBody() == null ? 0 : msgInner.getBody().length;
+ // 计算消息长度
final int msgLen = calMsgLength(msgInner.getSysFlag(), bodyLength, topicLength, propertiesLength);
// Exceeds the maximum message
@@ -1309,6 +1371,8 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
}
// Determines whether there is sufficient free space
+ // 如果消息长度+END_FILE_MIN_BLANK_LENGTH大于CommitLog文件的空闲空间,
+ // 则返回AppendMessageStatus.END_OF_FILE,Broker会创建一个新的CommitLog文件来存储该消息。
if ((msgLen + END_FILE_MIN_BLANK_LENGTH) > maxBlank) {
this.resetByteBuffer(this.msgStoreItemMemory, maxBlank);
// 1 TOTALSIZE
@@ -1369,6 +1433,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
final long beginTimeMills = CommitLog.this.defaultMessageStore.now();
// Write messages to the queue buffer
+ // 将消息内容存储到ByteBuffer中,然后创建AppendMessageResult。这里只是将消息存储在MappedFile对应的内存映射Buffer中,并没有写入磁盘
byteBuffer.put(this.msgStoreItemMemory.array(), 0, msgLen);
AppendMessageResult result = new AppendMessageResult(AppendMessageStatus.PUT_OK, wroteOffset, msgLen, msgId,
@@ -1381,6 +1446,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer
case MessageSysFlag.TRANSACTION_NOT_TYPE:
case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
// The next update ConsumeQueue information
+ // 更新消息队列的逻辑偏移量
CommitLog.this.topicQueueTable.put(key, ++queueOffset);
break;
default:
diff --git a/store/src/main/java/org/apache/rocketmq/store/ConsumeQueue.java b/store/src/main/java/org/apache/rocketmq/store/ConsumeQueue.java
index 87ff0a096cf..214f36f4343 100644
--- a/store/src/main/java/org/apache/rocketmq/store/ConsumeQueue.java
+++ b/store/src/main/java/org/apache/rocketmq/store/ConsumeQueue.java
@@ -25,9 +25,28 @@
import org.apache.rocketmq.store.config.BrokerRole;
import org.apache.rocketmq.store.config.StorePathConfigHelper;
+/**
+ * RocketMQ基于主题订阅模式实现消息消费,消费者关心的是一个
+ * 主题下的所有消息,但同一主题的消息是不连续地存储在CommitLog文
+ * 件中的。如果消息消费者直接从消息存储文件中遍历查找订阅主题下
+ * 的消息,效率将极其低下。RocketMQ为了适应消息消费的检索需求,
+ * 设计了ConsumeQueue文件,该文件可以看作CommitLog关于消息消费的
+ * “索引”文件,ConsumeQueue的第一级目录为消息主题,第二级目录
+ * 为主题的消息队列。
+ * $HOME/store/consumequeue/{topic}/{queueId}/{fileName}
+ *
+ * ConsumeQueue 的设计极具技巧,每个条目长度固定(8字节CommitLog物理偏移量、
+ * 4字节消息长度、8字节tag哈希码)。这里不是存储tag的原始字符串,而是存储哈希码,
+ * 目的是确保每个条目的长度固定,可以使用访问类似数组下标的方式快速定位条目,
+ * 极大地提高了ConsumeQueue文件的读取性能。
+ */
public class ConsumeQueue {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
+ /**
+ * ConsumeQueue文件中每一个条目是固定长度 20字节
+ * 8字节 CommitLog偏移量 + 4字节消息长度 + 8字节 Tag Hash码
+ */
public static final int CQ_STORE_UNIT_SIZE = 20;
private static final InternalLogger LOG_ERROR = InternalLoggerFactory.getLogger(LoggerName.STORE_ERROR_LOGGER_NAME);
@@ -152,6 +171,11 @@ public void recover() {
}
}
+ /**
+ * 根据消息存储时间来查找具体消息的下标
+ * @param timestamp
+ * @return
+ */
public long getOffsetInQueueByTime(final long timestamp) {
MappedFile mappedFile = this.mappedFileQueue.getMappedFileByTime(timestamp);
if (mappedFile != null) {
@@ -376,6 +400,10 @@ public long getMinOffsetInQueue() {
return this.minLogicOffset / CQ_STORE_UNIT_SIZE;
}
+ /**
+ * 将 CommitLog 分发存储到 ConsumeQueue中,只有执行该方法后,才会将CommitLog索引信息存到ConsumeQueue中,这样消费者才能消费到这条消息
+ * @param request
+ */
public void putMessagePositionInfoWrapper(DispatchRequest request) {
final int maxRetries = 30;
boolean canWrite = this.defaultMessageStore.getRunningFlags().isCQWriteable();
@@ -395,6 +423,7 @@ public void putMessagePositionInfoWrapper(DispatchRequest request) {
topic, queueId, request.getCommitLogOffset());
}
}
+ // 执行分发操作
boolean result = this.putMessagePositionInfo(request.getCommitLogOffset(),
request.getMsgSize(), tagsCode, request.getConsumeQueueOffset());
if (result) {
@@ -429,11 +458,14 @@ private boolean putMessagePositionInfo(final long offset, final int size, final
log.warn("Maybe try to build consume queue repeatedly maxPhysicOffset={} phyOffset={}", maxPhysicOffset, offset);
return true;
}
-
+ // 切换读写模式
this.byteBufferIndex.flip();
this.byteBufferIndex.limit(CQ_STORE_UNIT_SIZE);
+ // 设置消息在CommitLog中的物理偏移量
this.byteBufferIndex.putLong(offset);
+ // 设置消息长度
this.byteBufferIndex.putInt(size);
+ // 设置消息 Tag Hash码
this.byteBufferIndex.putLong(tagsCode);
final long expectLogicOffset = cqOffset * CQ_STORE_UNIT_SIZE;
@@ -471,6 +503,7 @@ private boolean putMessagePositionInfo(final long offset, final int size, final
}
}
this.maxPhysicOffset = offset + size;
+ // 追加到 ConsumeQueue 文件
return mappedFile.appendMessage(this.byteBufferIndex.array());
}
return false;
@@ -488,6 +521,16 @@ private void fillPreBlank(final MappedFile mappedFile, final long untilWhere) {
}
}
+ /**
+ * 根据startIndex获取消息消费队列条目。通过startIndex×20得
+ * 到在ConsumeQueue文件的物理偏移量,如果该偏移量小于
+ * minLogicOffset,则返回null,说明该消息已被删除,如果大于
+ * minLogicOffset,则根据偏移量定位到具体的物理文件。通过将该偏
+ * 移量与物理文件的大小取模获取在该文件的偏移量,从偏移量开始连
+ * 续读取20个字节即可。
+ * @param startIndex
+ * @return
+ */
public SelectMappedBufferResult getIndexBuffer(final long startIndex) {
int mappedFileSize = this.mappedFileSize;
long offset = startIndex * CQ_STORE_UNIT_SIZE;
diff --git a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
index d5ba5692a92..8992df9a80d 100644
--- a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
+++ b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
@@ -61,33 +61,70 @@
import org.apache.rocketmq.store.schedule.ScheduleMessageService;
import org.apache.rocketmq.store.stats.BrokerStatsManager;
+/**
+ * 消息存储实现类org.apache.rocketmq.store.DefaultMessageStore是存储模块里面最重要的一个类,
+ * 包含了很多对存储文件进行操作的API,其他模块对消息实体的操作都是通过DefaultMessageStore进行的
+ */
public class DefaultMessageStore implements MessageStore {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
+ /**
+ * 消息存储配置属性
+ */
private final MessageStoreConfig messageStoreConfig;
+ /**
+ * CommitLog文件的存储实现类
+ */
// CommitLog
private final CommitLog commitLog;
+ /**
+ * 消息队列存储缓存表,按消息主题分组
+ */
private final ConcurrentMap> consumeQueueTable;
+ /**
+ * ConsumeQueue文件刷盘线程
+ */
private final FlushConsumeQueueService flushConsumeQueueService;
+ /**
+ * 清除CommitLog文件服务
+ */
private final CleanCommitLogService cleanCommitLogService;
+ /**
+ * 清除ConsumeQueue文件服务
+ */
private final CleanConsumeQueueService cleanConsumeQueueService;
+ /**
+ * Index文件实现类
+ */
private final IndexService indexService;
+ /**
+ * MappedFile分配服务
+ */
private final AllocateMappedFileService allocateMappedFileService;
+ /**
+ * CommitLog消息分发,根据CommitLog文件构建ConsumeQueue、Index文件。
+ */
private final ReputMessageService reputMessageService;
+ /**
+ * 存储高可用机制
+ */
private final HAService haService;
private final ScheduleMessageService scheduleMessageService;
private final StoreStatsService storeStatsService;
+ /**
+ * 消息堆内存缓存
+ */
private final TransientStorePool transientStorePool;
private final RunningFlags runningFlags = new RunningFlags();
@@ -97,14 +134,23 @@ public class DefaultMessageStore implements MessageStore {
Executors.newSingleThreadScheduledExecutor(new ThreadFactoryImpl("StoreScheduledThread"));
private final BrokerStatsManager brokerStatsManager;
private final MessageArrivingListener messageArrivingListener;
+ /**
+ * Broker配置属性
+ */
private final BrokerConfig brokerConfig;
private volatile boolean shutdown = true;
+ /**
+ * 文件刷盘检测点
+ */
private StoreCheckpoint storeCheckpoint;
private AtomicLong printTimes = new AtomicLong(0);
+ /**
+ * CommitLog文件转发请求
+ */
private final LinkedList dispatcherList;
private RandomAccessFile lockFile;
@@ -177,6 +223,7 @@ public boolean load() {
boolean result = true;
try {
+ // 判断上次broker停止是否是正常停止
boolean lastExitOK = !this.isTempFileExist();
log.info("last shutdown {}", lastExitOK ? "normally" : "abnormally");
@@ -185,17 +232,24 @@ public boolean load() {
}
// load Commit Log
+ // 加载CommitLog文件,加载 ${ROCKET_HOME}/store/commitlog 目录下所有文件并按照文件名进行
+ // 排序。如果文件与配置文件的单个文件大小不一致,将忽略该目录下的所有文件,然后创建MappedFile对象。注意load()方法将
+ //wrotePosition、flushedPosition、committedPosition三个指针都设置为文件大小。
result = result && this.commitLog.load();
// load Consume Queue
+ // 加载 ConsumeQueue
result = result && this.loadConsumeQueue();
if (result) {
+ // 加载并存储checkpoint文件,主要用于记录CommitLog文件、ConsumeQueue文件、Index文件的刷盘点
this.storeCheckpoint =
new StoreCheckpoint(StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir()));
+ // 加载Index文件,如果上次异常退出,而且Index文件刷盘时间小于该文件最大的消息时间戳,则该文件将立即销毁
this.indexService.load(lastExitOK);
+ // 根据Broker是否为正常停止,执行不同的恢复策略
this.recover(lastExitOK);
log.info("load over, and the max phy offset = {}", this.getMaxPhyOffset());
@@ -232,6 +286,7 @@ public void start() throws Exception {
* 4. Make sure the fall-behind messages to be dispatched before starting the commitlog, especially when the broker role are automatically changed.
*/
long maxPhysicalPosInLogicQueue = commitLog.getMinOffset();
+ // 将ConsumeQueue中最大CommitLog物理偏移量,设置成ReputMessage线程重放的起点
for (ConcurrentMap maps : this.consumeQueueTable.values()) {
for (ConsumeQueue logic : maps.values()) {
if (logic.getMaxPhysicOffset() > maxPhysicalPosInLogicQueue) {
@@ -270,6 +325,8 @@ public void start() throws Exception {
Thread.sleep(1000);
log.info("Try to finish doing reput the messages fall behind during the starting, reputOffset={} maxOffset={} behind={}", this.reputMessageService.getReputFromOffset(), this.getMaxPhyOffset(), this.dispatchBehindBytes());
}
+ // 恢复ConsumeQueue文件后,将在CommitLog实例中保存每个消息消费队列当前的存储逻辑偏移量,这也是消息中不仅存储主
+ // 题、消息队列ID还存储了消息队列偏移量的关键所在。
this.recoverTopicQueueTable();
}
@@ -352,11 +409,13 @@ public void destroyLogics() {
}
public PutMessageResult putMessage(MessageExtBrokerInner msg) {
+ // 如果当前broker停止工作或当前不支持写入,则拒绝消息写入。
if (this.shutdown) {
log.warn("message store has shutdown, so putMessage is forbidden");
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
+ // 如果当前broker是从节点,则拒绝写入消息
if (BrokerRole.SLAVE == this.messageStoreConfig.getBrokerRole()) {
long value = this.printTimes.getAndIncrement();
if ((value % 50000) == 0) {
@@ -366,6 +425,7 @@ public PutMessageResult putMessage(MessageExtBrokerInner msg) {
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
+ // 判断当前broker是否能够写入
if (!this.runningFlags.isWriteable()) {
long value = this.printTimes.getAndIncrement();
if ((value % 50000) == 0) {
@@ -377,21 +437,26 @@ public PutMessageResult putMessage(MessageExtBrokerInner msg) {
this.printTimes.set(0);
}
+ // topic长度大于127字符,则报错
if (msg.getTopic().length() > Byte.MAX_VALUE) {
log.warn("putMessage message topic length too long " + msg.getTopic().length());
return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
}
+ // 消息属性长度大于 32767,则报错
if (msg.getPropertiesString() != null && msg.getPropertiesString().length() > Short.MAX_VALUE) {
log.warn("putMessage message properties length too long " + msg.getPropertiesString().length());
return new PutMessageResult(PutMessageStatus.PROPERTIES_SIZE_EXCEEDED, null);
}
+ // OSPageCacheBusy通常是出现在操作系统在试图缓存太多页面时。当物理内存充满了,操作系统可能试图清除一些页面来为新的页面腾出空间。
+ // 如果这个过程中所有的页面都在使用(即“繁忙”),那么就会报告OSPageCacheBusy。
if (this.isOSPageCacheBusy()) {
return new PutMessageResult(PutMessageStatus.OS_PAGECACHE_BUSY, null);
}
long beginTime = this.getSystemClock().now();
+ // 写入CommitLog
PutMessageResult result = this.commitLog.putMessage(msg);
long elapsedTime = this.getSystemClock().now() - beginTime;
@@ -1128,6 +1193,15 @@ public MessageExt lookMessageByOffset(long commitLogOffset, int size) {
return null;
}
+ /**
+ * 根据消息主题与队列ID,先获取对应的ConsumeQueue文
+ * 件,其逻辑比较简单,因为每一个消息主题对应一个ConsumeQueue目
+ * 录,主题下每一个消息队列对应一个文件夹,所以取出该文件夹最后
+ * 的ConsumeQueue文件即可
+ * @param topic
+ * @param queueId
+ * @return
+ */
public ConsumeQueue findConsumeQueue(String topic, int queueId) {
ConcurrentMap map = consumeQueueTable.get(topic);
if (null == map) {
@@ -1284,12 +1358,28 @@ private void checkSelf() {
}
}
+ /**
+ * 判断 broker存储目录下,是否有abort文件(${ROCKET_HOME}/store/abort),有则代表上一次broker是非正常停止。
+ *
+ * 其实现机制是Broker在启动时创建${ROCKET_HOME}/store/abort文件,在退出时通过注册JVM钩子
+ * 函数删除abort文件。如果下一次启动时存在abort文件
+ *
+ * @return
+ */
private boolean isTempFileExist() {
String fileName = StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir());
File file = new File(fileName);
return file.exists();
}
+ /**
+ * 加载ConsumeQueue文件
+ * 遍历消息消费队列根目录,获取该Broker存储的所有主题,然后
+ * 遍历每个主题目录,获取该主题下的所有消息消费队列,最后分别加
+ * 载每个消息消费队列下的文件,构建ConsumeQueue对象,主要初始化
+ * ConsumeQueue的topic、queueId、storePath、mappedFileSize属性
+ * @return
+ */
private boolean loadConsumeQueue() {
File dirLogic = new File(StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()));
File[] fileTopicList = dirLogic.listFiles();
@@ -1327,12 +1417,28 @@ private boolean loadConsumeQueue() {
return true;
}
+ /**
+ * 根据Broker上一次停止是否为正常停止,执行不同的恢复策略
+ *
+ * 存储启动时所谓的文件恢复主要完成flushedPosition、
+ * committedWhere指针的设置、将消息消费队列最大偏移量加载到内
+ * 存,并删除flushedPosition之后所有的文件。如果Broker异常停止,
+ * 在文件恢复过程中,RocketMQ会将最后一个有效文件中的所有消息重
+ * 新转发到ConsumeQueue和Index文件中,确保不丢失消息,但同时会带
+ * 来消息重复的问题。纵观RocketMQ的整体设计思想,RocketMQ保证消息
+ * 不丢失但不保证消息不会重复消费,故消息消费业务方需要实现消息
+ * 消费的幂等设计
+ *
+ * @param lastExitOK
+ */
private void recover(final boolean lastExitOK) {
long maxPhyOffsetOfConsumeQueue = this.recoverConsumeQueue();
if (lastExitOK) {
+ // 正常停止
this.commitLog.recoverNormally(maxPhyOffsetOfConsumeQueue);
} else {
+ // 异常停止
this.commitLog.recoverAbnormally(maxPhyOffsetOfConsumeQueue);
}
@@ -1372,6 +1478,10 @@ private long recoverConsumeQueue() {
return maxPhysicOffset;
}
+ /**
+ * 恢复ConsumeQueue文件后,将在CommitLog实例中保存每个消息消费队列当前的存储逻辑偏移量,这也是消息中不仅存储主
+ * 题、消息队列ID还存储了消息队列偏移量的关键所在。
+ */
public void recoverTopicQueueTable() {
HashMap table = new HashMap(1024);
long minPhyOffset = this.commitLog.getMinOffset();
@@ -1419,12 +1529,16 @@ public RunningFlags getRunningFlags() {
}
public void doDispatch(DispatchRequest req) {
+ // 不同的文件有不同的转发实现
+ // ConsumeQueue 对应 org.apache.rocketmq.store.DefaultMessageStore.CommitLogDispatcherBuildConsumeQueue
+ // Index 对应 org.apache.rocketmq.store.DefaultMessageStore.CommitLogDispatcherBuildIndex
for (CommitLogDispatcher dispatcher : this.dispatcherList) {
dispatcher.dispatch(req);
}
}
public void putMessagePositionInfo(DispatchRequest dispatchRequest) {
+ // 根据 topic 和 队列ID,获取对应ConsumeQueue
ConsumeQueue cq = this.findConsumeQueue(dispatchRequest.getTopic(), dispatchRequest.getQueueId());
cq.putMessagePositionInfoWrapper(dispatchRequest);
}
@@ -1478,6 +1592,9 @@ public void run() {
}, 6, TimeUnit.SECONDS);
}
+ /**
+ * CommitLog向 ConsumeQueue文件分发实现
+ */
class CommitLogDispatcherBuildConsumeQueue implements CommitLogDispatcher {
@Override
@@ -1486,8 +1603,10 @@ public void dispatch(DispatchRequest request) {
switch (tranType) {
case MessageSysFlag.TRANSACTION_NOT_TYPE:
case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
+ // 将CommitLog分发至ConsumeQueue
DefaultMessageStore.this.putMessagePositionInfo(request);
break;
+ // 事务消息,在未提交,以及回滚操作下,不会将CommitLog中的消息分发到ConsumeQueue
case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
break;
@@ -1495,11 +1614,15 @@ public void dispatch(DispatchRequest request) {
}
}
+ /**
+ * CommitLog向Index文件分发的实现
+ */
class CommitLogDispatcherBuildIndex implements CommitLogDispatcher {
@Override
public void dispatch(DispatchRequest request) {
if (DefaultMessageStore.this.messageStoreConfig.isMessageIndexEnable()) {
+ // 只有开启了消息索引开关,才会执行消息索引,默认是开启状态
DefaultMessageStore.this.indexService.buildIndex(request);
}
}
@@ -1774,6 +1897,22 @@ public long getJointime() {
}
}
+ /**
+ * 因为ConsumeQueue文件、Index文件都是基于CommitLog文件构建
+ * 的,所以当消息生产者提交的消息存储到CommitLog文件中时,
+ * ConsumeQueue文件、Index文件需要及时更新,否则消息无法及时被消
+ * 费,根据消息属性查找消息也会出现较大延迟。RocketMQ通过开启一
+ * 个线程ReputMessageServcie来准实时转发CommitLog文件的更新事
+ * 件,相应的任务处理器根据转发的消息及时更新ConsumeQueue文件、
+ * Index文件。
+ *
+ * Broker服务器在启动时会启动ReputMessageService线程,并初始
+ * 化一个非常关键的参数reputFromOffset,该参数的含义是
+ * ReputMessageService从哪个物理偏移量开始转发消息给ConsumeQueue
+ * 和Index文件。如果允许重复转发,将reputFromOffset设置为
+ * CommitLog文件的提交指针。如果不允许重复转发,将
+ * reputFromOffset设置为CommitLog文件的内存中最大偏移量
+ */
class ReputMessageService extends ServiceThread {
private volatile long reputFromOffset = 0;
@@ -1811,6 +1950,11 @@ private boolean isCommitLogAvailable() {
return this.reputFromOffset < DefaultMessageStore.this.commitLog.getMaxOffset();
}
+ /**
+ * ReputMessageService线程每执行一次任务推送,休息1ms后继续
+ * 尝试推送消息到Consume Queue和Index文件中,消息消费转发由
+ * doReput()方法实现
+ */
private void doReput() {
if (this.reputFromOffset < DefaultMessageStore.this.commitLog.getMinOffset()) {
log.warn("The reputFromOffset={} is smaller than minPyOffset={}, this usually indicate that the dispatch behind too much and the commitlog has expired.",
@@ -1824,18 +1968,21 @@ private void doReput() {
break;
}
+ // 返回reputFromOffset偏移量开始的全部有效数据(CommitLog文件)。然后循环读取每一条消息
SelectMappedBufferResult result = DefaultMessageStore.this.commitLog.getData(reputFromOffset);
if (result != null) {
try {
this.reputFromOffset = result.getStartOffset();
for (int readSize = 0; readSize < result.getSize() && doNext; ) {
+ // 从result返回的ByteBuffer中循环读取消息,一次读取一条,创建Dispatch Request对象
DispatchRequest dispatchRequest =
DefaultMessageStore.this.commitLog.checkMessageAndReturnSize(result.getByteBuffer(), false, false);
int size = dispatchRequest.getBufferSize() == -1 ? dispatchRequest.getMsgSize() : dispatchRequest.getBufferSize();
if (dispatchRequest.isSuccess()) {
if (size > 0) {
+ // 执行CommitLog转发
DefaultMessageStore.this.doDispatch(dispatchRequest);
if (BrokerRole.SLAVE != DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole()
@@ -1893,6 +2040,7 @@ public void run() {
while (!this.isStopped()) {
try {
Thread.sleep(1);
+ // 每休息1ms就执行一次消息转发
this.doReput();
} catch (Exception e) {
DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
diff --git a/store/src/main/java/org/apache/rocketmq/store/DispatchRequest.java b/store/src/main/java/org/apache/rocketmq/store/DispatchRequest.java
index 89d47ced5ba..21ae8e803e2 100644
--- a/store/src/main/java/org/apache/rocketmq/store/DispatchRequest.java
+++ b/store/src/main/java/org/apache/rocketmq/store/DispatchRequest.java
@@ -19,20 +19,64 @@
import java.util.Map;
public class DispatchRequest {
+
+ /**
+ * 消息主题名称
+ */
private final String topic;
+ /**
+ * 队列ID
+ */
private final int queueId;
+ /**
+ * 消息物理偏移量
+ */
private final long commitLogOffset;
+ /**
+ * 消息长度
+ */
private int msgSize;
+ /**
+ * 消息过滤tag哈希码
+ */
private final long tagsCode;
+ /**
+ * 消息存储时间戳
+ */
private final long storeTimestamp;
+ /**
+ * 消息队列偏移量
+ */
private final long consumeQueueOffset;
+ /**
+ * 消息索引key。多个索引key用空格隔开,例如
+ * key1 key2
+ */
private final String keys;
+ /**
+ * 是否成功解析到完整的消息
+ */
private final boolean success;
+ /**
+ * 消息唯一键
+ */
private final String uniqKey;
+ /**
+ * 消息系统标记
+ */
private final int sysFlag;
+ /**
+ * 消息预处理事务偏移量
+ */
private final long preparedTransactionOffset;
+ /**
+ * 消息属性
+ */
private final Map propertiesMap;
+ /**
+ * 位图
+ */
private byte[] bitMap;
private int bufferSize = -1;//the buffer size maybe larger than the msg size if the message is wrapped by something
diff --git a/store/src/main/java/org/apache/rocketmq/store/MappedFile.java b/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
index 9185d21d874..f5765ad22c5 100644
--- a/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
+++ b/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
@@ -42,27 +42,73 @@
import sun.nio.ch.DirectBuffer;
public class MappedFile extends ReferenceResource {
+
+ /**
+ * 操作系统页大小
+ */
public static final int OS_PAGE_SIZE = 1024 * 4;
protected static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
-
+ /**
+ * 当前JVM实例中MappedFile的虚拟内存。
+ */
private static final AtomicLong TOTAL_MAPPED_VIRTUAL_MEMORY = new AtomicLong(0);
-
+ /**
+ * 当前JVM实例中MappedFile对象个数。
+ */
private static final AtomicInteger TOTAL_MAPPED_FILES = new AtomicInteger(0);
+ /**
+ * 当前文件的写指针,从0开始(内存映射文件中的写指针)。
+ */
protected final AtomicInteger wrotePosition = new AtomicInteger(0);
+ /**
+ * 当前文件的提交指针,如果开启transientStore-PoolEnable,则数据会存储在
+ * TransientStorePool中,然后提交到内存映射ByteBuffer中,再写入磁盘。
+ */
protected final AtomicInteger committedPosition = new AtomicInteger(0);
+ /**
+ * 将该指针之前的数据持久化存储到磁盘中。
+ */
private final AtomicInteger flushedPosition = new AtomicInteger(0);
protected int fileSize;
+ /**
+ * 文件通道
+ */
protected FileChannel fileChannel;
/**
* Message will put to here first, and then reput to FileChannel if writeBuffer is not null.
*/
+ /**
+ * 堆外内存ByteBuffer,如果不为空,数据首先将存储在该Buffer中,然后提交到MappedFile创建的
+ * FileChannel中。transientStorePoolEnable为true时不为空。
+ */
protected ByteBuffer writeBuffer = null;
+ /**
+ * 堆外内存池,该内存池中的内存会提供内存锁机制。transientStorePoolEnable为true时启用。
+ */
protected TransientStorePool transientStorePool = null;
+ /**
+ * 文件名称
+ */
private String fileName;
+ /**
+ * 该文件的初始偏移量
+ */
private long fileFromOffset;
+ /**
+ * 物理文件
+ */
private File file;
+ /**
+ * 物理文件对应的内存映射Buffer。
+ */
private MappedByteBuffer mappedByteBuffer;
+ /**
+ * 文件最后一次写入内容的时间
+ */
private volatile long storeTimestamp = 0;
+ /**
+ * 是否是MappedFileQueue队列中第一个文件。
+ */
private boolean firstCreateInQueue = false;
public MappedFile() {
@@ -159,6 +205,7 @@ private void init(final String fileName, final int fileSize) throws IOException
try {
this.fileChannel = new RandomAccessFile(this.file, "rw").getChannel();
+ // 创建内存映射 MappedByteBuffer
this.mappedByteBuffer = this.fileChannel.map(MapMode.READ_WRITE, 0, fileSize);
TOTAL_MAPPED_VIRTUAL_MEMORY.addAndGet(fileSize);
TOTAL_MAPPED_FILES.incrementAndGet();
@@ -200,12 +247,16 @@ public AppendMessageResult appendMessagesInner(final MessageExt messageExt, fina
assert messageExt != null;
assert cb != null;
+ // 首先获取MappedFile当前的写指针,如果currentPos大于或等于文件大小,表明文件已写满
int currentPos = this.wrotePosition.get();
if (currentPos < this.fileSize) {
+ // 通过slice()方法创建一个与原ByteBuffer共享的内存区,且拥有独立的position、limit、capacity等指针(零拷贝)
ByteBuffer byteBuffer = writeBuffer != null ? writeBuffer.slice() : this.mappedByteBuffer.slice();
+ // 并设置position为当前指针
byteBuffer.position(currentPos);
AppendMessageResult result;
+ // 执行写入操作,将消息内容存储到ByteBuffer中,这里只是将消息存储在MappedFile对应的内存映射Buffer中,并没有写入磁盘
if (messageExt instanceof MessageExtBrokerInner) {
result = cb.doAppend(this.getFileFromOffset(), byteBuffer, this.fileSize - currentPos, (MessageExtBrokerInner) messageExt);
} else if (messageExt instanceof MessageExtBatch) {
@@ -278,6 +329,8 @@ public int flush(final int flushLeastPages) {
if (writeBuffer != null || this.fileChannel.position() != 0) {
this.fileChannel.force(false);
} else {
+ // 直接调用mappedByteBuffer或fileChannel的force()方法将数据
+ // 写入磁盘,将内存中的数据持久化到磁盘中,
this.mappedByteBuffer.force();
}
} catch (Throwable e) {
@@ -294,6 +347,11 @@ public int flush(final int flushLeastPages) {
return this.getFlushedPosition();
}
+ /**
+ * 内存映射文件的提交动作由MappedFile的commit()方法实现
+ * @param commitLeastPages 本次提交的最小页数,如果待提交数据不满足commitLeastPages,则不执行本次提交操作,等待下次提交
+ * @return
+ */
public int commit(final int commitLeastPages) {
if (writeBuffer == null) {
//no need to commit data to file channel, so just regard wrotePosition as committedPosition.
@@ -350,6 +408,15 @@ private boolean isAbleToFlush(final int flushLeastPages) {
return write > flush;
}
+ /**
+ * 判断是否执行commit操作。如果文件已满,返回true。如果
+ * commitLeastPages大于0,则计算wrotePosition(当前writeBuffe的
+ * 写指针)与上一次提交的指针(committedPosition)的差值,将其除
+ * 以OS_PAGE_SIZE得到当前脏页的数量,如果大于commitLeastPages,
+ * 则返回true。如果commitLeastPages小于0,表示只要存在脏页就提交
+ * @param commitLeastPages
+ * @return
+ */
protected boolean isAbleToCommit(final int commitLeastPages) {
int flush = this.committedPosition.get();
int write = this.wrotePosition.get();
@@ -398,14 +465,29 @@ public SelectMappedBufferResult selectMappedBuffer(int pos, int size) {
return null;
}
+ /**
+ * 首先查找pos到当前最大可读指针之间的数据,因为在整个写入期
+ * 间都未曾改变MappedByteBuffer的指针,所以
+ * mappedByteBuffer.slice()方法返回的共享缓存区空间为整个
+ * MappedFile。然后通过设置byteBuffer的position为待查找的值,读
+ * 取字节为当前可读字节长度,最终返回的ByteBuffer的limit(可读最
+ * 大长度)为size。整个共享缓存区的容量为
+ * MappedFile#fileSize - pos,故在操作SelectMappedBufferResult时不
+ * 能对包含在里面的ByteBuffer调用flip()方法。
+ *
+ * @param pos
+ * @return
+ */
public SelectMappedBufferResult selectMappedBuffer(int pos) {
int readPosition = getReadPosition();
if (pos < readPosition && pos >= 0) {
if (this.hold()) {
ByteBuffer byteBuffer = this.mappedByteBuffer.slice();
+ // 设置读起始指针
byteBuffer.position(pos);
int size = readPosition - pos;
ByteBuffer byteBufferNew = byteBuffer.slice();
+ // 设置读结束指针
byteBufferNew.limit(size);
return new SelectMappedBufferResult(this.fileFromOffset + pos, byteBufferNew, size, this);
}
diff --git a/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java b/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
index cc145921cef..1210bb0f064 100644
--- a/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
+++ b/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
@@ -29,21 +29,47 @@
import org.apache.rocketmq.logging.InternalLogger;
import org.apache.rocketmq.logging.InternalLoggerFactory;
+/**
+ * RocketMQ通过使用内存映射文件来提高I/O访问性能,无论是
+ * CommitLog、Consume-Queue还是Index,单个文件都被设计为固定长
+ * 度,一个文件写满以后再创建新文件,文件名就为该文件第一条消息
+ * 对应的全局物理偏移量。
+ * RocketMQ使用MappedFile、MappedFileQueue来封装存储文件。
+ *
+ * MappedFileQueue是MappedFile的管理容器,MappedFileQueue对
+ * 存储目录进行封装,
+ *
+ * 例如CommitLog文件的存储场景下,存储路径为${ROCKET_HOME}/store/commitlog/,
+ * 该目录下会存在多个内存映射文件MappedFile
+ */
public class MappedFileQueue {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private static final InternalLogger LOG_ERROR = InternalLoggerFactory.getLogger(LoggerName.STORE_ERROR_LOGGER_NAME);
private static final int DELETE_FILES_BATCH_MAX = 10;
-
+ /**
+ * 存储目录
+ */
private final String storePath;
-
+ /**
+ * 单个文件的存储大小
+ */
private final int mappedFileSize;
-
+ /**
+ * MappedFile集合
+ */
private final CopyOnWriteArrayList mappedFiles = new CopyOnWriteArrayList();
-
+ /**
+ * 创建MappedFile服务类
+ */
private final AllocateMappedFileService allocateMappedFileService;
-
+ /**
+ * 当前刷盘指针,表示该指针之前的所有数据全部持久化到磁盘
+ */
private long flushedWhere = 0;
+ /**
+ * 当前数据提交指针,内存中ByteBuffer当前的写指针,该值大于、等于flushedWhere
+ */
private long committedWhere = 0;
private volatile long storeTimestamp = 0;
@@ -74,6 +100,13 @@ public void checkSelf() {
}
}
+ /**
+ * 根据消息存储时间戳查找MappdFile。从MappedFile列表中第一个
+ * 文件开始查找,找到第一个最后一次更新时间大于待查找时间戳的文
+ * 件,如果不存在,则返回最后一个MappedFile
+ * @param timestamp
+ * @return
+ */
public MappedFile getMappedFileByTime(final long timestamp) {
Object[] mfs = this.copyMappedFiles(0);
@@ -101,6 +134,18 @@ private Object[] copyMappedFiles(final int reservedMappedFiles) {
return mfs;
}
+ /**
+ * 删除offset之后的所有文件。遍历目录下的文件,如果
+ * 文件的尾部偏移量小于offset则跳过该文件,如果尾部的偏移量大于
+ * offset,则进一步比较offset与文件的开始偏移量。如果offset大于
+ * 文件的起始偏移量,说明当前文件包含了有效偏移量,设置
+ * MappedFile的flushedPosition和committedPosition。如果offset小
+ * 于文件的起始偏移量,说明该文件是有效文件后面创建的,则调用
+ * MappedFile#destroy方法释放MappedFile占用的内存资源(内存映射
+ * 与内存通道等),然后加入待删除文件列表中,最终调用
+ * deleteExpiredFile将文件从物理磁盘上删除
+ * @param offset
+ */
public void truncateDirtyFiles(long offset) {
List willRemoveFiles = new ArrayList();
@@ -144,6 +189,12 @@ void deleteExpiredFile(List files) {
}
}
+ /**
+ * 加载CommitLog文件,加载 ${ROCKET_HOME}/store/commitlog 目录下所有文件并按照文件名进行
+ * 排序。如果文件与配置文件的单个文件大小不一致,将忽略该目录下的所有文件,然后创建MappedFile对象。注意load()方法将
+ * wrotePosition、flushedPosition、committedPosition三个指针都设置为文件大小。
+ * @return
+ */
public boolean load() {
File dir = new File(this.storePath);
File[] files = dir.listFiles();
@@ -152,6 +203,7 @@ public boolean load() {
Arrays.sort(files);
for (File file : files) {
+ //
if (file.length() != this.mappedFileSize) {
log.warn(file + "\t" + file.length()
+ " length not matched message store config value, please check it manually");
@@ -285,6 +337,10 @@ public boolean resetOffset(long offset) {
return true;
}
+ /**
+ * 获取存储文件最小偏移量。
+ * @return
+ */
public long getMinOffset() {
if (!this.mappedFiles.isEmpty()) {
@@ -299,6 +355,10 @@ public long getMinOffset() {
return -1;
}
+ /**
+ * 获取存储文件最大偏移量。
+ * @return
+ */
public long getMaxOffset() {
MappedFile mappedFile = getLastMappedFile();
if (mappedFile != null) {
@@ -453,6 +513,13 @@ public boolean commit(final int commitLeastPages) {
}
/**
+ * 根据消息偏移量offset查找MappedFile,但是不能直接使用
+ * offset%mappedFileSize。这是因为使用了内存映射,只要是存在于存
+ * 储目录下的文件,都需要对应创建内存映射文件,RocketMQ采取定时删除存储文件的策略。
+ * 也就是说,在存储文件中,第一个文件不一定是00000000000000000000,因为该文件在某一
+ * 时刻会被删除,所以根据offset定位MappedFile的算法为(int)
+ * ((offset/this.mappedFileSize)-(firstMappedFile.getFileFromOffset()/this.mappedFileSize))
+ *
* Finds a mapped file by offset.
*
* @param offset Offset.
@@ -472,6 +539,7 @@ public MappedFile findMappedFileByOffset(final long offset, final boolean return
this.mappedFileSize,
this.mappedFiles.size());
} else {
+ // 计算文件索引
int index = (int) ((offset / this.mappedFileSize) - (firstMappedFile.getFileFromOffset() / this.mappedFileSize));
MappedFile targetFile = null;
try {
diff --git a/store/src/main/java/org/apache/rocketmq/store/StoreCheckpoint.java b/store/src/main/java/org/apache/rocketmq/store/StoreCheckpoint.java
index 7e6c706942b..c65c57c6e01 100644
--- a/store/src/main/java/org/apache/rocketmq/store/StoreCheckpoint.java
+++ b/store/src/main/java/org/apache/rocketmq/store/StoreCheckpoint.java
@@ -27,13 +27,27 @@
import org.apache.rocketmq.logging.InternalLogger;
import org.apache.rocketmq.logging.InternalLoggerFactory;
+/**
+ * checkpoint(检查点)文件的作用是记录CommitLog、
+ * ConsumeQueue、Index文件的刷盘时间点,文件固定长度为4KB,其中
+ * 只用该文件的前面24字节。
+ */
public class StoreCheckpoint {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private final RandomAccessFile randomAccessFile;
private final FileChannel fileChannel;
private final MappedByteBuffer mappedByteBuffer;
+ /**
+ * CommitLog文件刷盘时间点。
+ */
private volatile long physicMsgTimestamp = 0;
+ /**
+ * ConsumeQueue文件刷盘时间点。
+ */
private volatile long logicsMsgTimestamp = 0;
+ /**
+ * Index文件刷盘时间点。
+ */
private volatile long indexMsgTimestamp = 0;
public StoreCheckpoint(final String scpPath) throws IOException {
diff --git a/store/src/main/java/org/apache/rocketmq/store/TransientStorePool.java b/store/src/main/java/org/apache/rocketmq/store/TransientStorePool.java
index f692a99b1cc..1e3d9cb08e0 100644
--- a/store/src/main/java/org/apache/rocketmq/store/TransientStorePool.java
+++ b/store/src/main/java/org/apache/rocketmq/store/TransientStorePool.java
@@ -28,11 +28,28 @@
import org.apache.rocketmq.store.util.LibC;
import sun.nio.ch.DirectBuffer;
+/**
+ * TransientStorePool即短暂的存储池。RocketMQ单独创建了一个
+ * DirectByteBuffer内存缓存池,用来临时存储数据,数据先写入该内
+ * 存映射中,然后由Commit线程定时将数据从该内存复制到与目标物理
+ * 文件对应的内存映射中。RocketMQ引入该机制是为了提供一种内存锁
+ * 定,将当前堆外内存一直锁定在内存中,避免被进程将内存交换到磁
+ * 盘中。
+ */
public class TransientStorePool {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
+ /**
+ * availableBuffers个数,可在broker配置文件中通过transientStorePoolSize进行设置,默认为5
+ */
private final int poolSize;
+ /**
+ * 每个ByteBuffer的大小,默认为mapedFileSizeCommitLog,表明TransientStorePool为CommitLog文件服务。
+ */
private final int fileSize;
+ /**
+ * ByteBuffer容器,双端队列。
+ */
private final Deque availableBuffers;
private final MessageStoreConfig storeConfig;
diff --git a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
index 7891f71067a..cf4198edaa8 100644
--- a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
+++ b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
@@ -107,6 +107,9 @@ public class MessageStoreConfig {
private int maxTransferCountOnMessageInDisk = 8;
@ImportantField
private int accessMessageInMemoryMaxRatio = 40;
+ /**
+ * 是否开启消息索引
+ */
@ImportantField
private boolean messageIndexEnable = true;
private int maxHashSlotNum = 5000000;
diff --git a/store/src/main/java/org/apache/rocketmq/store/index/IndexFile.java b/store/src/main/java/org/apache/rocketmq/store/index/IndexFile.java
index 3d76b73dc28..49c9581d8d3 100644
--- a/store/src/main/java/org/apache/rocketmq/store/index/IndexFile.java
+++ b/store/src/main/java/org/apache/rocketmq/store/index/IndexFile.java
@@ -27,9 +27,27 @@
import org.apache.rocketmq.logging.InternalLoggerFactory;
import org.apache.rocketmq.store.MappedFile;
+/**
+ * ConsumeQueue是RocketMQ专门为消息订阅构建的索引文件,目的
+ * 是提高根据主题与消息队列检索消息的速度。另外,RocketMQ引入哈
+ * 希索引机制为消息建立索引,HashMap的设计包含两个基本点:哈希槽
+ * 与哈希冲突的链表结构。
+ *
+ * Index文件基于物理磁盘文件实现哈希索引。Index文件由40字节的文件头、
+ * 500万个哈希槽、2000万个Index条目组成,每个哈希槽4字节、每个Index
+ * 条目含有20个字节,分别为4字节索引key的哈希码、8字节消息物理偏移量、
+ * 4字节时间戳、4字节的前一个Index条目(哈希冲突的链表结构)。
+ */
public class IndexFile {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
+ /**
+ * 一个hash槽为4字节
+ */
private static int hashSlotSize = 4;
+ /**
+ * 一个Index条目为20字节固定长度。分别为4字节索引key的哈希码、8字节消息物理偏移量、
+ * 4字节时间戳、4字节的前一个Index条目(哈希冲突的链表结构)
+ */
private static int indexSize = 20;
private static int invalidIndex = 0;
private final int hashSlotNum;
@@ -89,10 +107,22 @@ public boolean destroy(final long intervalForcibly) {
return this.mappedFile.destroy(intervalForcibly);
}
+ /**
+ * 将消息索引键与消息偏移量的映射关系写入Index的实现
+ * @param key
+ * @param phyOffset
+ * @param storeTimestamp
+ * @return
+ */
public boolean putKey(final String key, final long phyOffset, final long storeTimestamp) {
+ // 当前已使用条目大于、等于允许最大条目数时,返回false,表示当前Index文件已写满。
if (this.indexHeader.getIndexCount() < this.indexNum) {
+ // 如果当前index文件未写满
+ // 计算key的hashcode
int keyHash = indexKeyHashMethod(key);
+ // 根据keyHash对哈希槽数量取余定位到哈希码对应的哈希槽下标
int slotPos = keyHash % this.hashSlotNum;
+ // 根据hash下标,计算hash槽在文件中的绝对偏移量
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
FileLock fileLock = null;
@@ -101,11 +131,13 @@ public boolean putKey(final String key, final long phyOffset, final long storeTi
// fileLock = this.fileChannel.lock(absSlotPos, hashSlotSize,
// false);
+ // 获取hash槽中的数据,代表当前 hash 槽指向的index条目索引
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()) {
slotValue = invalidIndex;
}
+ // 计算待存储消息的时间戳与第一条消息时间戳的差值,并转换成秒,
long timeDiff = storeTimestamp - this.indexHeader.getBeginTimestamp();
timeDiff = timeDiff / 1000;
@@ -118,17 +150,29 @@ public boolean putKey(final String key, final long phyOffset, final long storeTi
timeDiff = 0;
}
+ // 计算Index条目在文件中的绝对位置
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ this.indexHeader.getIndexCount() * indexSize;
-
+ // 写入 4字节 key hash,值得注意的是,Index文件条目中存储的不是消息索
+ //引key,而是消息属性key的哈希,在根据key查找时需要根据消息物理
+ //偏移量找到消息,进而验证消息key的值。之所以只存储哈希,而不存
+ //储具体的key,是为了将Index条目设计为定长结构,才能方便地检索
+ //与定位条目
this.mappedByteBuffer.putInt(absIndexPos, keyHash);
+ // 写入 8字节 消息物理偏移量
this.mappedByteBuffer.putLong(absIndexPos + 4, phyOffset);
+ // 写入 4字节 时间戳
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8, (int) timeDiff);
+ // 写入 4字节 前一个Index条目(哈希冲突的链表结构)
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8 + 4, slotValue);
+ // 写入 hash 槽中的数据,是当前 index 条目的下标索引
this.mappedByteBuffer.putInt(absSlotPos, this.indexHeader.getIndexCount());
+ // 更新文件索引头信息。如果当前文件只包含一个条目,
+ //则更新beginPhyOffset、beginTimestamp、endPhyOffset、
+ //endTimestamp以及当前文件使用索引条目等信息
if (this.indexHeader.getIndexCount() <= 1) {
this.indexHeader.setBeginPhyOffset(phyOffset);
this.indexHeader.setBeginTimestamp(storeTimestamp);
@@ -186,11 +230,23 @@ public boolean isTimeMatched(final long begin, final long end) {
return result;
}
+ /**
+ * RocketMQ根据索引key查找消息的实现方法
+ * @param phyOffsets 查找到的消息物理偏移量。
+ * @param key 索引key
+ * @param maxNum 本次查找最大消息条数
+ * @param begin 开始时间戳
+ * @param end 结束时间戳
+ * @param lock
+ */
public void selectPhyOffset(final List phyOffsets, final String key, final int maxNum,
final long begin, final long end, boolean lock) {
if (this.mappedFile.hold()) {
+ // 算出 key 的 hash值
int keyHash = indexKeyHashMethod(key);
+ // keyHash对哈希槽数量取余,定位到哈希码对应的哈希槽下标
int slotPos = keyHash % this.hashSlotNum;
+ // 哈希槽的物理地址为IndexHeader(40字节)加上下标乘以每个哈希槽的大小(4字节)得到hash槽在Index文件中的绝对地址
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
FileLock fileLock = null;
@@ -200,6 +256,7 @@ public void selectPhyOffset(final List phyOffsets, final String key, final
// hashSlotSize, true);
}
+ // 获取 hash 槽中的数据,该值为hash槽对应的Index链表中第一个Index条目的索引下标
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
// if (fileLock != null) {
// fileLock.release();
@@ -208,20 +265,25 @@ public void selectPhyOffset(final List phyOffsets, final String key, final
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()
|| this.indexHeader.getIndexCount() <= 1) {
+ // 代表当前hash槽的Index条目为空,即key未命中任何条目
} else {
for (int nextIndexToRead = slotValue; ; ) {
if (phyOffsets.size() >= maxNum) {
break;
}
+ // 根据Index下标,计算index条目在文件中的绝对地址
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ nextIndexToRead * indexSize;
+ // 获取Index条目的 key hash
int keyHashRead = this.mappedByteBuffer.getInt(absIndexPos);
+ // 获取消息物理偏移量
long phyOffsetRead = this.mappedByteBuffer.getLong(absIndexPos + 4);
-
+ // 获取时间戳
long timeDiff = (long) this.mappedByteBuffer.getInt(absIndexPos + 4 + 8);
+ // 获取链表中下一个Index条目索引下标
int prevIndexRead = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8 + 4);
if (timeDiff < 0) {
@@ -232,7 +294,7 @@ public void selectPhyOffset(final List phyOffsets, final String key, final
long timeRead = this.indexHeader.getBeginTimestamp() + timeDiff;
boolean timeMatched = (timeRead >= begin) && (timeRead <= end);
-
+ // 判断key hash是否一致 以及 消息存储时间是否满足要求
if (keyHash == keyHashRead && timeMatched) {
phyOffsets.add(phyOffsetRead);
}
@@ -242,7 +304,7 @@ public void selectPhyOffset(final List phyOffsets, final String key, final
|| prevIndexRead == nextIndexToRead || timeRead < begin) {
break;
}
-
+ // 将指针指向链表下一个元素
nextIndexToRead = prevIndexRead;
}
}
diff --git a/store/src/main/java/org/apache/rocketmq/store/index/IndexHeader.java b/store/src/main/java/org/apache/rocketmq/store/index/IndexHeader.java
index 44021cd5895..dc9405bbf21 100644
--- a/store/src/main/java/org/apache/rocketmq/store/index/IndexHeader.java
+++ b/store/src/main/java/org/apache/rocketmq/store/index/IndexHeader.java
@@ -29,12 +29,29 @@ public class IndexHeader {
private static int hashSlotcountIndex = 32;
private static int indexCountIndex = 36;
private final ByteBuffer byteBuffer;
+ /**
+ * Index文件中消息的最小存储时间
+ */
private AtomicLong beginTimestamp = new AtomicLong(0);
+ /**
+ * Index文件中消息的最大存储时间。
+ */
private AtomicLong endTimestamp = new AtomicLong(0);
+ /**
+ * Index文件中消息的最小物理偏移量(CommitLog文件偏移量)
+ */
private AtomicLong beginPhyOffset = new AtomicLong(0);
+ /**
+ * Index文件中消息的最大物理偏移量(CommitLog文件偏移量)。
+ */
private AtomicLong endPhyOffset = new AtomicLong(0);
private AtomicInteger hashSlotCount = new AtomicInteger(0);
-
+ /**
+ * Index条目列表当前已使用的个数,Index条目在Index条目列表中按顺序存储。
+ * 一个Index默认包含500万个哈希槽。哈希槽存储的是落在该哈希
+ * 槽的哈希码最新的Index索引。默认一个Index文件包含2000万个条
+ * 目
+ */
private AtomicInteger indexCount = new AtomicInteger(1);
public IndexHeader(final ByteBuffer byteBuffer) {
diff --git a/store/src/main/java/org/apache/rocketmq/store/index/IndexService.java b/store/src/main/java/org/apache/rocketmq/store/index/IndexService.java
index bf17ecffeaf..a9e72abeb9f 100644
--- a/store/src/main/java/org/apache/rocketmq/store/index/IndexService.java
+++ b/store/src/main/java/org/apache/rocketmq/store/index/IndexService.java
@@ -54,6 +54,13 @@ public IndexService(final DefaultMessageStore store) {
StorePathConfigHelper.getStorePathIndex(store.getMessageStoreConfig().getStorePathRootDir());
}
+ /**
+ * 加载Index文件
+ *
+ * 如果上次异常退出,而且Index文件刷盘时间小于该文件最大的消息时间戳,则该文件将立即销毁
+ * @param lastExitOK
+ * @return
+ */
public boolean load(final boolean lastExitOK) {
File dir = new File(this.storePath);
File[] files = dir.listFiles();
@@ -206,6 +213,9 @@ public void buildIndex(DispatchRequest req) {
String topic = msg.getTopic();
String keys = msg.getKeys();
if (msg.getCommitLogOffset() < endPhyOffset) {
+ // 获取或创建Index文件并获取所有文件最大的物理偏移
+ //量。如果该消息的物理偏移量小于Index文件中的物理偏移量,则说明
+ //是重复数据,忽略本次索引构建
return;
}
@@ -219,6 +229,7 @@ public void buildIndex(DispatchRequest req) {
return;
}
+ // 如果消息的唯一键不为空,则添加到哈希索引中,以便加速根据唯一键检索消息
if (req.getUniqKey() != null) {
indexFile = putKey(indexFile, msg, buildKey(topic, req.getUniqKey()));
if (indexFile == null) {
@@ -227,6 +238,7 @@ public void buildIndex(DispatchRequest req) {
}
}
+ // 构建索引键,RocketMQ支持为同一个消息建立多个索引,多个索引键用空格分开
if (keys != null && keys.length() > 0) {
String[] keyset = keys.split(MessageConst.KEY_SEPARATOR);
for (int i = 0; i < keyset.length; i++) {
From fb223db4736c01d37805d8580fa3e40ade050663 Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Thu, 12 Oct 2023 15:35:25 +0800
Subject: [PATCH 04/18] =?UTF-8?q?=E5=AE=8C=E5=96=84=E6=B6=88=E6=81=AF?=
=?UTF-8?q?=E5=AD=98=E5=82=A8=E6=B5=81=E7=A8=8B=E4=BB=A3=E7=A0=81=E6=B3=A8?=
=?UTF-8?q?=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../org/apache/rocketmq/store/CommitLog.java | 62 +++++++++++++++++++
.../rocketmq/store/DefaultMessageStore.java | 44 ++++++++++++-
.../org/apache/rocketmq/store/MappedFile.java | 1 +
.../rocketmq/store/MappedFileQueue.java | 3 +
.../store/config/MessageStoreConfig.java | 47 ++++++++++++++
5 files changed, 154 insertions(+), 3 deletions(-)
diff --git a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
index 581ff40afa7..b66d4f692f4 100644
--- a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
+++ b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java
@@ -446,6 +446,7 @@ public void recoverAbnormally(long maxPhyOffsetOfConsumeQueue) {
MappedFile mappedFile = null;
for (; index >= 0; index--) {
mappedFile = mappedFiles.get(index);
+ // 判断文件的魔数,如果不是MESSAGE_MAGIC_CODE,则返回false,表示该文件不符合CommitLog文件的存储格式
if (this.isMappedFileMatchedRecover(mappedFile)) {
log.info("recover from this mapped file " + mappedFile.getFileName());
break;
@@ -471,9 +472,11 @@ public void recoverAbnormally(long maxPhyOffsetOfConsumeQueue) {
if (this.defaultMessageStore.getMessageStoreConfig().isDuplicationEnable()) {
if (dispatchRequest.getCommitLogOffset() < this.defaultMessageStore.getConfirmOffset()) {
+ // 遍历MappedFile中的消息,验证消息的合法性,并将消息重新转发到ConsumeQueue与Index文件
this.defaultMessageStore.doDispatch(dispatchRequest);
}
} else {
+ // 遍历MappedFile中的消息,验证消息的合法性,并将消息重新转发到ConsumeQueue与Index文件
this.defaultMessageStore.doDispatch(dispatchRequest);
}
}
@@ -534,9 +537,14 @@ private boolean isMappedFileMatchedRecover(final MappedFile mappedFile) {
int msgStoreTimePos = 4 + 4 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + bornhostLength;
long storeTimestamp = byteBuffer.getLong(msgStoreTimePos);
if (0 == storeTimestamp) {
+ //如果文件中第一条消息的存储时间等于0,则返回false,说明该消息的存储文件中未存储任何消息
return false;
}
+ // 对比文件第一条消息的时间戳与检测点。如果文件第一条消息的时间戳小于文件检测点,说明该文件的部分消息是可靠的,
+ // 则从该文件开始恢复。checkpoint文件中保存了CommitLog、ConsumeQueue、Index的文件刷盘点,RocketMQ默认选择CommitLog文
+ // 件与ConsumeQueue这两个文件的刷盘点中较小值与CommitLog文件第一条消息的时间戳做对比,如果messageIndexEnable为true,表示Index
+ // 文件的刷盘时间点也参与计算
if (this.defaultMessageStore.getMessageStoreConfig().isMessageIndexEnable()
&& this.defaultMessageStore.getMessageStoreConfig().isMessageIndexSafe()) {
if (storeTimestamp <= this.defaultMessageStore.getStoreCheckpoint().getMinTimestampIndex()) {
@@ -705,13 +713,24 @@ public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
return putMessageResult;
}
+ /**
+ * 触发刷盘CommitLog
+ * @param result
+ * @param putMessageResult
+ * @param messageExt
+ */
public void handleDiskFlush(AppendMessageResult result, PutMessageResult putMessageResult, MessageExt messageExt) {
// Synchronization flush
if (FlushDiskType.SYNC_FLUSH == this.defaultMessageStore.getMessageStoreConfig().getFlushDiskType()) {
+ // 如果是同步刷盘
+
final GroupCommitService service = (GroupCommitService) this.flushCommitLogService;
if (messageExt.isWaitStoreMsgOK()) {
+ // 构建GroupCommitRequest同步任务并提交到GroupCommitRequest。
GroupCommitRequest request = new GroupCommitRequest(result.getWroteOffset() + result.getWroteBytes());
+ // 将同步任务GroupCommitRequest提交到GroupCommitService线程
service.putRequest(request);
+ // 等待同步刷盘任务完成,同步刷盘线程(GroupCommitService)刷写完毕后会唤醒当前线程,超时时间默认为5s,如果超时则返回刷盘错误,刷盘成功后正常返回给调用方。
boolean flushOK = request.waitForFlush(this.defaultMessageStore.getMessageStoreConfig().getSyncFlushTimeout());
if (!flushOK) {
log.error("do groupcommit, wait for flush failed, topic: " + messageExt.getTopic() + " tags: " + messageExt.getTags()
@@ -724,9 +743,14 @@ public void handleDiskFlush(AppendMessageResult result, PutMessageResult putMess
}
// Asynchronous flush
else {
+ // 异步刷盘
if (!this.defaultMessageStore.getMessageStoreConfig().isTransientStorePoolEnable()) {
+ // 如果transientStorePoolEnable为false,消息将追加到与物理文件直接映射的内存中,然后写入磁盘
flushCommitLogService.wakeup();
} else {
+ // 如果transientStorePoolEnable为true,RocketMQ会单独申请一个与目标物理文件(CommitLog)同样大
+ // 小的堆外内存,该堆外内存将使用内存锁定,确保不会被置换到虚拟内存中去,消息首先追加到堆外内存,然后提交到与物理文件的内存
+ // 映射中,再经flush操作到磁盘
commitLogService.wakeup();
}
}
@@ -1001,6 +1025,7 @@ abstract class FlushCommitLogService extends ServiceThread {
protected static final int RETRY_TIMES_OVER = 10;
}
+
class CommitRealTimeService extends FlushCommitLogService {
private long lastCommitTimestamp = 0;
@@ -1054,6 +1079,9 @@ public void run() {
}
}
+ /**
+ * CommitLog 内存映射机制定时刷盘线程
+ */
class FlushRealTimeService extends FlushCommitLogService {
private long lastFlushTimestamp = 0;
private long printTimes = 0;
@@ -1062,11 +1090,15 @@ public void run() {
CommitLog.log.info(this.getServiceName() + " service started");
while (!this.isStopped()) {
+ // 默认为false,表示使用await方法等待;如果为true,表示使用Thread.sleep方法等待
boolean flushCommitLogTimed = CommitLog.this.defaultMessageStore.getMessageStoreConfig().isFlushCommitLogTimed();
+ // FlushRealTimeService线程任务运行间隔时间
int interval = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushIntervalCommitLog();
+ // 一次刷盘任务至少包含页数,如果待写入数据不足,小于该参数配置的值,将忽略本次刷盘任务,默认4页
int flushPhysicQueueLeastPages = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogLeastPages();
+ // 两次真实刷盘任务的最大间隔时间,默认10s
int flushPhysicQueueThoroughInterval =
CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogThoroughInterval();
@@ -1075,12 +1107,15 @@ public void run() {
// Print flush progress
long currentTimeMillis = System.currentTimeMillis();
if (currentTimeMillis >= (this.lastFlushTimestamp + flushPhysicQueueThoroughInterval)) {
+ // 如果距上次提交数据的间隔时间超过flushPhysicQueueThoroughInterval,则本次刷盘任务将忽略
+ //flushPhysicQueueLeastPages,也就是如果待写入数据小于指定页数,也执行刷盘操作。
this.lastFlushTimestamp = currentTimeMillis;
flushPhysicQueueLeastPages = 0;
printFlushProgress = (printTimes++ % 10) == 0;
}
try {
+ // 执行一次刷盘任务前先等待指定时间间隔,然后执行刷盘任务
if (flushCommitLogTimed) {
Thread.sleep(interval);
} else {
@@ -1092,9 +1127,11 @@ public void run() {
}
long begin = System.currentTimeMillis();
+ // 执行刷盘操作
CommitLog.this.mappedFileQueue.flush(flushPhysicQueueLeastPages);
long storeTimestamp = CommitLog.this.mappedFileQueue.getStoreTimestamp();
if (storeTimestamp > 0) {
+ // 更新checkpoint中存储的CommitLog文件刷盘时间点。
CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
}
long past = System.currentTimeMillis() - begin;
@@ -1136,8 +1173,15 @@ public long getJointime() {
}
public static class GroupCommitRequest {
+
+ /**
+ * 刷盘点偏移量
+ */
private final long nextOffset;
private final CountDownLatch countDownLatch = new CountDownLatch(1);
+ /**
+ * 刷盘结果
+ */
private volatile boolean flushOK = false;
public GroupCommitRequest(long nextOffset) {
@@ -1168,13 +1212,21 @@ public boolean waitForFlush(long timeout) {
* GroupCommit Service
*/
class GroupCommitService extends FlushCommitLogService {
+
+ /**
+ * 同步刷盘任务暂存容器
+ */
private volatile List requestsWrite = new ArrayList();
+ /**
+ * GroupCommitService线程每次处理的request容器,这是一个设计亮点,避免了任务提交与任务执行的锁冲突
+ */
private volatile List requestsRead = new ArrayList();
public synchronized void putRequest(final GroupCommitRequest request) {
synchronized (this.requestsWrite) {
this.requestsWrite.add(request);
}
+ // 将hasNotified设置为true,这样GroupCommitService线程就会立即执行刷盘操作
if (hasNotified.compareAndSet(false, true)) {
waitPoint.countDown(); // notify
}
@@ -1197,15 +1249,19 @@ private void doCommit() {
flushOK = CommitLog.this.mappedFileQueue.getFlushedWhere() >= req.getNextOffset();
if (!flushOK) {
+ // 同步刷盘
CommitLog.this.mappedFileQueue.flush(0);
}
}
+ // 每执行一次刷盘操作后,立即调用GroupCommitRequest#wakeupCustomer唤醒消息发送线程并通知刷盘结果
req.wakeupCustomer(flushOK);
}
long storeTimestamp = CommitLog.this.mappedFileQueue.getStoreTimestamp();
if (storeTimestamp > 0) {
+ //处理完所有同步刷盘任务后,更新刷盘检测点StoreCheckpoint中的physicMsg Timestamp,但并没有执行检测点的
+ //刷盘操作,检测点的刷盘操作将在刷写消息队列文件时触发
CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
}
@@ -1218,12 +1274,18 @@ private void doCommit() {
}
}
+ /**
+ * GroupCommitService组提交线程,每处理一批刷盘请求后,如果
+ * 后续有待刷盘的请求需要处理(hasNotified=true),组提交线程会马不停蹄地处理下一
+ * 批;如果没有待处理的任务,则休息10ms,即每10ms空转一次
+ */
public void run() {
CommitLog.log.info(this.getServiceName() + " service started");
while (!this.isStopped()) {
try {
this.waitForRunning(10);
+ // 执行刷盘操作
this.doCommit();
} catch (Exception e) {
CommitLog.log.warn(this.getServiceName() + " service has exception. ", e);
diff --git a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
index 8992df9a80d..3f61af65c3a 100644
--- a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
+++ b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
@@ -1296,6 +1296,9 @@ private void createTempFile() throws IOException {
private void addScheduleTask() {
+ // RocketMQ每隔10s调度一次cleanFilesPeriodically,检测是否需
+ // 要清除过期文件。执行频率可以通过cleanResourceInterval进行设
+ //置,默认10s
this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
@@ -1435,10 +1438,10 @@ private void recover(final boolean lastExitOK) {
long maxPhyOffsetOfConsumeQueue = this.recoverConsumeQueue();
if (lastExitOK) {
- // 正常停止
+ // 正常停止,恢复CommitLog
this.commitLog.recoverNormally(maxPhyOffsetOfConsumeQueue);
} else {
- // 异常停止
+ // 异常停止,恢复CommitLog
this.commitLog.recoverAbnormally(maxPhyOffsetOfConsumeQueue);
}
@@ -1628,12 +1631,23 @@ public void dispatch(DispatchRequest request) {
}
}
+ /**
+ * CommitLog过期清理实现
+ */
class CleanCommitLogService {
private final static int MAX_MANUAL_DELETE_FILE_TIMES = 20;
+ /**
+ * 通过系统参数 -Drocketmq.broker.diskSpaceWarningLevelRatio进行设置,
+ * 默认0.90。如果磁盘分区使用率超过该阈值,将设置磁盘为不可写,此时会拒绝写入新消息
+ */
private final double diskSpaceWarningLevelRatio =
Double.parseDouble(System.getProperty("rocketmq.broker.diskSpaceWarningLevelRatio", "0.90"));
+ /**
+ * 通过系统参数 -Drocketmq.broker.diskSpaceCleanForciblyRatio进行设置,
+ * 默认0.85。如果磁盘分区使用超过该阈值,建议立即执行过期文件删除,但不会拒绝写入新消息
+ */
private final double diskSpaceCleanForciblyRatio =
Double.parseDouble(System.getProperty("rocketmq.broker.diskSpaceCleanForciblyRatio", "0.85"));
private long lastRedeleteTimestamp = 0;
@@ -1659,14 +1673,24 @@ public void run() {
private void deleteExpiredFiles() {
int deleteCount = 0;
+ // 文件保留时间,如果超过了该时间,则认为是过期文件,可以被删除。单位:小时
long fileReservedTime = DefaultMessageStore.this.getMessageStoreConfig().getFileReservedTime();
+ // 删除物理文件的间隔时间,在一次清除过程中,可能需要被删除的文件不止一个,该值指定两次删除文件的间隔时间
int deletePhysicFilesInterval = DefaultMessageStore.this.getMessageStoreConfig().getDeleteCommitLogFilesInterval();
+ // 在清除过期文件时,如果该文件被其他线程占用(引用次数大于0,比如读取消息),此时会
+ // 阻止此次删除任务,同时在第一次试图删除该文件时记录当前时间戳,destroyMapedFileIntervalForcibly表示第一次拒绝删除之后能
+ // 保留文件的最大时间,在此时间内,同样可以被拒绝删除,超过该时间后,会将引用次数设置为负数,文件将被强制删除
int destroyMapedFileIntervalForcibly = DefaultMessageStore.this.getMessageStoreConfig().getDestroyMapedFileIntervalForcibly();
+ // 指定删除文件的时间点,RocketMQ通过deleteWhen设置每天在固定时间执行一次删除过期文件操作,默认凌晨4点
boolean timeup = this.isTimeToDelete();
+ // 检查磁盘空间是否充足,如果磁盘占用达到阈值(默认90%)则会将broker设置为不可写状态,
+ // 如果磁盘空间不充足(默认使用率达到75%),则返回true,表示应该触发过期文件删除操作
boolean spacefull = this.isSpaceToDelete();
+ // 预留手工触发机制,可以通过调用excuteDeleteFilesManualy方法手工触发删除过期文件的操作,目前RocketMQ暂未封装手工触发文件删除的命令
boolean manualDelete = this.manualDeleteFileSeveralTimes > 0;
+ // 满足任意一种情况,则触发过期文件删除功能
if (timeup || spacefull || manualDelete) {
if (manualDelete)
@@ -1683,6 +1707,7 @@ private void deleteExpiredFiles() {
fileReservedTime *= 60 * 60 * 1000;
+ // 执行删除操作
deleteCount = DefaultMessageStore.this.commitLog.deleteExpiredFile(fileReservedTime, deletePhysicFilesInterval,
destroyMapedFileIntervalForcibly, cleanAtOnce);
if (deleteCount > 0) {
@@ -1719,35 +1744,45 @@ private boolean isTimeToDelete() {
}
private boolean isSpaceToDelete() {
+ // 表示CommitLog文件、ConsumeQueue文件所在磁盘分区的最大使用量,如果超过该值,则需要立即清除过期文件,默认 75%
double ratio = DefaultMessageStore.this.getMessageStoreConfig().getDiskMaxUsedSpaceRatio() / 100.0;
+ // 表示是否需要立即执行清除过期文件的操作
cleanImmediately = false;
+ // CommitLog存储空间计算
{
String storePathPhysic = DefaultMessageStore.this.getMessageStoreConfig().getStorePathCommitLog();
+ // 当前CommitLog目录所在的磁盘分区的磁盘使用率,通过File#getTotalSpace方法获取文件所在磁盘分区的总容量,
+ // 通过File#getFreeSpace方法获取文件所在磁盘分区的剩余容量
double physicRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathPhysic);
if (physicRatio > diskSpaceWarningLevelRatio) {
+ // 将broker设置为不可写状态
boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskFull();
if (diskok) {
DefaultMessageStore.log.error("physic disk maybe full soon " + physicRatio + ", so mark disk full");
}
-
+ // 设置为需要立即清理
cleanImmediately = true;
} else if (physicRatio > diskSpaceCleanForciblyRatio) {
+ // 设置为需要立即清理
cleanImmediately = true;
} else {
+ // 恢复broker为可写状态
boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskOK();
if (!diskok) {
DefaultMessageStore.log.info("physic disk space OK " + physicRatio + ", so mark disk ok");
}
}
+ // 判断磁盘使用率,是否达到指定值(默认75%)
if (physicRatio < 0 || physicRatio > ratio) {
DefaultMessageStore.log.info("physic disk maybe full soon, so reclaim space, " + physicRatio);
return true;
}
}
+ // ConsumeQueue 存储空间计算
{
String storePathLogics = StorePathConfigHelper
.getStorePathConsumeQueue(DefaultMessageStore.this.getMessageStoreConfig().getStorePathRootDir());
@@ -1786,6 +1821,9 @@ public void setManualDeleteFileSeveralTimes(int manualDeleteFileSeveralTimes) {
}
}
+ /**
+ * ConsumeQueue过期清理实现
+ */
class CleanConsumeQueueService {
private long lastPhysicalMinOffset = 0;
diff --git a/store/src/main/java/org/apache/rocketmq/store/MappedFile.java b/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
index f5765ad22c5..fe22139ba7f 100644
--- a/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
+++ b/store/src/main/java/org/apache/rocketmq/store/MappedFile.java
@@ -526,6 +526,7 @@ public boolean destroy(final long intervalForcibly) {
log.info("close file channel " + this.fileName + " OK");
long beginTime = System.currentTimeMillis();
+ // 删除文件
boolean result = this.file.delete();
log.info("delete file[REF:" + this.getRefCount() + "] " + this.fileName
+ (result ? " OK, " : " Failed, ") + "W:" + this.getWrotePosition() + " M:"
diff --git a/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java b/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
index 1210bb0f064..b6e6c21427c 100644
--- a/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
+++ b/store/src/main/java/org/apache/rocketmq/store/MappedFileQueue.java
@@ -409,6 +409,9 @@ public int deleteExpiredFileByTime(final long expiredTime,
for (int i = 0; i < mfsLength; i++) {
MappedFile mappedFile = (MappedFile) mfs[i];
long liveMaxTimestamp = mappedFile.getLastModifiedTimestamp() + expiredTime;
+ // 从倒数第二个文件开始遍历,计算文件的最大存活时间,即文件的最后一次更新时间+文件存活时间(默认
+ // 72小时),如果当前时间大于文件的最大存活时间或需要强制删除文件(当磁盘使用超过设定的阈值)时,执行MappedFile#destroy方
+ // 法,清除MappedFile占有的相关资源,如果执行成功,将该文件加入待删除文件列表中,最后统一执行File#delete方法将文件从物理磁盘中删除
if (System.currentTimeMillis() >= liveMaxTimestamp || cleanImmediately) {
if (mappedFile.destroy(intervalForcibly)) {
files.add(mappedFile);
diff --git a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
index cf4198edaa8..16d48e072f3 100644
--- a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
+++ b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java
@@ -44,6 +44,9 @@ public class MessageStoreConfig {
// CommitLog flush interval
// flush data to disk
+ /**
+ * CommitLog 刷盘线程任务运行间隔时间
+ */
@ImportantField
private int flushIntervalCommitLog = 500;
@@ -68,14 +71,29 @@ public class MessageStoreConfig {
// CommitLog removal interval
private int deleteCommitLogFilesInterval = 100;
// ConsumeQueue removal interval
+ /**
+ * 删除物理文件的间隔时间,在一次清除过程中,可能需要被删除的文件不止一个,该值指定两次删除文件的间隔时间
+ */
private int deleteConsumeQueueFilesInterval = 100;
+ /**
+ * 在清除过期文件时,如果该文件被其他线程占用(引用次数大于0,比如读取消息),此时会
+ * 阻止此次删除任务,同时在第一次试图删除该文件时记录当前时间戳,destroyMapedFileIntervalForcibly表示第一次拒绝删除之后能
+ * 保留文件的最大时间,在此时间内,同样可以被拒绝删除,超过该时间后,会将引用次数设置为负数,文件将被强制删除
+ */
private int destroyMapedFileIntervalForcibly = 1000 * 120;
private int redeleteHangedFileInterval = 1000 * 120;
// When to delete,default is at 4 am
@ImportantField
private String deleteWhen = "04";
+ /**
+ * 表示CommitLog文件、ConsumeQueue文件所在磁盘分区的最大使用量,如果超过该值,则需要立即清除过
+ * 期文件,默认 75,如果超过了 75% 就会触发清理过期文件的动作
+ */
private int diskMaxUsedSpaceRatio = 75;
// The number of hours to keep a log file before deleting it (in hours)
+ /**
+ * 日志文件保存的小时数
+ */
@ImportantField
private int fileReservedTime = 72;
// Flow control for ConsumeQueue
@@ -87,6 +105,9 @@ public class MessageStoreConfig {
// This check adds some overhead,so it may be disabled in cases seeking extreme performance.
private boolean checkCRCOnRecover = true;
// How many pages are to be flushed when flush CommitLog
+ /**
+ * 一次刷盘任务至少包含页数,如果待写入数据不足,小于该参数配置的值,将忽略本次刷盘任务,默认4页
+ */
private int flushCommitLogLeastPages = 4;
// How many pages are to be committed when commit data to file
private int commitCommitLogLeastPages = 4;
@@ -94,6 +115,9 @@ public class MessageStoreConfig {
private int flushLeastPagesWhenWarmMapedFile = 1024 / 4 * 16;
// How many pages are to be flushed when flush ConsumeQueue
private int flushConsumeQueueLeastPages = 2;
+ /**
+ * 两次真实刷盘任务的最大间隔时间,默认10s
+ */
private int flushCommitLogThoroughInterval = 1000 * 10;
private int commitCommitLogThoroughInterval = 200;
private int flushConsumeQueueThoroughInterval = 1000 * 60;
@@ -131,6 +155,10 @@ public class MessageStoreConfig {
private int syncFlushTimeout = 1000 * 5;
private String messageDelayLevel = "1s 5s 10s 30s 1m 2m 3m 4m 5m 6m 7m 8m 9m 10m 20m 30m 1h 2h";
private long flushDelayOffsetInterval = 1000 * 10;
+ /**
+ * 在一些情况下,即使CommitLog文件已经过期,也可能因为某些原因(例如消费者消费速度慢等)而无法被清理。
+ * 此时,如果cleanFileForciblyEnable参数被设置为true,那么RocketMQ将会强制地清理这些文件,从而释放磁盘空间。
+ */
@ImportantField
private boolean cleanFileForciblyEnable = true;
private boolean warmMapedFileEnable = false;
@@ -141,6 +169,25 @@ public class MessageStoreConfig {
private long osPageCacheBusyTimeOutMills = 1000;
private int defaultQueryMaxNum = 32;
+ /**
+ * 是否开启 transientStorePool 机制
+ *
+ * 开启TransientStorePool机制,消息数据首先被写入到堆外内存(也就是TransientStorePool中的DirectByteBuffer)。然后由刷盘线程(CommitRealTimeService)
+ * 在合适的时机把数据从堆外内存复制到内存映射区域(MappedByteBuffer),并调用force()方法将数据写入磁盘。
+ *
+ * 有了 TransientStorePool 的存在,消息可以批量写入内存缓冲区,RocketMQ也就可以有效地控制何时以及如何将脏页(Dirty Page,即已修改但还未写入磁盘的内存页)
+ * 刷写到磁盘,避免了操作系统自动进行的随机性、不可预测的脏页刷写操作,从而提升了I/O性能,特别是在大量写入请求的场景下。
+ *
+ * 值得一提的是,使用TransientStorePool并非没有代价。因为需要额外的一次内存复制操作,即从堆外内存复制到内存映射区域。
+ * 但是在大多数情况下,通过控制脏页刷写带来的性能提升,相比于增加的内存复制开销,更加明显。
+ *
+ * 并且开启transientStorePool机制后,由于消息数据会先写入 堆外内存,然后由特定后台线程(CommitRealTimeService),将堆外内存中的修改
+ * commit 到 内存映射区域,而这一步如果发生 断电、服务宕机,都会产生消息丢失。而传统的异步刷盘,由于消息是直接写入内存映射区域,所以服务宕机
+ * 并不会丢失数据,只有在服务器突然断电时才会丢失少量数据。
+ *
+ * 所以整体来看消息存储可靠性: 同步刷盘(任何情况不会丢失消息)> 异步刷盘+transientStorePool=false(突然断电丢失少量消息)> 异步刷盘+transientStorePool=true(突然断电或服务宕机都会丢失消息)
+ *
+ */
@ImportantField
private boolean transientStorePoolEnable = false;
private int transientStorePoolSize = 5;
From 937b843ec2236b2f613098c5d4e3e37e04de8de4 Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Fri, 20 Oct 2023 17:00:35 +0800
Subject: [PATCH 05/18] =?UTF-8?q?RocketMQ=E6=B6=88=E6=81=AF=E6=8B=89?=
=?UTF-8?q?=E5=8F=96=E6=B5=81=E7=A8=8B?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../processor/PullMessageProcessor.java | 13 ++++
.../consumer/DefaultMQPushConsumer.java | 37 +++++++++++
.../rocketmq/client/consumer/MQConsumer.java | 11 ++++
.../client/consumer/MQPushConsumer.java | 5 ++
.../rocketmq/client/consumer/PullResult.java | 9 +++
.../client/impl/FindBrokerResult.java | 10 +++
.../consumer/DefaultMQPushConsumerImpl.java | 44 ++++++++++++-
.../client/impl/consumer/ProcessQueue.java | 64 +++++++++++++++++++
.../client/impl/consumer/PullAPIWrapper.java | 31 +++++++++
.../impl/consumer/PullMessageService.java | 17 +++++
.../client/impl/consumer/PullRequest.java | 16 +++++
.../client/impl/factory/MQClientInstance.java | 3 +
.../impl/producer/DefaultMQProducerImpl.java | 2 +
.../impl/producer/TopicPublishInfo.java | 5 ++
.../client/latency/MQFaultStrategy.java | 2 +-
.../apache/rocketmq/common/BrokerConfig.java | 3 +
.../common/consumer/ConsumeFromWhere.java | 9 +++
.../header/PullMessageRequestHeader.java | 31 +++++++++
.../rocketmq/common/sysflag/PullSysFlag.java | 13 ++++
.../rocketmq/store/DefaultMessageStore.java | 33 ++++++++++
20 files changed, 356 insertions(+), 2 deletions(-)
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java
index b4f6daa05eb..26b11647b68 100644
--- a/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java
+++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java
@@ -67,6 +67,9 @@
import org.apache.rocketmq.store.config.BrokerRole;
import org.apache.rocketmq.store.stats.BrokerStatsManager;
+/**
+ * 消息拉取请求处理类。处理Consumer端拉取消息的请求。
+ */
public class PullMessageProcessor implements NettyRequestProcessor {
private static final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME);
private final BrokerController brokerController;
@@ -98,14 +101,17 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re
log.debug("receive PullMessage request command, {}", request);
+ // 判断broker是否可读
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark(String.format("the broker[%s] pulling message is forbidden", this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
+ // 获取消费组的配置信息
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
+
if (null == subscriptionGroupConfig) {
response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
response.setRemark(String.format("subscription group [%s] does not exist, %s", requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
@@ -147,6 +153,7 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re
return response;
}
+ /*********************************构建消息过滤器开始***********************************/
SubscriptionData subscriptionData = null;
ConsumerFilterData consumerFilterData = null;
if (hasSubscriptionFlag) {
@@ -233,10 +240,13 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re
messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
this.brokerController.getConsumerFilterManager());
}
+ /*********************************构建消息过滤器完毕***********************************/
+ // 读取broker中存储的消息
final GetMessageResult getMessageResult =
this.brokerController.getMessageStore().getMessage(requestHeader.getConsumerGroup(), requestHeader.getTopic(),
requestHeader.getQueueId(), requestHeader.getQueueOffset(), requestHeader.getMaxMsgNums(), messageFilter);
+
if (getMessageResult != null) {
response.setRemark(getMessageResult.getStatus().name());
responseHeader.setNextBeginOffset(getMessageResult.getNextBeginOffset());
@@ -406,6 +416,7 @@ public void operationComplete(ChannelFuture future) throws Exception {
break;
case ResponseCode.PULL_NOT_FOUND:
+ // broker是否开启了长轮询 以及 客户端是否使用长轮询
if (brokerAllowSuspend && hasSuspendFlag) {
long pollingTimeMills = suspendTimeoutMillisLong;
if (!this.brokerController.getBrokerConfig().isLongPollingEnable()) {
@@ -417,6 +428,7 @@ public void operationComplete(ChannelFuture future) throws Exception {
int queueId = requestHeader.getQueueId();
PullRequest pullRequest = new PullRequest(request, channel, pollingTimeMills,
this.brokerController.getMessageStore().now(), offset, subscriptionData, messageFilter);
+ // 每隔5s重试一次
this.brokerController.getPullRequestHoldService().suspendPullRequest(topic, queueId, pullRequest);
response = null;
break;
@@ -464,6 +476,7 @@ public void operationComplete(ChannelFuture future) throws Exception {
storeOffsetEnable = storeOffsetEnable
&& this.brokerController.getMessageStoreConfig().getBrokerRole() != BrokerRole.SLAVE;
if (storeOffsetEnable) {
+ //如果CommitLog标记为可用并且当前节点为主节点,则更新消息消费进度,
this.brokerController.getConsumerOffsetManager().commitOffset(RemotingHelper.parseChannelRemoteAddr(channel),
requestHeader.getConsumerGroup(), requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getCommitOffset());
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumer.java b/client/src/main/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumer.java
index 339f799f9ac..ce9da315262 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumer.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumer.java
@@ -70,6 +70,8 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
protected final transient DefaultMQPushConsumerImpl defaultMQPushConsumerImpl;
/**
+ * 消费者所属组
+ *
* Consumers of the same role is required to have exactly same subscriptions and consumerGroup to correctly achieve
* load balance. It's required and needs to be globally unique.
*
@@ -79,6 +81,8 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private String consumerGroup;
/**
+ * 消息消费模式,分为集群模式、广播模式,默认为集群模式
+ *
* Message model defines the way how messages are delivered to each consumer clients.
*
*
@@ -93,6 +97,16 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private MessageModel messageModel = MessageModel.CLUSTERING;
/**
+ * 第一次消费时指定消费策略。
+ * CONSUME_FROM_LAST_OFFSET:此处分为两种情况,如果磁盘消息未过期且未被删除,则从最小偏移量开始消费。如果磁盘已过期
+ * 并被删除,则从最大偏移量开始消费。
+ * CONSUME_FROM_FIRST_OFFSET:从队列当前最小偏移量开始消费。
+ * CONSUME_FROM_TIMESTAMP:从消费者指定时间戳开始消费。
+ *
+ * 注意:如果从消息进度服务OffsetStore读取到MessageQueue中的
+ * 偏移量不小于0,则使用读取到的偏移量拉取消息,只有在读到的偏移
+ * 量小于0时,上述策略才会生效
+ *
* Consuming point on consumer booting.
*
*
@@ -134,31 +148,37 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private String consumeTimestamp = UtilAll.timeMillisToHumanString3(System.currentTimeMillis() - (1000 * 60 * 30));
/**
+ * 集群模式下消息队列的负载策略
* Queue allocation algorithm specifying how message queues are allocated to each consumer clients.
*/
private AllocateMessageQueueStrategy allocateMessageQueueStrategy;
/**
+ * 订阅信息
* Subscription relationship
*/
private Map subscription = new HashMap();
/**
+ * 消息业务监听器
* Message listener
*/
private MessageListener messageListener;
/**
+ * 消息消费进度存储器
* Offset Storage
*/
private OffsetStore offsetStore;
/**
+ * 消费者最小线程数
* Minimum consumer thread number
*/
private int consumeThreadMin = 20;
/**
+ * 消费者最大线程数,因为消费者线程池使用无界队列,所以此参数不生效
* Max consumer thread number
*/
private int consumeThreadMax = 20;
@@ -169,17 +189,25 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private long adjustThreadPoolNumsThreshold = 100000;
/**
+ * 并发消息消费时处理队列最大跨度,默认2000,表示如果消息处理队列中偏移量最大的消息
+ * 与偏移量最小的消息的跨度超过2000,则延迟50ms后再拉取消息。
* Concurrently max span offset.it has no effect on sequential consumption
*/
private int consumeConcurrentlyMaxSpan = 2000;
/**
+ * 队列级别的流量控制阈值,默认情况下每个消息队列最多缓存1000条消息;
+ *
* Flow control threshold on queue level, each message queue will cache at most 1000 messages by default,
* Consider the {@code pullBatchSize}, the instantaneous value may exceed the limit
*/
private int pullThresholdForQueue = 1000;
/**
+ * 在队列级别限制缓存的消息大小,默认情况下每个消息队列最多缓存100 MiB消息。
+ * 考虑{@code pullBatchSize},瞬时值可能超过限制
+ * 消息的大小仅由消息体来衡量,因此不准确
+ *
* Limit the cached message size on queue level, each message queue will cache at most 100 MiB messages by default,
* Consider the {@code pullBatchSize}, the instantaneous value may exceed the limit
*
@@ -211,21 +239,26 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private int pullThresholdSizeForTopic = -1;
/**
+ * 推模式下拉取任务的间隔时间,默认一次拉取任务完成后继续拉取
* Message pull Interval
*/
private long pullInterval = 0;
/**
+ * 消息并发消费时一次消费消息的条数,通俗点说,就是每次传入MessageListener#consumeMessage中的消息条数
* Batch consumption size
*/
private int consumeMessageBatchMaxSize = 1;
/**
+ * 每次消息拉取的条数,默认32条
* Batch pull size
*/
private int pullBatchSize = 32;
/**
+ * 是否每次拉取消息都更新订阅信息,默认为false
+ *
* Whether update subscription relationship when every pull
*/
private boolean postSubscriptionWhenPull = false;
@@ -236,6 +269,8 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private boolean unitMode = false;
/**
+ * 最大消费重试次数。如果消息消费次数超过maxReconsumeTimes还未成功,则将该消息转移到一个失败队列,等待被删除
+ *
* Max re-consume times. -1 means 16 times.
*
*
@@ -245,11 +280,13 @@ public class DefaultMQPushConsumer extends ClientConfig implements MQPushConsume
private int maxReconsumeTimes = -1;
/**
+ * 延迟将该队列的消息提交到消费者线程的等待时间,默认延迟1s。
* Suspending pulling time for cases requiring slow pulling like flow-control scenario.
*/
private long suspendCurrentQueueTimeMillis = 1000;
/**
+ * 消息消费超时时间,默认为15,单位为分钟
* Maximum amount of time in minutes a message may block the consuming thread.
*/
private long consumeTimeout = 15;
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/MQConsumer.java b/client/src/main/java/org/apache/rocketmq/client/consumer/MQConsumer.java
index f4a8eda23a4..1e18ac264b7 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/MQConsumer.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/MQConsumer.java
@@ -28,8 +28,16 @@
* Message queue consumer interface
*/
public interface MQConsumer extends MQAdmin {
+
/**
* If consuming failure,message will be send back to the brokers,and delay consuming some time
+ * 如果消费失败,则将消息发送回broker,并延迟消费一段时间
+ * @param msg
+ * @param delayLevel 消息延迟级别
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ * @throws MQClientException
*/
@Deprecated
void sendMessageBack(final MessageExt msg, final int delayLevel) throws RemotingException,
@@ -37,12 +45,15 @@ void sendMessageBack(final MessageExt msg, final int delayLevel) throws Remoting
/**
* If consuming failure,message will be send back to the broker,and delay consuming some time
+ *
+ * 如果消费失败,则将消息发送回broker,并延迟消费一段时间
*/
void sendMessageBack(final MessageExt msg, final int delayLevel, final String brokerName)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException;
/**
* Fetch message queues from consumer cache according to the topic
+ * 获取消费者对topic分配了哪些消息队列
*
* @param topic message topic
* @return queue set
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/MQPushConsumer.java b/client/src/main/java/org/apache/rocketmq/client/consumer/MQPushConsumer.java
index bc6d328c430..ca39a985e24 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/MQPushConsumer.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/MQPushConsumer.java
@@ -22,6 +22,7 @@
import org.apache.rocketmq.client.exception.MQClientException;
/**
+ * 推模式消费者
* Push consumer
*/
public interface MQPushConsumer extends MQConsumer {
@@ -37,6 +38,9 @@ public interface MQPushConsumer extends MQConsumer {
/**
* Register the message listener
+ * 注册并发消息事件监听器
+ *
+ * @param messageListener
*/
@Deprecated
void registerMessageListener(MessageListener messageListener);
@@ -46,6 +50,7 @@ public interface MQPushConsumer extends MQConsumer {
void registerMessageListener(final MessageListenerOrderly messageListener);
/**
+ * 基于主题订阅消息
* Subscribe some topic
*
* @param subExpression subscription expression.it only support or operation such as "tag1 || tag2 || tag3"
if
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/PullResult.java b/client/src/main/java/org/apache/rocketmq/client/consumer/PullResult.java
index 30d995270c9..0d77d65529c 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/PullResult.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/PullResult.java
@@ -21,8 +21,17 @@
public class PullResult {
private final PullStatus pullStatus;
+ /**
+ * 下次拉取的偏移量
+ */
private final long nextBeginOffset;
+ /**
+ * 消息队列的最小偏移量
+ */
private final long minOffset;
+ /**
+ * 消息队列的最大偏移量
+ */
private final long maxOffset;
private List msgFoundList;
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/FindBrokerResult.java b/client/src/main/java/org/apache/rocketmq/client/impl/FindBrokerResult.java
index 4367a4c1dbe..0521ac32ef8 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/FindBrokerResult.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/FindBrokerResult.java
@@ -17,8 +17,18 @@
package org.apache.rocketmq.client.impl;
public class FindBrokerResult {
+
+ /**
+ * broker地址
+ */
private final String brokerAddr;
+ /**
+ * 是否是从节点
+ */
private final boolean slave;
+ /**
+ * broker版本
+ */
private final int brokerVersion;
public FindBrokerResult(String brokerAddr, boolean slave) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
index 807e9c6d6fc..5c341cfecb7 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
@@ -92,6 +92,9 @@ public class DefaultMQPushConsumerImpl implements MQConsumerInner {
* Delay some time when suspend pull service
*/
private static final long PULL_TIME_DELAY_MILLS_WHEN_SUSPEND = 1000;
+ /**
+ * 长轮询的超时时间
+ */
private static final long BROKER_SUSPEND_MAX_TIME_MILLIS = 1000 * 15;
private static final long CONSUMER_TIMEOUT_MILLIS_WHEN_SUSPEND = 1000 * 30;
private final InternalLogger log = ClientLogger.getLog();
@@ -105,6 +108,9 @@ public class DefaultMQPushConsumerImpl implements MQConsumerInner {
private MQClientInstance mQClientFactory;
private PullAPIWrapper pullAPIWrapper;
private volatile boolean pause = false;
+ /**
+ * 是否是顺序消费
+ */
private boolean consumeOrderly = false;
private MessageListener messageListenerInner;
private OffsetStore offsetStore;
@@ -236,7 +242,10 @@ public void pullMessage(final PullRequest pullRequest) {
long cachedMessageCount = processQueue.getMsgCount().get();
long cachedMessageSizeInMiB = processQueue.getMsgSize().get() / (1024 * 1024);
+ // 已缓存消息总数维度的流量控制
if (cachedMessageCount > this.defaultMQPushConsumer.getPullThresholdForQueue()) {
+ // 如果ProcessQueue当前处理的消息条数超过了pullThresholdForQueue=1000,将触发流控,放弃本次拉取任务,并
+ // 且该队列的下一次拉取任务将在50ms后才加入拉取任务队列。
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
@@ -246,7 +255,9 @@ public void pullMessage(final PullRequest pullRequest) {
return;
}
+ // 缓存消息大小维度的流量控制
if (cachedMessageSizeInMiB > this.defaultMQPushConsumer.getPullThresholdSizeForQueue()) {
+ // 缓存消息的大小不能超过pullThresholdSizeForQueue,否则触发流控
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
@@ -289,8 +300,10 @@ public void pullMessage(final PullRequest pullRequest) {
}
}
+ // 获取主题的订阅信息
final SubscriptionData subscriptionData = this.rebalanceImpl.getSubscriptionInner().get(pullRequest.getMessageQueue().getTopic());
if (null == subscriptionData) {
+ // 拉取该主题的订阅信息,如果为空则结束本次消息拉取,关于该队列的下一次拉取任务将延迟3s执行
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
log.warn("find the consumer's subscription failed, {}", pullRequest);
return;
@@ -307,6 +320,7 @@ public void onSuccess(PullResult pullResult) {
switch (pullResult.getPullStatus()) {
case FOUND:
+ // 获取到了消息
long prevRequestOffset = pullRequest.getNextOffset();
pullRequest.setNextOffset(pullResult.getNextBeginOffset());
long pullRT = System.currentTimeMillis() - beginTimestamp;
@@ -315,6 +329,13 @@ public void onSuccess(PullResult pullResult) {
long firstMsgOffset = Long.MAX_VALUE;
if (pullResult.getMsgFoundList() == null || pullResult.getMsgFoundList().isEmpty()) {
+ // 如果msgFoundList为空,则立即将PullRequest放入PullMessageService的pullRequestQueue,
+ // 以便PullMessageService能及时唤醒并再次执行消息拉取
+
+ // 为什么PullResult.msgFoundList
+ //还会为空呢?因为RocketMQ根据TAG进行消息过滤时,在服务端只是验
+ //证了TAG的哈希码,所以客户端再次对消息进行过滤时,可能会出现
+ //msgFoundList为空的情况
DefaultMQPushConsumerImpl.this.executePullRequestImmediately(pullRequest);
} else {
firstMsgOffset = pullResult.getMsgFoundList().get(0).getQueueOffset();
@@ -322,7 +343,10 @@ public void onSuccess(PullResult pullResult) {
DefaultMQPushConsumerImpl.this.getConsumerStatsManager().incPullTPS(pullRequest.getConsumerGroup(),
pullRequest.getMessageQueue().getTopic(), pullResult.getMsgFoundList().size());
+ // 将消息存入processQueue
boolean dispatchToConsume = processQueue.putMessage(pullResult.getMsgFoundList());
+
+ // 然后将拉取到的消息提交到ConsumeMessageService中供消费者消费
DefaultMQPushConsumerImpl.this.consumeMessageService.submitConsumeRequest(
pullResult.getMsgFoundList(),
processQueue,
@@ -402,9 +426,12 @@ public void onException(Throwable e) {
}
};
+ // 是否更新已消费物理偏移量
boolean commitOffsetEnable = false;
+ // 已消费偏移量
long commitOffsetValue = 0L;
if (MessageModel.CLUSTERING == this.defaultMQPushConsumer.getMessageModel()) {
+ // 如果是集群消费,更新已消费偏移量
commitOffsetValue = this.offsetStore.readOffset(pullRequest.getMessageQueue(), ReadOffsetType.READ_FROM_MEMORY);
if (commitOffsetValue > 0) {
commitOffsetEnable = true;
@@ -424,19 +451,26 @@ public void onException(Throwable e) {
int sysFlag = PullSysFlag.buildSysFlag(
commitOffsetEnable, // commitOffset
+ // 是否开启长轮询
true, // suspend
subExpression != null, // subscription
classFilter // class filter
);
try {
+ // 通过远程调用,从broker中拉取消息。拉取成功后,调用 pullCallback.onSuccess 方法
this.pullAPIWrapper.pullKernelImpl(
+ // 需要拉取的消息队列信息
pullRequest.getMessageQueue(),
+ // 消息过滤表达式
subExpression,
subscriptionData.getExpressionType(),
subscriptionData.getSubVersion(),
+ // 拉取的物理偏移量
pullRequest.getNextOffset(),
+ // 拉取的消息数量
this.defaultMQPushConsumer.getPullBatchSize(),
sysFlag,
+ // 已消费偏移量
commitOffsetValue,
BROKER_SUSPEND_MAX_TIME_MILLIS,
CONSUMER_TIMEOUT_MILLIS_WHEN_SUSPEND,
@@ -580,6 +614,7 @@ public synchronized void start() throws MQClientException {
this.defaultMQPushConsumer.changeInstanceNameToPID();
}
+ // 创建MQClientInstance实例。同一个clientId只会创建一个MQClientInstance实例
this.mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(this.defaultMQPushConsumer, this.rpcHook);
this.rebalanceImpl.setConsumerGroup(this.defaultMQPushConsumer.getConsumerGroup());
@@ -592,9 +627,13 @@ public synchronized void start() throws MQClientException {
this.defaultMQPushConsumer.getConsumerGroup(), isUnitMode());
this.pullAPIWrapper.registerFilterMessageHook(filterMessageHookList);
+ // 初始化消息进度
if (this.defaultMQPushConsumer.getOffsetStore() != null) {
this.offsetStore = this.defaultMQPushConsumer.getOffsetStore();
} else {
+ // 初始化消息进度。如果消息消费采用集群模式,那么消
+ // 息进度存储在Broker上,如果采用广播模式,那么消息消费进度存储
+ // 在消费端
switch (this.defaultMQPushConsumer.getMessageModel()) {
case BROADCASTING:
this.offsetStore = new LocalFileOffsetStore(this.mQClientFactory, this.defaultMQPushConsumer.getConsumerGroup());
@@ -609,6 +648,7 @@ public synchronized void start() throws MQClientException {
}
this.offsetStore.load();
+ // 如果是顺序消费,创建消费端消费线程服务。ConsumeMessageService主要负责消息消费,在内部维护一个线程池
if (this.getMessageListenerInner() instanceof MessageListenerOrderly) {
this.consumeOrderly = true;
this.consumeMessageService =
@@ -621,6 +661,7 @@ public synchronized void start() throws MQClientException {
this.consumeMessageService.start();
+ // 向MQClientInstance注册消费者
boolean registerOK = mQClientFactory.registerConsumer(this.defaultMQPushConsumer.getConsumerGroup(), this);
if (!registerOK) {
this.serviceState = ServiceState.CREATE_JUST;
@@ -629,7 +670,7 @@ public synchronized void start() throws MQClientException {
+ "] has been created before, specify another name please." + FAQUrl.suggestTodo(FAQUrl.GROUP_NAME_DUPLICATE_URL),
null);
}
-
+ // 启动MQClientInstance,JVM中的所有消费者、生产者持有同一个MQClientInstance,MQClientInstance只会启动一次
mQClientFactory.start();
log.info("the consumer [{}] start OK.", this.defaultMQPushConsumer.getConsumerGroup());
this.serviceState = ServiceState.RUNNING;
@@ -842,6 +883,7 @@ private void copySubscription() throws MQClientException {
case BROADCASTING:
break;
case CLUSTERING:
+ // RocketMQ消息重试是以消费组为单位,而不是主题,消息重试主题名为%RETRY%+消费组名
final String retryTopic = MixAll.getRetryTopic(this.defaultMQPushConsumer.getConsumerGroup());
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(this.defaultMQPushConsumer.getConsumerGroup(),
retryTopic, SubscriptionData.SUB_ALL);
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
index 092da9aa33e..3a9d5f6abdc 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
@@ -36,6 +36,11 @@
import org.apache.rocketmq.common.protocol.body.ProcessQueueInfo;
/**
+ * ProcessQueue是MessageQueue在消费端的重现、快照。
+ * PullMessageService从消息服务器默认每次拉取32条消息,按消息队
+ * 列偏移量的顺序存放在ProcessQueue中,PullMessageService将消息
+ * 提交到消费者消费线程池,消息成功消费后,再从ProcessQueue中移
+ * 除。
* Queue consumption snapshot
*/
public class ProcessQueue {
@@ -44,34 +49,66 @@ public class ProcessQueue {
public final static long REBALANCE_LOCK_INTERVAL = Long.parseLong(System.getProperty("rocketmq.client.rebalance.lockInterval", "20000"));
private final static long PULL_MAX_IDLE_TIME = Long.parseLong(System.getProperty("rocketmq.client.pull.pullMaxIdleTime", "120000"));
private final InternalLogger log = ClientLogger.getLog();
+ /**
+ * 读写锁,控制多线程并发修改msgTreeMap
+ */
private final ReadWriteLock lockTreeMap = new ReentrantReadWriteLock();
+ /**
+ * 消息存储容器,键为消息在ConsumeQueue中的偏移量
+ */
private final TreeMap msgTreeMap = new TreeMap();
+ /**
+ * ProcessQueue中总消息数
+ */
private final AtomicLong msgCount = new AtomicLong();
private final AtomicLong msgSize = new AtomicLong();
private final Lock lockConsume = new ReentrantLock();
/**
* A subset of msgTreeMap, will only be used when orderly consume
+ *
+ * 用于存储消息消费队列中正在被顺序消费的消息。其键值对的关系为 Offset -> Message Queue,也就是按照消息在 Broker 中存储的物理偏移量进行排序。
*/
private final TreeMap consumingMsgOrderlyTreeMap = new TreeMap();
private final AtomicLong tryUnlockTimes = new AtomicLong(0);
+ /**
+ * 当前ProcessQueue中包含的最大队列偏移量
+ */
private volatile long queueOffsetMax = 0L;
+ /**
+ * 当前ProcessQueue是否被丢弃
+ */
private volatile boolean dropped = false;
+ /**
+ * 上一次开始拉取消息的时间戳
+ */
private volatile long lastPullTimestamp = System.currentTimeMillis();
+ /**
+ * 上一次消费消息的时间戳
+ */
private volatile long lastConsumeTimestamp = System.currentTimeMillis();
private volatile boolean locked = false;
private volatile long lastLockTimestamp = System.currentTimeMillis();
private volatile boolean consuming = false;
private volatile long msgAccCnt = 0;
+ /**
+ * 判断锁是否过期,锁超时时间默认为30s,通过系统参数rocketmq.client.rebalance.lockMaxLiveTime进行设置
+ * @return
+ */
public boolean isLockExpired() {
return (System.currentTimeMillis() - this.lastLockTimestamp) > REBALANCE_LOCK_MAX_LIVE_TIME;
}
+ /**
+ * 判断PullMessageService是否空闲,空闲时间默认120s,通过系统参数rocketmq.client.pull.pullMaxIdleTime进行设置
+ * @return
+ */
public boolean isPullExpired() {
return (System.currentTimeMillis() - this.lastPullTimestamp) > PULL_MAX_IDLE_TIME;
}
/**
+ * 移除消费超时的消息,默认超过15min未消费的消息将延迟3个延迟级别再消费
* @param pushConsumer
*/
public void cleanExpiredMsg(DefaultMQPushConsumer pushConsumer) {
@@ -124,6 +161,11 @@ public void cleanExpiredMsg(DefaultMQPushConsumer pushConsumer) {
}
}
+ /**
+ * 添加消息,PullMessageService拉取消息后,调用该方法将消息添加到ProcessQueue。
+ * @param msgs
+ * @return
+ */
public boolean putMessage(final List msgs) {
boolean dispatchToConsume = false;
try {
@@ -165,6 +207,13 @@ public boolean putMessage(final List msgs) {
return dispatchToConsume;
}
+ /**
+ * 获取当前消息的最大间隔。
+ * getMaxSpan()并不能说明ProcessQueue包含的消息个数,但是能说明当
+ * 前处理队列中第一条消息与最后一条消息的偏移量已经超过的消息个
+ * 数
+ * @return
+ */
public long getMaxSpan() {
try {
this.lockTreeMap.readLock().lockInterruptibly();
@@ -257,6 +306,10 @@ public void rollback() {
}
}
+ /**
+ * 清空 consumingMsgOrderlyTreeMap中的消息,代表成功处理这批消息
+ * @return
+ */
public long commit() {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
@@ -266,6 +319,7 @@ public long commit() {
for (MessageExt msg : this.consumingMsgOrderlyTreeMap.values()) {
msgSize.addAndGet(0 - msg.getBody().length);
}
+ // 清空 consumingMsgOrderlyTreeMap 代表这些消息被成功消费
this.consumingMsgOrderlyTreeMap.clear();
if (offset != null) {
return offset + 1;
@@ -280,6 +334,10 @@ public long commit() {
return -1;
}
+ /**
+ * 重新消费这批消息
+ * @param msgs
+ */
public void makeMessageToCosumeAgain(List msgs) {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
@@ -296,6 +354,11 @@ public void makeMessageToCosumeAgain(List msgs) {
}
}
+ /**
+ * 从ProcessQueue中取出batchSize条消息。
+ * @param batchSize
+ * @return
+ */
public List takeMessags(final int batchSize) {
List result = new ArrayList(batchSize);
final long now = System.currentTimeMillis();
@@ -308,6 +371,7 @@ public List takeMessags(final int batchSize) {
Map.Entry entry = this.msgTreeMap.pollFirstEntry();
if (entry != null) {
result.add(entry.getValue());
+ // 将消息加入 consumingMsgOrderlyTreeMap,代表这些消息正在被消费
consumingMsgOrderlyTreeMap.put(entry.getKey(), entry.getValue());
} else {
break;
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullAPIWrapper.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullAPIWrapper.java
index 1917d27cf15..be1afd00885 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullAPIWrapper.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullAPIWrapper.java
@@ -139,6 +139,26 @@ public void executeHook(final FilterMessageContext context) {
}
}
+ /**
+ * 通过远程调用,从 broker 拉取消息
+ * @param mq 从哪个队列拉取消息
+ * @param subExpression 消息过滤表达式。
+ * @param expressionType 消息表达式类型,分为TAG、SQL92。
+ * @param subVersion
+ * @param offset 拉取的消息的起始偏移量
+ * @param maxNums 拉取的消息数量
+ * @param sysFlag 表示位,参考: org.apache.rocketmq.common.sysflag.PullSysFlag
+ * @param commitOffset 已消费偏移量
+ * @param brokerSuspendMaxTimeMillis 消息拉取过程中允许Broker挂起的时间,默认15s。
+ * @param timeoutMillis 消息拉取超时时间
+ * @param communicationMode 消息拉取模式,默认为异步拉取。
+ * @param pullCallback 拉取回调
+ * @return
+ * @throws MQClientException
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
public PullResult pullKernelImpl(
final MessageQueue mq,
final String subExpression,
@@ -154,6 +174,10 @@ public PullResult pullKernelImpl(
final PullCallback pullCallback
) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
FindBrokerResult findBrokerResult =
+ // 根据brokerName、BrokerId从MQClientInstance中获取
+ //Broker地址,在整个RocketMQ Broker的部署结构中,相同名称的
+ //Broker构成主从结构,其BrokerId会不一样,在每次拉取消息后,会
+ //给出一个建议,下次是从主节点还是从节点拉取
this.mQClientFactory.findBrokerAddressInSubscribe(mq.getBrokerName(),
this.recalculatePullFromWhichNode(mq), false);
if (null == findBrokerResult) {
@@ -179,12 +203,19 @@ public PullResult pullKernelImpl(
}
PullMessageRequestHeader requestHeader = new PullMessageRequestHeader();
+ // 消费组名称
requestHeader.setConsumerGroup(this.consumerGroup);
+ // topic名称
requestHeader.setTopic(mq.getTopic());
+ // 队列ID
requestHeader.setQueueId(mq.getQueueId());
+ // 队列的偏移量
requestHeader.setQueueOffset(offset);
+ // 拉取的消息数量
requestHeader.setMaxMsgNums(maxNums);
+ // 消息拉取的标识位,参考:org.apache.rocketmq.common.sysflag.PullSysFlag
requestHeader.setSysFlag(sysFlagInner);
+ // 已经消费完成的消息偏移量
requestHeader.setCommitOffset(commitOffset);
requestHeader.setSuspendTimeoutMillis(brokerSuspendMaxTimeMillis);
requestHeader.setSubscription(subExpression);
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullMessageService.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullMessageService.java
index bd46a58859a..5c6e00c68e5 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullMessageService.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullMessageService.java
@@ -29,6 +29,9 @@
public class PullMessageService extends ServiceThread {
private final InternalLogger log = ClientLogger.getLog();
+ /**
+ * 消息拉取任务阻塞队列
+ */
private final LinkedBlockingQueue pullRequestQueue = new LinkedBlockingQueue();
private final MQClientInstance mQClientFactory;
private final ScheduledExecutorService scheduledExecutorService = Executors
@@ -76,6 +79,15 @@ public ScheduledExecutorService getScheduledExecutorService() {
return scheduledExecutorService;
}
+ /**
+ * 根据消费组名从MQClientInstance中获取消费者的内部实现类
+ * MQConsumerInner,令人意外的是,这里将consumer强制转换为
+ * DefaultMQPushConsumerImpl,也就是PullMessageService,该线程只
+ * 为推模式服务,那拉模式如何拉取消息呢?其实细想也不难理解,对
+ * 于拉模式,RocketMQ只需要提供拉取消息API,再由应用程序调用API
+ *
+ * @param pullRequest
+ */
private void pullMessage(final PullRequest pullRequest) {
final MQConsumerInner consumer = this.mQClientFactory.selectConsumer(pullRequest.getConsumerGroup());
if (consumer != null) {
@@ -90,8 +102,13 @@ private void pullMessage(final PullRequest pullRequest) {
public void run() {
log.info(this.getServiceName() + " service started");
+ // while (!this.isStopped()) 是一种通用的设计技巧,Stopped
+ // 声明为volatile,每执行一次业务逻辑,检测一下其运行状态,可以
+ // 通过其他线程将Stopped设置为true,从而停止该线程
while (!this.isStopped()) {
try {
+ // 从pullRequestQueue中获取一个PullRequest消息拉取任务,
+ // 如果pullRequestQueue为空,则线程将阻塞,直到有拉取任务被放入
PullRequest pullRequest = this.pullRequestQueue.take();
this.pullMessage(pullRequest);
} catch (InterruptedException ignored) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullRequest.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullRequest.java
index 10aded07625..438abba762d 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullRequest.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/PullRequest.java
@@ -19,10 +19,26 @@
import org.apache.rocketmq.common.message.MessageQueue;
public class PullRequest {
+
+ /**
+ * 消费者组
+ */
private String consumerGroup;
+ /**
+ * 待拉取消费队列
+ */
private MessageQueue messageQueue;
+ /**
+ * 消息处理队列,从Broker中拉取到的消息会先存入ProcessQueue,然后再提交到消费者消费线程池进行消费
+ */
private ProcessQueue processQueue;
+ /**
+ * 待拉取的MessageQueue偏移量
+ */
private long nextOffset;
+ /**
+ * 是否被锁定
+ */
private boolean lockedFirst = false;
public boolean isLockedFirst() {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
index 0089df09382..5625b6c68f4 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
@@ -115,6 +115,9 @@ public Thread newThread(Runnable r) {
}
});
private final ClientRemotingProcessor clientRemotingProcessor;
+ /**
+ * 消息拉取线程
+ */
private final PullMessageService pullMessageService;
private final RebalanceService rebalanceService;
private final DefaultMQProducer defaultMQProducer;
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
index 1bdb535b7e2..6404ae2afa1 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java
@@ -564,9 +564,11 @@ private SendResult sendDefaultImpl(
MessageQueue mq = null;
Exception exception = null;
SendResult sendResult = null;
+ // 总计发送次数(1+重试次数)
int timesTotal = communicationMode == CommunicationMode.SYNC ? 1 + this.defaultMQProducer.getRetryTimesWhenSendFailed() : 1;
int times = 0;
String[] brokersSent = new String[timesTotal];
+ // 循环指定发送指定次数,直到成功发送后退出循环
for (; times < timesTotal; times++) {
String lastBrokerName = null == mq ? null : mq.getBrokerName();
// 选择一个消息队列
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java b/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
index 94f0d5ebc51..41ddd738e11 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/producer/TopicPublishInfo.java
@@ -79,6 +79,11 @@ public void setHaveTopicRouterInfo(boolean haveTopicRouterInfo) {
this.haveTopicRouterInfo = haveTopicRouterInfo;
}
+ /**
+ * 选择一个队列,如果上一次发送失败,这一次会尽量规避掉上一次失败的broker上的队列
+ * @param lastBrokerName 上一次失败的broker
+ * @return
+ */
public MessageQueue selectOneMessageQueue(final String lastBrokerName) {
if (lastBrokerName == null) {
// 在消息发送过程中,可能会多次执行选择消息队列这个方法,
diff --git a/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java b/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
index f397118b03a..9420881da49 100644
--- a/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
+++ b/client/src/main/java/org/apache/rocketmq/client/latency/MQFaultStrategy.java
@@ -31,7 +31,7 @@ public class MQFaultStrategy {
* 开启与不开启sendLatencyFaultEnable机制在消息发送时都能规避故
* 障的Broker,那么这两种机制有何区别呢?
*
- * 开启所谓的故障延迟机制,即设置sendLatencyFaultEnable为ture,
+ * 开启所谓的故障延迟机制,即设置sendLatencyFaultEnable为true,
* 其实是一种较为悲观的做法。当消息发送者遇到一次消息发送失败
* 后,就会悲观地认为Broker不可用,在接下来的一段时间内就不再向
* 其发送消息,直接避开该Broker。而不开启延迟规避机制,就只会在
diff --git a/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java b/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java
index a7568f0a207..2d5794df014 100644
--- a/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java
+++ b/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java
@@ -93,6 +93,9 @@ public class BrokerConfig {
private int filterServerNums = 0;
+ /**
+ * 消息拉取是否开启长轮询
+ */
private boolean longPollingEnable = true;
private long shortPollingTimeMills = 1000;
diff --git a/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java b/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
index a33f46568ba..1bd542b7284 100644
--- a/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
+++ b/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
@@ -17,6 +17,9 @@
package org.apache.rocketmq.common.consumer;
public enum ConsumeFromWhere {
+ /**
+ * 此处分为两种情况,如果磁盘消息未过期且未被删除,则从最小偏移量开始消费。如果磁盘已过期并被删除,则从最大偏移量开始消费。
+ */
CONSUME_FROM_LAST_OFFSET,
@Deprecated
@@ -25,6 +28,12 @@ public enum ConsumeFromWhere {
CONSUME_FROM_MIN_OFFSET,
@Deprecated
CONSUME_FROM_MAX_OFFSET,
+ /**
+ * 从队列当前最小偏移量开始消费
+ */
CONSUME_FROM_FIRST_OFFSET,
+ /**
+ * 从消费者指定时间戳开始消费
+ */
CONSUME_FROM_TIMESTAMP,
}
diff --git a/common/src/main/java/org/apache/rocketmq/common/protocol/header/PullMessageRequestHeader.java b/common/src/main/java/org/apache/rocketmq/common/protocol/header/PullMessageRequestHeader.java
index 106e89e511c..70f19c4df38 100644
--- a/common/src/main/java/org/apache/rocketmq/common/protocol/header/PullMessageRequestHeader.java
+++ b/common/src/main/java/org/apache/rocketmq/common/protocol/header/PullMessageRequestHeader.java
@@ -26,26 +26,57 @@
import org.apache.rocketmq.remoting.exception.RemotingCommandException;
public class PullMessageRequestHeader implements CommandCustomHeader {
+
+ /**
+ * 消费组名称
+ */
@CFNotNull
private String consumerGroup;
+ /**
+ * topic名称
+ */
@CFNotNull
private String topic;
+ /**
+ * 队列ID
+ */
@CFNotNull
private Integer queueId;
+ /**
+ * 队列的偏移量
+ */
@CFNotNull
private Long queueOffset;
+ /**
+ * 拉取的消息数量
+ */
@CFNotNull
private Integer maxMsgNums;
+ /**
+ * 消息拉取的标识位,参考:org.apache.rocketmq.common.sysflag.PullSysFlag
+ */
@CFNotNull
private Integer sysFlag;
+ /**
+ * 已经消费完成的消息偏移量
+ */
@CFNotNull
private Long commitOffset;
+ /**
+ * broker暂停最大时间毫秒
+ */
@CFNotNull
private Long suspendTimeoutMillis;
+ /**
+ * 消息过滤表达式
+ */
@CFNullable
private String subscription;
@CFNotNull
private Long subVersion;
+ /**
+ * 参考:org.apache.rocketmq.common.filter.ExpressionType
+ */
private String expressionType;
@Override
diff --git a/common/src/main/java/org/apache/rocketmq/common/sysflag/PullSysFlag.java b/common/src/main/java/org/apache/rocketmq/common/sysflag/PullSysFlag.java
index d476a35b749..b0d8d1c57a8 100644
--- a/common/src/main/java/org/apache/rocketmq/common/sysflag/PullSysFlag.java
+++ b/common/src/main/java/org/apache/rocketmq/common/sysflag/PullSysFlag.java
@@ -17,9 +17,22 @@
package org.apache.rocketmq.common.sysflag;
public class PullSysFlag {
+
+ /**
+ * 是否更新已消费偏移量
+ */
private final static int FLAG_COMMIT_OFFSET = 0x1;
+ /**
+ * 表示消息拉取时是否支持挂起
+ */
private final static int FLAG_SUSPEND = 0x1 << 1;
+ /**
+ * 消息过滤机制为表达式,则设置该标记位
+ */
private final static int FLAG_SUBSCRIPTION = 0x1 << 2;
+ /**
+ * 消息过滤机制为类模式,则设置该标记
+ */
private final static int FLAG_CLASS_FILTER = 0x1 << 3;
public static int buildSysFlag(final boolean commitOffset, final boolean suspend,
diff --git a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
index 3f61af65c3a..6a3b262f2ff 100644
--- a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
+++ b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java
@@ -550,6 +550,16 @@ public CommitLog getCommitLog() {
return commitLog;
}
+ /**
+ * 获取消息
+ * @param group 消费组名称
+ * @param topic topic名称.
+ * @param queueId 队列ID.
+ * @param offset 待拉取偏移量.
+ * @param maxMsgNums 最大拉取消息条数.
+ * @param messageFilter 消息过滤器.
+ * @return
+ */
public GetMessageResult getMessage(final String group, final String topic, final int queueId, final long offset,
final int maxMsgNums,
final MessageFilter messageFilter) {
@@ -566,12 +576,16 @@ public GetMessageResult getMessage(final String group, final String topic, final
long beginTime = this.getSystemClock().now();
GetMessageStatus status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
+ // 待查找队列的偏移量
long nextBeginOffset = offset;
+ // 当前消息队列的最小偏移量
long minOffset = 0;
+ // 当前消息队列的最大偏移量
long maxOffset = 0;
GetMessageResult getResult = new GetMessageResult();
+ // 当前CommitLog文件的最大偏移量
final long maxOffsetPy = this.commitLog.getMaxOffset();
ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
@@ -580,15 +594,32 @@ public GetMessageResult getMessage(final String group, final String topic, final
maxOffset = consumeQueue.getMaxOffsetInQueue();
if (maxOffset == 0) {
+ //表示当前消费队列中没有消息,拉取结果为
+ //NO_MESSAGE_IN_QUEUE。如果当前Broker为主节点,下次拉取偏移量为
+ //0。如果当前Broker为从节点并且offsetCheckInSlave为true,设置下
+ //次拉取偏移量为0。其他情况下次拉取时使用原偏移量
status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
nextBeginOffset = nextOffsetCorrection(offset, 0);
} else if (offset < minOffset) {
+ //表示待拉取消息偏移量小于队列的起始偏
+ //移量,拉取结果为OFFSET_TOO_SMALL。如果当前Broker为主节点,下
+ //次拉取偏移量为队列的最小偏移量。如果当前Broker为从节点并且
+ //offsetCheckInSlave为true,下次拉取偏移量为队列的最小偏移量。
+ //其他情况下次拉取时使用原偏移量。
status = GetMessageStatus.OFFSET_TOO_SMALL;
nextBeginOffset = nextOffsetCorrection(offset, minOffset);
} else if (offset == maxOffset) {
+ // 如果待拉取偏移量等于队列最大偏移
+ //量,拉取结果为OFFSET_OVERFLOW_ONE,则下次拉取偏移量依然为
+ //offset。
status = GetMessageStatus.OFFSET_OVERFLOW_ONE;
nextBeginOffset = nextOffsetCorrection(offset, offset);
} else if (offset > maxOffset) {
+ // 表示偏移量越界,拉取结果为
+ //OFFSET_OVERFLOW_BADLY。此时需要考虑当前队列的偏移量是否为0,
+ //如果当前队列的最小偏移量为0,则使用最小偏移量纠正下次拉取偏移
+ //量。如果当前队列的最小偏移量不为0,则使用该队列的最大偏移量来
+ //纠正下次拉取偏移量
status = GetMessageStatus.OFFSET_OVERFLOW_BADLY;
if (0 == minOffset) {
nextBeginOffset = nextOffsetCorrection(offset, minOffset);
@@ -2023,8 +2054,10 @@ private void doReput() {
// 执行CommitLog转发
DefaultMessageStore.this.doDispatch(dispatchRequest);
+ // 如果开启了长轮询
if (BrokerRole.SLAVE != DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole()
&& DefaultMessageStore.this.brokerConfig.isLongPollingEnable()) {
+ // 有新消息到达,唤醒长轮询等待的线程,返回消息
DefaultMessageStore.this.messageArrivingListener.arriving(dispatchRequest.getTopic(),
dispatchRequest.getQueueId(), dispatchRequest.getConsumeQueueOffset() + 1,
dispatchRequest.getTagsCode(), dispatchRequest.getStoreTimestamp(),
From 0a4d238d24970df62549addcb0b1e31db436ca19 Mon Sep 17 00:00:00 2001
From: "Jindong.Tian"
Date: Sat, 28 Oct 2023 12:01:44 +0800
Subject: [PATCH 06/18] =?UTF-8?q?'RocketMQ=E6=B6=88=E6=81=AF=E6=8B=89?=
=?UTF-8?q?=E5=8F=96=E5=92=8C=E6=B6=88=E8=B4=B9=E6=B5=81=E7=A8=8B=E4=BB=A3?=
=?UTF-8?q?=E7=A0=81=E6=B3=A8=E9=87=8A'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 75 +++---------------
.../processor/SendMessageProcessor.java | 22 ++++-
.../AllocateMessageQueueStrategy.java | 1 +
.../AllocateMessageQueueAveragely.java | 7 ++
...AllocateMessageQueueAveragelyByCircle.java | 9 +++
.../AllocateMessageQueueByConfig.java | 3 +
.../AllocateMessageQueueByMachineRoom.java | 1 +
.../AllocateMessageQueueConsistentHash.java | 1 +
.../rocketmq/client/impl/MQClientAPIImpl.java | 19 +++++
.../ConsumeMessageConcurrentlyService.java | 65 ++++++++++++++-
.../ConsumeMessageOrderlyService.java | 3 +
.../impl/consumer/ConsumeMessageService.java | 23 ++++++
.../consumer/DefaultMQPushConsumerImpl.java | 1 +
.../client/impl/consumer/ProcessQueue.java | 5 ++
.../client/impl/consumer/RebalanceImpl.java | 16 +++-
.../impl/consumer/RebalancePushImpl.java | 7 ++
.../impl/consumer/RebalanceService.java | 7 ++
.../client/impl/factory/MQClientInstance.java | 2 +
.../common/consumer/ConsumeFromWhere.java | 11 ++-
.../ConsumerSendMsgBackRequestHeader.java | 13 +++
.../subscription/SubscriptionGroupConfig.java | 40 ++++++++--
...0\350\264\271\346\265\201\347\250\213.png" | Bin 0 -> 202248 bytes
22 files changed, 252 insertions(+), 79 deletions(-)
create mode 100644 "images/RocketMQ\346\266\210\346\201\257\346\213\211\345\217\226\345\222\214\346\266\210\350\264\271\346\265\201\347\250\213.png"
diff --git a/README.md b/README.md
index 33b42800cf3..d52b1ce9355 100644
--- a/README.md
+++ b/README.md
@@ -1,73 +1,16 @@
-## Apache RocketMQ [](https://travis-ci.org/apache/rocketmq) [](https://coveralls.io/github/apache/rocketmq?branch=master)
-[](http://search.maven.org/#search%7Cga%7C1%7Corg.apache.rocketmq)
-[](https://rocketmq.apache.org/dowloading/releases)
-[](https://www.apache.org/licenses/LICENSE-2.0.html)
+# RocketMQ 源码分析
-**[Apache RocketMQ](https://rocketmq.apache.org) is a distributed messaging and streaming platform with low latency, high performance and reliability, trillion-level capacity and flexible scalability.**
+## 消息存储
-It offers a variety of features:
-* Pub/Sub messaging model
-* Financial grade transactional message
-* A variety of cross language clients, such as Java, C/C++, Python, Go
-* Pluggable transport protocols, such as TCP, SSL, AIO
-* Inbuilt message tracing capability, also support opentracing
-* Versatile big-data and streaming ecosytem integration
-* Message retroactivity by time or offset
-* Reliable FIFO and strict ordered messaging in the same queue
-* Efficient pull&push consumption model
-* Million-level message accumulation capacity in a single queue
-* Multiple messaging protocols like JMS and OpenMessaging
-* Flexible distributed scale-out deployment architecture
-* Lightning-fast batch message exchange system
-* Various message filter mechanics such as SQL and Tag
-* Docker images for isolated testing and cloud isolated clusters
-* Feature-rich administrative dashboard for configuration, metrics and monitoring
-* Authentication and authorisation
+## 消息拉取与消费
-----------
+- 消息拉取代码入口:org.apache.rocketmq.client.impl.consumer.PullMessageService.run
+ - RocketMQ 消费者API层面分为推模式和拉模式两种消费模式,但是底层都是使用的拉模式实现的消息拉取,如果使用的推模式API,RocketMQ Client会使用长轮询的方式拉取消息,将拉取到的消息放入 `ProcessQueue` 本地队列中供消费者消费。
-## Learn it & Contact us
-* Mailing Lists:
-* Home:
-* Docs:
-* Issues:
-* Rips:
-* Ask:
-* Slack:
-
+- 消息队列负载均衡代码入口:org.apache.rocketmq.client.impl.consumer.RebalanceService.run
+ - 主要为了实现消息队列负载与重新分布机制。RebalanceService会在第一次启动时计算当前节点消费的topic的队列ID,然后创建PullRequest拉取消息
-----------
+- 消息消费代码入口:org.apache.rocketmq.client.impl.consumer.ConsumeMessageConcurrentlyService.ConsumeRequest.run
-## Apache RocketMQ Community
-* [RocketMQ Community Projects](https://github.com/apache/rocketmq-externals)
-----------
-
-## Contributing
-We always welcome new contributions, whether for trivial cleanups, [big new features](https://github.com/apache/rocketmq/wiki/RocketMQ-Improvement-Proposal) or other material rewards, more details see [here](http://rocketmq.apache.org/docs/how-to-contribute/).
-
-----------
-## License
-[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) Copyright (C) Apache Software Foundation
-
-
-----------
-## Export Control Notice
-This distribution includes cryptographic software. The country in which you currently reside may have
-restrictions on the import, possession, use, and/or re-export to another country, of encryption software.
-BEFORE using any encryption software, please check your country's laws, regulations and policies concerning
-the import, possession, or use, and re-export of encryption software, to see if this is permitted. See
- for more information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), has classified this
-software as Export Commodity Control Number (ECCN) 5D002.C.1, which includes information security software
-using or performing cryptographic functions with asymmetric algorithms. The form and manner of this Apache
-Software Foundation distribution makes it eligible for export under the License Exception ENC Technology
-Software Unrestricted (TSU) exception (see the BIS Export Administration Regulations, Section 740.13) for
-both object code and source code.
-
-The following provides more details on the included cryptographic software:
-
-This software uses Apache Commons Crypto (https://commons.apache.org/proper/commons-crypto/) to
-support authentication, and encryption and decryption of data sent across the network between
-services.
+
\ No newline at end of file
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/SendMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/SendMessageProcessor.java
index 2589a7547dc..624296a8207 100644
--- a/broker/src/main/java/org/apache/rocketmq/broker/processor/SendMessageProcessor.java
+++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/SendMessageProcessor.java
@@ -68,6 +68,7 @@ public RemotingCommand processRequest(ChannelHandlerContext ctx,
SendMessageContext mqtraceContext;
switch (request.getCode()) {
case RequestCode.CONSUMER_SEND_MSG_BACK:
+ // 客户端消费消息失败,将消息重新发回 broker retry topic,使得消费者能延迟一定时间后重新消费该消息
return this.consumerSendMsgBack(ctx, request);
default:
SendMessageRequestHeader requestHeader = parseRequestHeader(request);
@@ -96,6 +97,13 @@ public boolean rejectRequest() {
this.brokerController.getMessageStore().isTransientStorePoolDeficient();
}
+ /**
+ *
+ * @param ctx
+ * @param request
+ * @return
+ * @throws RemotingCommandException
+ */
private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, final RemotingCommand request)
throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
@@ -116,6 +124,7 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
this.executeConsumeMessageHookAfter(context);
}
+ // 获取消费组的订阅配置信息
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getGroup());
if (null == subscriptionGroupConfig) {
@@ -137,6 +146,7 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
return response;
}
+ // 创建重试主题,重试主题名称为%RETRY%+消费组名称,从重试队列中随机选择一个队列,并构建TopicConfig主题配置信息
String newTopic = MixAll.getRetryTopic(requestHeader.getGroup());
int queueIdInt = Math.abs(this.random.nextInt() % 99999999) % subscriptionGroupConfig.getRetryQueueNums();
@@ -145,6 +155,7 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
topicSysFlag = TopicSysFlag.buildSysFlag(false, true);
}
+ // 如果没有重试主题则创建一个
TopicConfig topicConfig = this.brokerController.getTopicConfigManager().createTopicInSendMessageBackMethod(
newTopic,
subscriptionGroupConfig.getRetryQueueNums(),
@@ -161,15 +172,16 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
return response;
}
+ // 根据消息物理偏移量从CommitLog中获取消息
MessageExt msgExt = this.brokerController.getMessageStore().lookMessageByOffset(requestHeader.getOffset());
if (null == msgExt) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark("look message by offset failed, " + requestHeader.getOffset());
return response;
}
-
final String retryTopic = msgExt.getProperty(MessageConst.PROPERTY_RETRY_TOPIC);
if (null == retryTopic) {
+ // 将消息的主题信息存入属性
MessageAccessor.putProperty(msgExt, MessageConst.PROPERTY_RETRY_TOPIC, msgExt.getTopic());
}
msgExt.setWaitStoreMsgOK(false);
@@ -183,6 +195,8 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
if (msgExt.getReconsumeTimes() >= maxReconsumeTimes
|| delayLevel < 0) {
+ // 设置消息重试次数,如果消息重试次数已超过maxReconsumeTimes,再次改变newTopic主题为DLQ("%DLQ%"),该主
+ // 题的权限为只写,说明消息一旦进入DLQ队列,RocketMQ将不负责再次调度消费了,需要人工干预
newTopic = MixAll.getDLQTopic(requestHeader.getGroup());
queueIdInt = Math.abs(this.random.nextInt() % 99999999) % DLQ_NUMS_PER_GROUP;
@@ -203,7 +217,10 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
msgExt.setDelayTimeLevel(delayLevel);
}
+ // 根据原先的消息创建一个新的消息对象,重试消息会拥有一个唯一消息ID(msgId)并存入CommitLog文件。这里不会更新原
+ // 先的消息,而是会将原先的主题、消息ID存入消息属性,主题名称为重试主题,其他属性与原消息保持一致。
MessageExtBrokerInner msgInner = new MessageExtBrokerInner();
+ // 将重试的消息放入重试topic,或者死信topic
msgInner.setTopic(newTopic);
msgInner.setBody(msgExt.getBody());
msgInner.setFlag(msgExt.getFlag());
@@ -218,9 +235,12 @@ private RemotingCommand consumerSendMsgBack(final ChannelHandlerContext ctx, fin
msgInner.setStoreHost(this.getStoreHost());
msgInner.setReconsumeTimes(msgExt.getReconsumeTimes() + 1);
+ // 原始的消息ID
String originMsgId = MessageAccessor.getOriginMessageId(msgExt);
+ // 设置原始的消息ID
MessageAccessor.setOriginMessageId(msgInner, UtilAll.isBlank(originMsgId) ? msgExt.getMsgId() : originMsgId);
+ // 将重试的消息写入重试topic,或者死信topic。根据 newTopic 变量决定写入哪个topic
PutMessageResult putMessageResult = this.brokerController.getMessageStore().putMessage(msgInner);
if (putMessageResult != null) {
switch (putMessageResult.getPutMessageStatus()) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/AllocateMessageQueueStrategy.java b/client/src/main/java/org/apache/rocketmq/client/consumer/AllocateMessageQueueStrategy.java
index c1f060406b5..8e0fb032dfa 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/AllocateMessageQueueStrategy.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/AllocateMessageQueueStrategy.java
@@ -21,6 +21,7 @@
/**
* Strategy Algorithm for message allocating between consumers
+ * 多个消费者之间分配消息队列算法策略
*/
public interface AllocateMessageQueueStrategy {
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragely.java b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragely.java
index 155e692ad0b..d92f0b48573 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragely.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragely.java
@@ -25,6 +25,13 @@
/**
* Average Hashing queue algorithm
+ * 平均分配
+ * 举例来说,如果现在有8个消息消费队列q1、q2、q3、q4、q5、
+ * q6、q7、q8,有3个消费者c1、c2、c3,那么根据该负载算法,消息队
+ * 列分配如下。
+ * c1:q1、q2、q3。
+ * c2:q4、q5、q6。
+ * c3:q7、q8。
*/
public class AllocateMessageQueueAveragely implements AllocateMessageQueueStrategy {
private final InternalLogger log = ClientLogger.getLog();
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragelyByCircle.java b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragelyByCircle.java
index fe78f0a6bbf..823c7856208 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragelyByCircle.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueAveragelyByCircle.java
@@ -25,6 +25,15 @@
/**
* Cycle average Hashing queue algorithm
+ *
+ * 平均轮询分配
+ * 举例来说,如果现在有8个消息消费队列q1、q2、q3、q4、q5、
+ * q6、q7、q8,有3个消费者c1、c2、c3,那么根据该负载算法,消息队
+ * 列分配如下
+ *
+ * c1:q1、q4、q7。
+ * c2:q2、q5、q8。
+ * c3:q3、q6。
*/
public class AllocateMessageQueueAveragelyByCircle implements AllocateMessageQueueStrategy {
private final InternalLogger log = ClientLogger.getLog();
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByConfig.java b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByConfig.java
index e548803d0d0..97fc275209f 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByConfig.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByConfig.java
@@ -20,6 +20,9 @@
import org.apache.rocketmq.client.consumer.AllocateMessageQueueStrategy;
import org.apache.rocketmq.common.message.MessageQueue;
+/**
+ * 根据配置,为每一个消费者配置固定的消息队列。
+ */
public class AllocateMessageQueueByConfig implements AllocateMessageQueueStrategy {
private List messageQueueList;
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByMachineRoom.java b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByMachineRoom.java
index 37568317cb0..b1a91cf23b2 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByMachineRoom.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueByMachineRoom.java
@@ -24,6 +24,7 @@
/**
* Computer room Hashing queue algorithm, such as Alipay logic room
+ * 根据Broker部署机房名,对每个消费者负责不同的Broker上的队列。
*/
public class AllocateMessageQueueByMachineRoom implements AllocateMessageQueueStrategy {
private Set consumeridcs;
diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueConsistentHash.java b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueConsistentHash.java
index 65dcf799271..80b01aeeaac 100644
--- a/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueConsistentHash.java
+++ b/client/src/main/java/org/apache/rocketmq/client/consumer/rebalance/AllocateMessageQueueConsistentHash.java
@@ -29,6 +29,7 @@
/**
* Consistent Hashing queue algorithm
+ * 一致性哈希。因为消息队列负载信息不容易跟踪,所以不推荐使用
*/
public class AllocateMessageQueueConsistentHash implements AllocateMessageQueueStrategy {
private final InternalLogger log = ClientLogger.getLog();
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
index 116bc4d5610..12b84031296 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java
@@ -1096,6 +1096,18 @@ public boolean registerClient(final String addr, final HeartbeatData heartbeat,
return response.getCode() == ResponseCode.SUCCESS;
}
+ /**
+ * 将消息重新发回 broker retry topic,使得消费者能延迟一定时间后重新消费该消息
+ * @param addr
+ * @param msg
+ * @param consumerGroup
+ * @param delayLevel
+ * @param timeoutMillis
+ * @param maxConsumeRetryTimes
+ * @throws RemotingException
+ * @throws MQBrokerException
+ * @throws InterruptedException
+ */
public void consumerSendMessageBack(
final String addr,
final MessageExt msg,
@@ -1105,13 +1117,20 @@ public void consumerSendMessageBack(
final int maxConsumeRetryTimes
) throws RemotingException, MQBrokerException, InterruptedException {
ConsumerSendMsgBackRequestHeader requestHeader = new ConsumerSendMsgBackRequestHeader();
+ // 命令类型为CONSUMER_SEND_MSG_BACK,broker端处理类为:org.apache.rocketmq.broker.processor.SendMessageProcessor.processRequest
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CONSUMER_SEND_MSG_BACK, requestHeader);
requestHeader.setGroup(consumerGroup);
+ // 消息topic
requestHeader.setOriginTopic(msg.getTopic());
+ // 消息物理偏移量
requestHeader.setOffset(msg.getCommitLogOffset());
+ // 延迟级别。RocketMQ不支持精确的定时消息调度,而是提供几个延时级别,MessageStoreConfig#messageDelayLevel = "1s 5s 10s 30s 1m 2m
+ //3m 4m 5m 6m 7m 8m 9m 10m 20m 30m 1h 2h",delayLevel=1,表示延迟5s,delayLevel=2,表示延迟10s。
requestHeader.setDelayLevel(delayLevel);
+ // 消息ID
requestHeader.setOriginMsgId(msg.getMsgId());
+ // 最大重试消费次数,默认16次
requestHeader.setMaxReconsumeTimes(maxConsumeRetryTimes);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageConcurrentlyService.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageConcurrentlyService.java
index 258e4dbf877..7566761a951 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageConcurrentlyService.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageConcurrentlyService.java
@@ -48,16 +48,47 @@
import org.apache.rocketmq.logging.InternalLogger;
import org.apache.rocketmq.remoting.common.RemotingHelper;
+/**
+ * 并发消息消费实现
+ */
public class ConsumeMessageConcurrentlyService implements ConsumeMessageService {
private static final InternalLogger log = ClientLogger.getLog();
+ /**
+ * 消息推模式的实现类,本质上使用的是拉模式(长轮询机制)
+ */
private final DefaultMQPushConsumerImpl defaultMQPushConsumerImpl;
+ /**
+ * 消费者对象
+ */
private final DefaultMQPushConsumer defaultMQPushConsumer;
+ /**
+ * 并发消息业务事件类
+ */
private final MessageListenerConcurrently messageListener;
+ /**
+ * 消息消费任务队列
+ */
private final BlockingQueue consumeRequestQueue;
+ /**
+ * 消息消费线程池
+ */
private final ThreadPoolExecutor consumeExecutor;
+ /**
+ * 消费组
+ */
private final String consumerGroup;
+ /**
+ * 添加消费任务到consumeExecutor延迟调度器。
+ */
private final ScheduledExecutorService scheduledExecutorService;
+ /**
+ * 定时删除过期消息线程池。为了揭示消息消费的完整过程,从服务器拉取
+ * 到消息后,回调PullCallBack方法,先将消息放入ProcessQueue中,
+ * 然后把消息提交到消费线程池中执行,也就是调用
+ * ConsumeMessageService#submitConsumeRequest开始进入消息消费的
+ * 世界。
+ */
private final ScheduledExecutorService cleanExpireMsgExecutors;
public ConsumeMessageConcurrentlyService(DefaultMQPushConsumerImpl defaultMQPushConsumerImpl,
@@ -203,15 +234,21 @@ public void submitConsumeRequest(
final ProcessQueue processQueue,
final MessageQueue messageQueue,
final boolean dispatchToConsume) {
+ // consumeMessageBatchMaxSize表示消息批次,也就是一次消息消费任务ConsumeRequest中包含的消息条数,默认为1。
+ // msgs.size()默认最多为32条消息,受DefaultMQPushConsumer.pullBatchSize属性控制,如果msgs.size()
+ // 小于consumeMessageBatchMaxSize,则直接将拉取到的消息放入ConsumeRequest,然后将consumeRequest提交到消息消费者线程池中
final int consumeBatchSize = this.defaultMQPushConsumer.getConsumeMessageBatchMaxSize();
if (msgs.size() <= consumeBatchSize) {
ConsumeRequest consumeRequest = new ConsumeRequest(msgs, processQueue, messageQueue);
try {
this.consumeExecutor.submit(consumeRequest);
} catch (RejectedExecutionException e) {
+ // 如果提交过程中出现拒绝提交异常,则延迟5s再提交
this.submitConsumeRequestLater(consumeRequest);
}
} else {
+ // 如果拉取的消息条数大于consumeMessageBatchMaxSize,则对拉取消息进行分页,每页
+ // consumeMessageBatchMaxSize条消息,创建多个ConsumeRequest任务并提交到消费线程池
for (int total = 0; total < msgs.size(); ) {
List msgThis = new ArrayList(consumeBatchSize);
for (int i = 0; i < consumeBatchSize; i++, total++) {
@@ -257,9 +294,12 @@ public void processConsumeResult(
if (consumeRequest.getMsgs().isEmpty())
return;
+ // 根据消息监听器返回的结果计算ackIndex
switch (status) {
case CONSUME_SUCCESS:
if (ackIndex >= consumeRequest.getMsgs().size()) {
+ // 如果返回CONSUME_SUCCESS,则将ackIndex设置为msgs.size()-1,这样在后面就不会执行 sendMessageBack,将消息重新
+ // 发送至broker retry队列中去尝试重新消费该消息。
ackIndex = consumeRequest.getMsgs().size() - 1;
}
int ok = ackIndex + 1;
@@ -268,6 +308,7 @@ public void processConsumeResult(
this.getConsumerStatsManager().incConsumeFailedTPS(consumerGroup, consumeRequest.getMessageQueue().getTopic(), failed);
break;
case RECONSUME_LATER:
+ // 如果返回 RECONSUME_LATER,则将ackIndex设置为-1。这样就会将这一批消息全部发送至broker retry topic中,然后消费者就能重新消费到这一批消息
ackIndex = -1;
this.getConsumerStatsManager().incConsumeFailedTPS(consumerGroup, consumeRequest.getMessageQueue().getTopic(),
consumeRequest.getMsgs().size());
@@ -287,6 +328,7 @@ public void processConsumeResult(
List msgBackFailed = new ArrayList(consumeRequest.getMsgs().size());
for (int i = ackIndex + 1; i < consumeRequest.getMsgs().size(); i++) {
MessageExt msg = consumeRequest.getMsgs().get(i);
+ // 将消息重新发送至broker的 retry topic中,
boolean result = this.sendMessageBack(msg, context);
if (!result) {
msg.setReconsumeTimes(msg.getReconsumeTimes() + 1);
@@ -296,7 +338,7 @@ public void processConsumeResult(
if (!msgBackFailed.isEmpty()) {
consumeRequest.getMsgs().removeAll(msgBackFailed);
-
+ // 消息确认失败,则五秒后重新消费消息
this.submitConsumeRequestLater(msgBackFailed, consumeRequest.getProcessQueue(), consumeRequest.getMessageQueue());
}
break;
@@ -304,8 +346,10 @@ public void processConsumeResult(
break;
}
+ // 从 processQueue中移除已确认消息,返回的偏移量是移除该批消息后最小的偏移量。
long offset = consumeRequest.getProcessQueue().removeMessage(consumeRequest.getMsgs());
if (offset >= 0 && !consumeRequest.getProcessQueue().isDropped()) {
+ // 然后更新已消费的offset,以便消费者重启后能从上一次的消费进度开始消费
this.defaultMQPushConsumerImpl.getOffsetStore().updateOffset(consumeRequest.getMessageQueue(), offset, true);
}
}
@@ -314,6 +358,12 @@ public ConsumerStatsManager getConsumerStatsManager() {
return this.defaultMQPushConsumerImpl.getConsumerStatsManager();
}
+ /**
+ * 将消息重新发回 broker retry topic,使得消费者能延迟一定时间后重新消费该消息
+ * @param msg
+ * @param context
+ * @return
+ */
public boolean sendMessageBack(final MessageExt msg, final ConsumeConcurrentlyContext context) {
int delayLevel = context.getDelayLevelWhenNextConsume();
@@ -356,6 +406,9 @@ public void run() {
}, 5000, TimeUnit.MILLISECONDS);
}
+ /**
+ * ConsumeRequest的run()方法封装了消息消费的具体逻辑
+ */
class ConsumeRequest implements Runnable {
private final List msgs;
private final ProcessQueue processQueue;
@@ -387,6 +440,8 @@ public void run() {
ConsumeConcurrentlyStatus status = null;
defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, defaultMQPushConsumer.getConsumerGroup());
+ // 执行消息消费钩子函数ConsumeMessageHook#consumeMessageBefore。通过
+ // consumer.getDefaultMQPushConsumerImpl().registerConsumeMessageHook(hook)方法消息消费执行钩子函数
ConsumeMessageContext consumeMessageContext = null;
if (ConsumeMessageConcurrentlyService.this.defaultMQPushConsumerImpl.hasHook()) {
consumeMessageContext = new ConsumeMessageContext();
@@ -408,6 +463,7 @@ public void run() {
MessageAccessor.setConsumeStartTimeStamp(msg, String.valueOf(System.currentTimeMillis()));
}
}
+ // 执行业务代码消费消息
status = listener.consumeMessage(Collections.unmodifiableList(msgs), context);
} catch (Throwable e) {
log.warn("consumeMessage exception: {} Group: {} Msgs: {} MQ: {}",
@@ -415,6 +471,7 @@ public void run() {
ConsumeMessageConcurrentlyService.this.consumerGroup,
msgs,
messageQueue);
+ // 若出现异常,则将hasException设置为true
hasException = true;
}
long consumeRT = System.currentTimeMillis() - beginTimestamp;
@@ -425,6 +482,7 @@ public void run() {
returnType = ConsumeReturnType.RETURNNULL;
}
} else if (consumeRT >= defaultMQPushConsumer.getConsumeTimeout() * 60 * 1000) {
+ // 消费超时
returnType = ConsumeReturnType.TIME_OUT;
} else if (ConsumeConcurrentlyStatus.RECONSUME_LATER == status) {
returnType = ConsumeReturnType.FAILED;
@@ -444,6 +502,7 @@ public void run() {
status = ConsumeConcurrentlyStatus.RECONSUME_LATER;
}
+ // 执行消息消费钩子函数ConsumeMessageHook#consumeMessageAfter
if (ConsumeMessageConcurrentlyService.this.defaultMQPushConsumerImpl.hasHook()) {
consumeMessageContext.setStatus(status.toString());
consumeMessageContext.setSuccess(ConsumeConcurrentlyStatus.CONSUME_SUCCESS == status);
@@ -453,7 +512,11 @@ public void run() {
ConsumeMessageConcurrentlyService.this.getConsumerStatsManager()
.incConsumeRT(ConsumeMessageConcurrentlyService.this.consumerGroup, messageQueue.getTopic(), consumeRT);
+ // 执行业务消息消费后,在处理结果前再次验证一次ProcessQueue的isDropped状态值。如果状态值为true,将不对结果进
+ // 行任何处理。也就是说,在消息消费过程中,如果因新的消费者加入或原先的消费者宕机,导致原先分配给该消费者的队列在重新负
+ // 载之后分配给了别的消费者,那么消息可能会被重复消费
if (!processQueue.isDropped()) {
+ // 处理消息消费结果
ConsumeMessageConcurrentlyService.this.processConsumeResult(status, context, this);
} else {
log.warn("processQueue is dropped without process consume result. messageQueue={}, msgs={}", messageQueue, msgs);
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageOrderlyService.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageOrderlyService.java
index edc2647a5f1..06f936d78e7 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageOrderlyService.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageOrderlyService.java
@@ -51,6 +51,9 @@
import org.apache.rocketmq.common.protocol.heartbeat.MessageModel;
import org.apache.rocketmq.remoting.common.RemotingHelper;
+/**
+ * 顺序消息消费实现
+ */
public class ConsumeMessageOrderlyService implements ConsumeMessageService {
private static final InternalLogger log = ClientLogger.getLog();
private final static long MAX_TIME_CONSUME_CONTINUOUSLY =
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageService.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageService.java
index 0f6f3bb38af..34bd9c7ff11 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageService.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessageService.java
@@ -21,6 +21,16 @@
import org.apache.rocketmq.common.message.MessageQueue;
import org.apache.rocketmq.common.protocol.body.ConsumeMessageDirectlyResult;
+/**
+ * RocketMQ使用ConsumeMessageService来实现消息消费的处理逻辑。
+ *
+ * PullMessageService负责对消
+ * 息队列进行消息拉取,从远端服务器拉取消息后存入ProcessQueue消
+ * 息处理队列中,然后调用
+ * ConsumeMessageService#submitConsumeRequest方法进行消息消费。
+ * 使用线程池消费消息,确保了消息拉取与消息消费的解耦。
+ *
+ */
public interface ConsumeMessageService {
void start();
@@ -34,8 +44,21 @@ public interface ConsumeMessageService {
int getCorePoolSize();
+ /**
+ * 直接消费消息,主要用于通过管理命令直接消费指定的消息
+ * @param msg 消息
+ * @param brokerName broker名称
+ * @return
+ */
ConsumeMessageDirectlyResult consumeMessageDirectly(final MessageExt msg, final String brokerName);
+ /**
+ * 提交消息消费
+ * @param msgs 消息列表,默认一次从服务器拉取32条消息
+ * @param processQueue 消息处理队列
+ * @param messageQueue 消息所属消费队列
+ * @param dispathToConsume
+ */
void submitConsumeRequest(
final List msgs,
final ProcessQueue processQueue,
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
index 5c341cfecb7..73dc24fc7d0 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
@@ -547,6 +547,7 @@ public void sendMessageBack(MessageExt msg, int delayLevel, final String brokerN
try {
String brokerAddr = (null != brokerName) ? this.mQClientFactory.findBrokerAddressInPublish(brokerName)
: RemotingHelper.parseSocketAddressAddr(msg.getStoreHost());
+ // 将消息重新发回 broker retry topic,使得消费者能延迟一定时间后重新消费该消息
this.mQClientFactory.getMQClientAPIImpl().consumerSendMessageBack(brokerAddr, msg,
this.defaultMQPushConsumer.getConsumerGroup(), delayLevel, 5000, getMaxReconsumeTimes());
} catch (Exception e) {
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
index 3a9d5f6abdc..06568d1c0b6 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/ProcessQueue.java
@@ -231,6 +231,11 @@ public long getMaxSpan() {
return 0;
}
+ /**
+ * 将消息移除,返回本地队列中最小的偏移量
+ * @param msgs
+ * @return
+ */
public long removeMessage(final List msgs) {
long result = -1;
final long now = System.currentTimeMillis();
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceImpl.java
index 146fce6e1e3..bba283d3f8c 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceImpl.java
@@ -240,6 +240,11 @@ public ConcurrentMap getSubscriptionInner() {
return subscriptionInner;
}
+ /**
+ * 对 topic下的消息进行重新负载
+ * @param topic
+ * @param isOrder
+ */
private void rebalanceByTopic(final String topic, final boolean isOrder) {
switch (messageModel) {
case BROADCASTING: {
@@ -260,7 +265,9 @@ private void rebalanceByTopic(final String topic, final boolean isOrder) {
break;
}
case CLUSTERING: {
+ // 获取topic 的队列信息
Set mqSet = this.topicSubscribeInfoTable.get(topic);
+ // 发送请求,从broker中获取该消费组内当前所有的消费者客户端ID
List cidAll = this.mQClientFactory.findConsumerIdList(topic, consumerGroup);
if (null == mqSet) {
if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
@@ -334,6 +341,8 @@ private boolean updateProcessQueueTableInRebalance(final String topic, final Set
final boolean isOrder) {
boolean changed = false;
+ // processQueueTable是当前消费者负载的消息队列缓存表,如果缓存表中的MessageQueue不包含在mqSet中,说明经过本次消息队列负载后,
+ // 该mq被分配给其他消费者,需要暂停该消息队列消息的消费
Iterator> it = this.processQueueTable.entrySet().iterator();
while (it.hasNext()) {
Entry next = it.next();
@@ -342,7 +351,9 @@ private boolean updateProcessQueueTableInRebalance(final String topic, final Set
if (mq.getTopic().equals(topic)) {
if (!mqSet.contains(mq)) {
+ // 如果缓存表中的MessageQueue不包含在mqSet中,说明经过本次消息队列负载后,该mq被分配给其他消费者
pq.setDropped(true);
+ // 持久化消费进度至broker
if (this.removeUnnecessaryMessageQueue(mq, pq)) {
it.remove();
changed = true;
@@ -371,13 +382,15 @@ private boolean updateProcessQueueTableInRebalance(final String topic, final Set
List pullRequestList = new ArrayList();
for (MessageQueue mq : mqSet) {
if (!this.processQueueTable.containsKey(mq)) {
+ // 如果 mq 不存在于本地缓存 processQueueTable 中,则证明是新增加的消息队列
if (isOrder && !this.lock(mq)) {
log.warn("doRebalance, {}, add a new mq failed, {}, because lock failed", consumerGroup, mq);
continue;
}
-
+ // 从内存中移除消息进度
this.removeDirtyOffset(mq);
ProcessQueue pq = new ProcessQueue();
+ // 计算消息消费的起始偏移量
long nextOffset = this.computePullFromWhere(mq);
if (nextOffset >= 0) {
ProcessQueue pre = this.processQueueTable.putIfAbsent(mq, pq);
@@ -399,6 +412,7 @@ private boolean updateProcessQueueTableInRebalance(final String topic, final Set
}
}
+ // 将PullRequest 放入 PullMessageService 任务队列中,这样消费者就可以开始拉取消息
this.dispatchPullRequest(pullRequestList);
return changed;
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalancePushImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalancePushImpl.java
index e5166f35b59..aafc2db9276 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalancePushImpl.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalancePushImpl.java
@@ -81,6 +81,12 @@ public void messageQueueChanged(String topic, Set mqAll, Set= 0) {
result = lastOffset;
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceService.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceService.java
index c8f8ab14079..a427adf67f2 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceService.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceService.java
@@ -21,6 +21,12 @@
import org.apache.rocketmq.common.ServiceThread;
import org.apache.rocketmq.logging.InternalLogger;
+/**
+ * 消息队列负载与重新分布机制的实现。
+ * 问题一:集群内多个消费者如何负载topic下多个消息队列的呢?
+ * 问题二:如果有新的消费者加入,消息队列又会如何分布?
+ *
+ */
public class RebalanceService extends ServiceThread {
private static long waitInterval =
Long.parseLong(System.getProperty(
@@ -37,6 +43,7 @@ public void run() {
log.info(this.getServiceName() + " service started");
while (!this.isStopped()) {
+ // 默认每20s执行一次 doRebalance
this.waitForRunning(waitInterval);
this.mqClientFactory.doRebalance();
}
diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
index 5625b6c68f4..4f47cbed79c 100644
--- a/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
+++ b/client/src/main/java/org/apache/rocketmq/client/impl/factory/MQClientInstance.java
@@ -988,10 +988,12 @@ public void rebalanceImmediately() {
}
public void doRebalance() {
+ // 遍历消费者
for (Map.Entry entry : this.consumerTable.entrySet()) {
MQConsumerInner impl = entry.getValue();
if (impl != null) {
try {
+ // 对消费者执行 doRebalance
impl.doRebalance();
} catch (Throwable e) {
log.error("doRebalance exception", e);
diff --git a/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java b/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
index 1bd542b7284..bdd59e709dd 100644
--- a/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
+++ b/common/src/main/java/org/apache/rocketmq/common/consumer/ConsumeFromWhere.java
@@ -18,7 +18,8 @@
public enum ConsumeFromWhere {
/**
- * 此处分为两种情况,如果磁盘消息未过期且未被删除,则从最小偏移量开始消费。如果磁盘已过期并被删除,则从最大偏移量开始消费。
+ * 一个新的订阅组第一次启动从队列的最后位置开始消费
+ * 后续再启动接着上次消费的进度开始消费
*/
CONSUME_FROM_LAST_OFFSET,
@@ -28,12 +29,16 @@ public enum ConsumeFromWhere {
CONSUME_FROM_MIN_OFFSET,
@Deprecated
CONSUME_FROM_MAX_OFFSET,
+
/**
- * 从队列当前最小偏移量开始消费
+ * 一个新的订阅组第一次启动从队列的最前位置开始消费
+ * 后续再启动接着上次消费的进度开始消费
*/
CONSUME_FROM_FIRST_OFFSET,
/**
- * 从消费者指定时间戳开始消费
+ * 一个新的订阅组第一次启动从指定时间点开始消费
+ * 后续再启动接着上次消费的进度开始消费
+ * 时间点设置参见DefaultMQPushConsumer.consumeTimestamp参数
*/
CONSUME_FROM_TIMESTAMP,
}
diff --git a/common/src/main/java/org/apache/rocketmq/common/protocol/header/ConsumerSendMsgBackRequestHeader.java b/common/src/main/java/org/apache/rocketmq/common/protocol/header/ConsumerSendMsgBackRequestHeader.java
index bd8fbb44ca0..f823b46e12e 100644
--- a/common/src/main/java/org/apache/rocketmq/common/protocol/header/ConsumerSendMsgBackRequestHeader.java
+++ b/common/src/main/java/org/apache/rocketmq/common/protocol/header/ConsumerSendMsgBackRequestHeader.java
@@ -23,13 +23,26 @@
import org.apache.rocketmq.remoting.exception.RemotingCommandException;
public class ConsumerSendMsgBackRequestHeader implements CommandCustomHeader {
+ /**
+ * 消息物理偏移量
+ */
@CFNotNull
private Long offset;
@CFNotNull
private String group;
+ /**
+ * 延迟级别。RocketMQ不支持精确的定时消息调度,而是提供几个延时级别,MessageStoreConfig#messageDelayLevel = "1s 5s 10s 30s 1m 2m
+ * 3m 4m 5m 6m 7m 8m 9m 10m 20m 30m 1h 2h",delayLevel=1,表示延迟1s,delayLevel=2,表示延迟5s。
+ */
@CFNotNull
private Integer delayLevel;
+ /**
+ * 原始消息ID
+ */
private String originMsgId;
+ /**
+ * 原始消息的topic
+ */
private String originTopic;
@CFNullable
private boolean unitMode = false;
diff --git a/common/src/main/java/org/apache/rocketmq/common/subscription/SubscriptionGroupConfig.java b/common/src/main/java/org/apache/rocketmq/common/subscription/SubscriptionGroupConfig.java
index 8f4703f6c80..c73bb7001b9 100644
--- a/common/src/main/java/org/apache/rocketmq/common/subscription/SubscriptionGroupConfig.java
+++ b/common/src/main/java/org/apache/rocketmq/common/subscription/SubscriptionGroupConfig.java
@@ -21,21 +21,47 @@
public class SubscriptionGroupConfig {
+ /**
+ * 消费组名
+ */
private String groupName;
-
+ /**
+ * 是否可以消费,默认为true,如果consumeEnable=false,该消费组无法拉取消息,因而无法消费消息
+ */
private boolean consumeEnable = true;
+ /**
+ * 是否允许从队列最小偏移量开始消费,默认为true,目前未使用该参数
+ */
private boolean consumeFromMinEnable = true;
-
+ /**
+ * 设置该消费组是否能以广播模式消费,默认为true,如果设置为false,表示只能以集群模式消费
+ */
private boolean consumeBroadcastEnable = true;
-
+ /**
+ * 重试队列个数,默认为1,每一个Broker上有一个重试队列
+ */
private int retryQueueNums = 1;
-
+ /**
+ * 消息最大重试次数,默认16次
+ */
private int retryMaxTimes = 16;
-
+ /**
+ * 主节点ID
+ */
private long brokerId = MixAll.MASTER_ID;
-
+ /**
+ * 如果消息消费缓慢(堆积),将转向brokerId为该值的服务器拉取消息,默认为1
+ */
private long whichBrokerWhenConsumeSlowly = 1;
-
+ /**
+ * 当消费组内消费者数量发生变化时,是否
+ * 立即通知消费者进行消息队列重新负载。消费组订阅配置信息存储在Broker
+ * 的 ${ROCKET_HOME}/store/config/subscriptionGroup.json中。
+ * BrokerConfig.autoCreateSubscriptionGroup默认为true,表示在第
+ * 一次使用消费组配置信息时如果不存在消费组,则使用上述默认值自
+ * 动创建一个,如果为false,则只能通过客户端命令mqadmin
+ * updateSubGroup创建消费组后再修改相关参数
+ */
private boolean notifyConsumerIdsChangedEnable = true;
public String getGroupName() {
diff --git "a/images/RocketMQ\346\266\210\346\201\257\346\213\211\345\217\226\345\222\214\346\266\210\350\264\271\346\265\201\347\250\213.png" "b/images/RocketMQ\346\266\210\346\201\257\346\213\211\345\217\226\345\222\214\346\266\210\350\264\271\346\265\201\347\250\213.png"
new file mode 100644
index 0000000000000000000000000000000000000000..85ea709a034dffd5fc45be5e19a5c8156c689544
GIT binary patch
literal 202248
zcmZs?1yCJbur)eZkYK^x-QC^Y-QC^Y-Ge*9-Q5WqJh(&f;1=B8`R@O2)&HvAuA-o3
zrp}oec6YDUYjw1ef+PYQ4jcdg2+~qwDgXe@4FC|LFyNpgjmYe_pkJ^~QrfNnfa3Pw
z57@Q^p%>`jH#c!DH&sUqH&0_1b3oO>+R50}&0HUIW(xp_0BJE{HLu)reP2CP&Av}^
z$V{ODVP#Zt@)-KUQyh)%hleHG+un%8KubP~1NBLVog|^FpQw;hMu{6`lY05eO{Q
z$bJHZz>}YVJxXkZ27$(ph!&Z3CZ>th-R=s)5!R{Tp+^YF@cTC|XEAW>fY*z5R*NZ^
zzCogU&XOktZVX==M@L67F-YF(1UB|3lJ<|NlHYT4%pT`j=^9WHkxL69mGut`cM&=N
z4w)0NtLvB~r4OZy+Kg3lrZ@@dr;N{ihtvSyVt0
zou8lI*C&uThKhrOgO87of#Jcb;}I(i9|=0~>z9-u>$%T?5P6!BmQ++;?rIA7y@0&F
z{??Y~&P9fpZW71IVE9*+%0Tyq*Q!FNi#FD`$_-_l^MrblGX(_rd}xO0?E3v}*a)&;7g5b;J?Phc8F}4k=@wTEp5b`xVTY#6?6!
z^%*po%+B3-hK7d5E$$>tHku{396YtOuvGJf+AQW4ESk0GRLd3aZz_8Yll8pE6@$T$dbDE>AJ
za-FIb(?ogDNq;R55VQy;vWk*frjyYL7DtSG3Fc25jvA$J*(V_}dKxDAEfESIiBF!e
zc*tz?K20?F@MfBMKZVW`p+qymfBk@Vw{6;LGS#L_A5Wo7nWaxtTwC)fspRY#
zJmb?-RZUT&DVhpezR!Z62YdMj8UqFXb4OZDG<>|#AInCT;>ovhC5nMj|`*7fGHbcq%-3d
z{dUd?^EWj#S|u$QCu0&F2WP@engu&cfInd$w9&HTsVP&CsqT6|7}w#0=Y+@;_P>4m
zR&wsdRa8_2J{$FZo|)Q0iQ>f&48^G5#b&?WLX`Lm<^5s95fv3Rly0PixEL!!Tue*|
z@bk7Jx{APw*x86{WB-}5rsY=A3H*NdF;lmGQm6THUubFy8o
z@&0^&e2-Z$;5_vTNw0$NUWA_SltXyW%+$9yeiizSNU9ZqBWH>xz)3nC3=w&S!&j4T
zW`tGY)~QF(<;_@?oVu~GQCw8CnX1i$VmX`Ndsf$-X)$9}eXGP55l|;u#XUjB8vKJb
zd96N<5DH>TjtTOb4jv!w1rmNpYVVGjU7aXXQmo}#o#5Ar+D}Cb5g2oq#{T{SHrB{#
zJgm}XR4Pz7{Qj-99x=y}Ea}Yf#1a-3_H?kkyeyqUp`4}`OozUzLtuWCCVu4WUGQK4
zy>0H0ObzfdNRJLg>Ayw+arWovu(bhB@*stvczEe%Qg
z3R(7YXxy0LkCRMwc6MetlRGYw>d>BJR4Us2vB1)1%R%
zTy`7=OQ+8!jC{6S>wBE%X(gjf``^QNb$7#tWOBP6g~6gqQDTM=nT{pov02Z@VY6QL
zy+tHFkVl7U+3
zPNy<=G)^)UMC^4$o%7d&eIcDg=I%HD(l1}WgozLru;`GeUUG}>GaWlm26be`gD<|8O$
zyY(2#k~DshIO#X+oKsEO*+>nPoHaP;1q3~&6aGm@fPaQRswjZs;vttL!E+$e8a+dd
z4E3uGtHS&4clI^;*0R9AOo<962Ehq;2v-ZXVAhi-uf1pDe^g%TkyK_X|s+fzaU
z;^{QL^^U+i3FkiYtsxPyuG92@!T`p+n_OPgPAF2W~!XG
z)hsy{LcuI0FG*Q7vmHY5Xai>yb!dSAQ;wpjyi2DZvFJ_!5e)d4Go-2xmPzv#7c
zI=meH<2~+TU8wlA@2VDo!YS)f%InLE7|D0VE88!xtmm5ajujbK=*o;K}PN)l%z-yetyx9t-ge+o13Q$B6R&i{Yd^
zG*A)gjeuDjphXTqo-CQG%vSAcQ!KdZMLmCXF@>9bb}RrPD(a
z_I|*(JX=Fg)9oLfrAnS+;%)^q81zu3$!L%MquZs5(F?>fYXfCsN(u_Z9M)g^UuYbE
zGKPpIS=rgi$;pW+x3sq2d$fl`;6?$Upa&s=`F+s~cWd+Ho#?^lJ20qm~tnZn}uSGPwXi*s|Mm&uK6-xRFOU|rEDV=Ze0=`AS3@;D46j$4z1eqhVcry`zJ}<@x!rbMEYMB0jIj%fFLY4Eki5(u3C(G9O=G
zQ4x`ueEyv5Y@dbM2#82vHD`*82EDkw8y^HVvAADeUlsHDsL|l8wNEWVH&Y&$2_I*`
zguU5MB%TAM-;M=8vv@r(8xT4!_QhSi-LIb=O+)+NZe=_56nLZhMT^?cCY-RK4qTou
z9Ka)W!(uTRNnltUHo1!DR@c@}bWKj~XqT3j$jQl{JF_w&DLzvTE!L@OBlHR7j*NK=
z2H?*@C>6&X#j2Q2#^x8>BorDn9wR^}fKTY>Y7HK2q!pQ6FKDSbKS4KPR76MoA42x~
z6jBhIqnLZLk?!p8|4d0Sgb=b?EQwZ5saPX(zKM?ZRAvXI#$LCR`S7opDyph^3dh@t
ziSq<@I-A3b1#ec1v1Vn|e}fV1P~3HfLE$4v+zi`|Sb6&sinM*r6ZpsLZ(*c^Cn_iquD6B`Co{-V
z#v>4LrJ&yniwoL4J(O8D-;32a0pRVN`Y`IK?tfu^@t4H0A?I9<=s8VUZ{BMwQwsS+3T=CkOZD-CfLvOZzn&
z+B4~hzK=L>J0P~>NE9&flu7F`W2x#m3t2y#H{zR6Ze_vZKb@}&t;4(J7A+LPI#xn;9NiDi7OflK70UBYowA0=eB-hN7f-Y6>!ia8&QR?T
zYH=9AX6ae!!61UIie9nj;JFb2VQJ-R6e2`~)mWcCwhwkq5pDVor`Ngu?FmN!qH
zuTHx-M0u_Excd2a46MK|@1YRYw5nGu>1Z6jk!n2HQ4S#I=z}ut*t?i&bsXuubJmDZgQ!M60~HSxE$bT_AYSplEd$$f3vd};cp@#MmOkThU=T8g
zYL!Jn-;Lki@=KUmh|fG)@g;JeOtht)W&;Ll`yr=I#I$x!3l0}W=!ZvK?sLH`7n@fs
zIb3pSj>pIS8V-ZY0;MCFKJ;D-iYwXcK0D6@7l#tuDD^K~C9kr8)biNJwsbZD{ks#Ixm6g2TU
zEaQsWx~`75-qzNO9~*`E%i%n$ja^Py&s{oVbZIe#>vk+_y9w7zJW~zXhNrTrmA6^{!N;JlhO?DQ$F%M;M<LO|FE)l%`S_BI^+f%v;56~~ALC3GTqVFs%IB=r%S7>){lxP_!-DPWFCjzVSymz;V
z+4P8i1SUj+r{^7@f5cUS?i%vg+I>x4l6K-DRr7K&i^_&IyU8l}^oVH_Q#=W90&M?Qz
z%lT3?`K-ob4o|eXb!lyK?!{#z+f8pUig)AI1okj?%ZhnSc}eo}=oI6ym&6YScDsZE
zP6l_K`d<1?!3T!IL$)>1i7+ie$
z95^j&{4I?H^}9aOMDU9yc9hfmK@kc;W{}oh%qB?;+m^0tw6Di$r%{a#prlQB1+kWn
zbl->C{cSq)M9au0CiSW6TS8`8wNM4CBe
z$C=)rrs*{xw=L%zTIl0xeL}DmkIlb2J(?nMRONKWUECOOAa+4(XsG|RK6=aQ0RS0W
zema_cy)AD)#xMJ^CNF&rH#y7Teu<9Tx9%|j1Jlr7H3ev7ecTQu>WekHgi5_zOHOOV
zA1#m*){PYkIybrV>Ik?%y9yNr1qH-DtWvO6mJs1PaurrYt*wztslkGvg#Z9dp`4a&
z@j_G9*Vk7~4ecbxy;F~&U6v+IpjuzH>=u9le|d%GdO2o-1cwwx7781rg4FAH_A)84BK~4R}{;rpf
zZ7ZgqdhW~3-rln2+nt)-H%8iP0eRzy$=WhY3u~udg-Ss|OBRj0edt|Aoa$H9)qFNB
z9ZFdy$`4I+S)!UP)b)Blq?7mC^uxV3+(Yr*MKs1gRHo@pW4SaIvUK*H_0xWa#hO(u
zov~M%*i;!STCWM`ZU5kmRY^UWn4*jK3C053Y~iK!b??Mh8c~tA@QKl5P|PPU-LiqJ
zuV>g+u4MgtZ|QXME~6g7<69|1(R9{sMHn#o!>_e$kDZ+DFU^BG?o7_~v3x6~ya#f%
zLExw7V@qqCYSlsnQ{TN&zJ$8fyZB8tGdb<{NUN{Axa{ysw(qEx?V>tiCHp?}?a2DQ
zsAqqXBP^%kFXOPNganS9X&@;n$?5)7oyfnH>!U{F>%d(j(srURpE0PlIAjmGtx_#$
z3$1iuM@##wW4WEwq|>K!*{@{EiRnwc$1)+cW1LDAhMw-BF~72bGl4pT25}TJSVOz#
zpTZKOpH5TrCSb^u10(mS|?6v}~Nx#B%(d
zc2QX)VE*Mh8@GO~b`@Te!H79$!Q#n`PM91?45dfVs1ahT_-M3FG$Jl+WFCo?|qeMJY+=$_b=gf
z2R}h#f7;kCV)&3+V`)Q^aCR%bAf^4^Ke}Y;^L=!GnQ#;Sx_uGV50%HFOer_FU`Q>d
z7hdIpuL&-wtaC`Oz%V%bazmlNCS1*?PtM(E&_eg=e10eGu3bHRM*H&@J*XV{91Ni
z6SGI!wCFyvQ3~xu%)@?jp?Q}(!*QIKBFd0aS?eo75DvnhJ5gN9UKdVI$}O7ED37^_
zm=D>(&`p<8K`z<5h!2OJHnVV+5WfGd=(Ld&*DBF_`D4ZkDw>4XGB2C2F7GD4s&q7d
zO)+Dg1+Hq+^`N8C#V~x^ii15z7I1JHtVd~^;
z?IHhkO86S~8NDox$H(T|M9lq?MpBt=6H6+!i}$TwR_24{AX6U;p)vnEx%h_0n4Vn<
zgAR+Cpm|TB-+R-!ipletAotx1hoETA4Fi1oA^Zj57Z9%;+h@jthYKGRjQ>Hz!SMj<
z9+QJ80sZTT^V6dB{>g%6#cp39o`6;{WyIbHrlFjalvb|~yD;*_1%CI3!J+0U=&In*
zg7f#WZy^xSgg&P+EJ}dTF%5dCBV^R!iOuw+IQubke@|Pp`8pe;=;&lxB?~05r{0+s
zFHd;<3(TmlZBR|Km0I@lF^bYGEt4sPsNoL16I*nRfxW$~u#E@;D#@Cb9+Vm(Ex7fP
z*lnd4p0i`hX5rKtfgl-wUEdLz!{%Y2!p8N&NH>q;JpoUBFsTWIDYuXKBRTx%XdKUw
zD@o*up@5`H&Lu6nmp3;^z$m}$M^D*uw+1bsWolmM_~>?{SL(t52Uumdo!8Vp$fJV;
zcTFzKmEhX=`W+Ouh(OWQ(pue;W#zcMD1dx;_WICA1f67YVKw~g><8_98Iu2+Mhyc`
zEYuvzr|(!W1aH7&_l|slfdAfbw70i+4yPjp%-&YA|Crz_Ns?rn-Ok3&PGI-O+nsKIDwxn4(LFNg!L=@OWFVE6a$Xz-mM4JUaUrC>
z(BbPCO`jRow7B$y#~|4Qjhj%H$u-=A00?r^Rvxiv#RBGC?(HFe%0yuauga-IRf8LV
z$5U>^u@f-NGyE%uVJrU8Hs(vB%N|24>s6w?aaxxb-C1U|#-1&{;98vx8`4n>tMI+^
z+Tn=T76*$M-_j+np0@HS9EbaXROYu{#
zAFE2B2W=bTb^%VB=nwlPXhPmcS5beQg&G*(vmf8qD}@oNpXSU<#u$a60rB3-`hyc30&6q2d
zD3eZRhuvx2#`CaZ&)=G5x=kmpSDHTFe6kcUq1k8xZ0o3Xis65VXfH3P6kPpE1`m=t
z?Ou}V(IbEc;0Ogxj;rC)kq*6+Y$kAFLk8mL9$OCX5?#;%A{8scc`AthL-ze~bwxQy
zU@bW}m;B?13!visYfmQ@SXOBg4-W)2-PL%d>%jr8nht9v>XsF1U(-bK*HrFZwW9&6
z(N6-Z*=e10-9tf+X0_fQ0AS8weGL9X<{~`?$Fw+pSEM*Lf?HiEqK-0{szmd9ojKeuIiS^iwl)Z)j#)GHDU=>tt~};
z)7oO7=guQmSUGa6u2}tEjRHaUYwcCJ&!wSV>|pYiHVz|>$gWEdD#%V3Sqhonq6d6*ZU6vFg2vS~KRCQ@`qVT;{
zE5C4jOGn+d(?i}a0$d$Ws79o_RK&6A8>Dc{(m{DK@MH6O96LGR|J`3gLD*ZqcFlx&
zIn|z?=6S5Kzb+XJ6l&62f4K0fw~8w105G{v*MYaK@maEkADd9GcKO|i!ico+ZDgx!GaKhx8=qT
zGkpKn#o_f(cz7<3UXrRQF$|>z^U^7jW}5xXxJ(K-Kvs2!v`FYD4F)jD>KZ?!4V|``
zM}rN^$_RW)Cg1_{cSu*bX#Wf~tt@~3z+Y0;ZP0=T=EUIAD=p6odTp{ev@zzLzZ5EEw
zbIVl{aYPNHRT*hiS;pQ0%HHB@Us_X3xaXhvb&Ik%e=$vzYEM7{z;kw!#0I{c6YGcV
z`)xtP=64=Cm@kwKUG~~6KxRgop@FL9PneK~JQ|fTh#eOnGtPs@{Hetw>Wgv4*|
zqFazhsrGBj?MGAdpv6^J{KWw>8`FCZ4Wv+C-JpoztN8SWR%1eoquD7Y+=M_~<=m3m
z#V_>{h#I$lBVsDN*svJj`7hzspL$fyMIZ@Ys=P51At~WDBWLe(#>s4Ycm<<-|=saH<W$=#xM
z>>&&=Ty>kMyOyn{Qk4aTTusA;9)|1S1P>(rn0KJ5=FwAi?WnR-O@{|i0v^^U6}@|>
zSnelF%;T3FGzy6{9BYSQYU=nTx&NkO(x$^rgpU2M7r@GuGb6R9DxLm!Y8I%7;ygBO
zs!s0+P;9*zOsO9`R90}?HfGvZd!VCVY-+yHEs4`=)QvMfYwcJyF5=fyt*NLvFON$6
ztM~8l@60&fSLfca!Lf%7W>2R}3;7$2PJ0A_P%;_Nt%3+7zV%&ebHmRNs`8au$~NtR
zDX9nlT<_34I~Gi5P_pGrUCeET($jspN%PbQ-Tmi@&v%=k0}r^iq96U_Sao}r`3C%I
z^%dy1utM_0gqivM5xfgR7I9HgPk)OO!BZlhY?RTqI$bHL3j}wx^tuusuJ!}*@2U?c
z8T&!-<7wJ4KonU_b`rfT#QYU;JN0h=8b91qpB^9rm2A%w+>_(mUJOisO8wjC$aUL)
zgyj=kx{^4WnT=GmU_vzvFDoCpYLI}L?49keZB$!2T?!Du`26r5S7GK(v7S4u9tNeY
zUOi^3b9PNo>K(SUe?GhVvchv=-_u5StQJgz0jj|3ZKF->ElbM%gS1cP
z>EW6SE@`ZoT$``L$eEoYyqJK{jYt|4Zf=u?!^^?JG95MW18(}LiswqNcUtW%1=cPz
z-RaIBPa+ItivTs-1)lzdUV)^vtH4j+5Tfn12TvW8=R3oX<&`mhLp%rE9t$~OFEyK!
z_S6n?7f(-=Sl?h-%3F;-4HW&!`GFlQnqWX>C*cJy8e)J>2?oS)RgA*-^yG9Om=RfZ
z3`%2Gv>nDTKOU8RCQa_KoHAEA!|+oS!*kSj@ny=aOWrFa_a0H+*d|oyOk4Zo;|?J%?pdtEtC3(zeO
zgp}68eLW@1pc^nWb*rB#4(D+UQ5k${*wG@_M
zfK;;wyGafI!`1d{C=CW+)>j?8M(18`b2h~>)eV=NFA5uCRcF#d^&d|4GEDexZeQ1E
z1UaXXk^={*$j>G0=AaQ?Paq;RtaGYUfxsR*HnnG8d7V`>7SYYgArdJlg{zwMc(6)0
zY4lV%@(5U+(yZwFMxr+_F6C2JoAj5T7KsKf!T0&^@&7DD$_^O%u|X(LItCkHZ%O{b
ziPY0B2M4OTP%GOS*m$br0s1at_0^T!dg2Uy=?v5rTzaa-3xR!Co}r?Zqz5rZN%W5j
zYdrSsjI`r$w!SbS19as!G;Qbyb>k{C_$+bBWmT;?GB$D=9+9G^{@7vZv
z>{9kIOL<$?muLNwsV$xC0^%<~Pa(_I$o(M#k^YC$L8E7^;-9~RZ}aq>FHw~^LDXSWJ613KtsR91nO_CDb?~qAlqKK$btTsax%(F4}+*gQGnCt
zoK_?ACqF6OugcXt#pgFplDy;Qj*C`F_wB18rQiun3k!yosy#r6PF8hte(@l!krV4a
zLZ^7HLa?-Y+t6;)&|~Hn#br~@6@4H5bY$%;V`7Z$#6$_`)_OMyP>7&&nhKqnoX}Az
z+pwnE(EeWgYxFTKJr4}Inx$D~IOON(eKsv80X63s&E4dr3$-uNkLZ;Lu0dUR^&0wJ
zf#YdoF_3glA&fjXHzy#!yeFB-g1Q;d3JPkc&EW6;rCiG2$x>wYOXjnrZ=YjQJGb?p
z^k71WfC@c&OuMeRpaEnoS!hrx
z_$AqxcTMA4`cPCP;0r{02Z$=JMmNUIzAaUXoy4&8(4yI5xR~6!
z+98Vc8aZ;hL>W*>zsUl8yL=$?pxpOlulN~cF&MyzHMK`s$v&-$Gb61|^~ug&jPCA&
zU)bSu!b#CNSMXarlofDtoBPHt%Gm@B2q`sc!5fLEo*7P0W}nT*4Xq|B?xdGZGw$zj_
zAe76(E35C()a0XSeQ(k^ucy(q0Z*jPs6J^b3=IHUV}UW84LkoQ!_1e;5+s&4H?uN<
z!(EIc@ru5mnf5~8@c*_Cg2>_yQ
z51W!T7ezTHJ+-DUf?B?R6MNp+y#G1^7zwK8w=1anflDs9#x74U9{+VpvemSf(t(Ps
zbg^~zMg@$xwvk}KgC5xnc-%JOqO)6RQ%j059IF@t>K7pauI;O`*DkUw`ya=-!j9ld
zC6+X(;5>?$4)!b?n-E74JODD|4C6+w==)tE;m!(rKv#Nv_R8r(G-%FHgUi)f}BSM;4MxMJA%4z$guHLuuakAfjfJpaLHN
zgC?eSODbrWFje{n4g^gs$*Pq?PfiC>f%LGfC;$kFAvX!4sBZ0?nURQ!I#}!3jA+Gu
z#C4Mg0I3i?1}5w?XP~&!eO|WyAkbFZFhEBW0hE3^T}no*Y@n^4USf+O3Km1X8YQ~D
ze(SXpFfwdf1Hi!bvcf_0ZCxC6#C-XT0-p!An
zF7)f@9)blbKxTn{LAS_&C6e$!!TP9fiRdYS3<(wlGDcZ1lz~Y$?LZJQ&r_oSDL1Ko
zb^7EFgw6ZQsOHc0hvV*@>3sVZ0|hWC}Uo?(Td)H&pGh*N-
zXyAf)Sd%x(hIKLefM1>U;GK8pG(6tRz4Q0{ks`ZSKTG2qSf!rSf_}LKU{x{Wuvfzl
z5*X0YWmsb6tgK#Ph=-`)JWm3zULe;`l)RErIJ9Ty*eQTbxG3Hv!Hk3Q8$%k!$ZsZQ6w-uf
zu!hm-Qk86Kxt^@00Jq1Lxwf3CBXB$b3{MZ}yNZSgGIF8{??!&U?Rje+J-;ZswQOa!
zs$4Ez$b*nls`>@0XzaX7l`BIrQRXotEaDMc9lVOZi$Y39
z{=KNW+IA|FxtMlx7aB<{xs1MwW9f2wHaimQ@xXjSmBo;nQ_C47$$*2tmON$EX88x%r2(C3F(~r1mo~0ZQb^Y9Vkzdf6o#nq4=Y;^WCD)YY=n
zd}<_LfXxI`IZPb=A9Hmo0G6!bn${X5k&1<2agjpPBPuaW(3TyGHuVvu1N-G#CcmHr
z7df?eo8Q)V4I716eK+kghwL4_4d|cO#a3(!^V-1z{)D0W4P=49Ry#8)uiX%r48GN
zlKPzH-gej8Iw}#1FcgRG9jmGYWR{LkBq_>HQ}deJvMn!X({aRAY#1i__oD$Nvgra;
z=WGyCmZehSK!^I{rObVb_fm;-@FT|=P1Q^=pJ)pA*T#G*{nrw|IYM_XYx;i=-A^JN
zuCNLB3D=N4#@KovlUo{;;6M};1qKx0v^H(&md5*aMKBNKSTOH5ZvYV~PqxuG?0q*L
zU0q#APf@N-svk{mX*3vrhDYz~(QxY!)}7BzCI-=E*4}Aj6LridV`L09$0SLCUrA6p
z4BgJ20UzC8q}0u)9=n#n?eT(~zh-KY`ksJ0$HAOmU~K&P>3lagf6sWNt^+o(e>wsX
zQE2Sl+3a@ULkpR^WI2i#eI0WKdb$abjVQire!6d)Kmk)LucXaQ?2~9#H7dLu-7kNi$LZjQF8%>bZoZqDB!H1ewuNvxGsH&v^<@Rk&x
z;p1dBqh!?0@A>ibxzglg;?^gbqMIp$6%QDM6fu_a^z_smE3H?+Th-6)ceqI;n=dgZ
zBFVr6aJ=<(16Y(4>DU~K-b=N*-Y
zI1E@1m~S?_`h!@~$$Zki`yy6kErwOqj%2c&`ELzQ@egS-w7}essViLZ7=%#!{p}J)
z^^#4m&ppWBtf|}U6ecky0shauo!#*A@Bggx|5nSIokw?XHK8X25OzBxMy!=MG#Bt(
zJzMqZcy=}`z>p!Q`PNml6b}Hq9?4FfdLVnN6Qcih9@g`L5d#1W^Ok`F$m5Ik2ez0%
zP~p>v-E>Z=1)#l7V$KIn-_=vDe)gDEhuzN5eIjMOy+A!q8-u_#0DkdO!RoH
zPswf>_&R&HVF$3+1@T}YK^rzY8dh#kd51DSDypKU#_#QBOwtD^U!X>brlFz9_u7j-
zv!;q9#Bfr*VV!7wt+~als@lQZ^Z@Y6COGewKMUvKfg70%+GlUs;jSHqiERgP-~_xB
zZK^A!?-9J@lyj1!{I3RIez|Lq7QDuML!7#m!Q`xADH=
zTLWV54mT*iAEzecXV3v#_v;IexPn|SCla&4gc|G2=vA`cG5Q{A
z$4)zd#wtxXgbz73
zndH&%xuwL(lb*u&)_dA?VKA~J8C)3IA5-xC6?$ps-_NH?);TB5S@Y)BSznFh0=fXE
zmoo^-K7@jIvGuT*z$Lb@NesEhg133_eG5jvz2GMpq)y?q^8WS>Y#NcL&?TQ2hJb*-
zDUMyEas`|aGba?*AZ8S&qP%QjA5o
zx9eSi5%Vz6wQ14UG^j93Gk!C!9I1?v$wt+284pd!fy1iIR4=0!HMk;
z@7;)K9;e0D_7Y_?+23k~js08Y^3`E9q`-$RG$m6dqZ&N!Y)qKUsN>m0CLo48L}jSS
zXu$*n!Wvag3X3~?kwi%58cE2)kit+s-PU}2kB9|b-cqS6?ry_r`3ZuMja#UO*L*MF
zpXc_%uy6KBMZ63E!13VE-E)gFMKE~@`rE+(6)K2?W)_v4FDMlU82=d9zY(D#N-OCc
z+_m`gd|dDe@>Ise#01>L8g5>;IdvEyL+*NXZ@p`}g;!^M&K6b!OvWFmiBnl9;r_n2
zJM4yS;J|R{+g=t(W91`)^=v129;z?cQ5C$|E?2u7MbYQiWLznDKhXO+@B~F`Ffk5O
z8?c|2XK6y{dwB0I7>!T1@Dcm@;rih_Ij1K#Ik)70DozVlBNf<&cR(pJ
zdegXP-{n;0jYv)r<)$*R(?Fi}I8{z>u)>v1^;Lh`>7NY~GS2u>3BwR1ezoOm{4D;|
zGO?idyY2HLPP_)G!uhcU_&jE(c%;3uX73LyUjXGs)bPtXjhxwX4vSO0HLcf9R(Zsz
zEM`G&;~JwRSAGS&yRfjx0iw^Yml^~^&Ng1s5!e)wI)jeQeJ~hdv2Vl*HSIea4EanF
zbOeOxaG^C9!}$FA9yvp>h2N-nC|Gi@UQ-jt_Y;}(K>v(nKh|M4(?4MAOS;U
zjEP4sXua)T39sFvM&=6ShPs)Ad@vwLi3|nyE7&8j9NYbi?z`#ej|!^X+5K-?<@YfN
z`^3toB!gZA85@VguP_x>J84EhvXY4e!i@
z*P%-k7Xy!UvkjeCO*r(0uP_Q376FWi55mi_pO%b{g@%PMY!^zj$MA7PW|ZE-Gkj
z|0W7d($X@qeaUzZb)@k-IC#5u-KrB93k45LoMeYS1K!ysE*9TK#cFg4`M!7|gRbjP
zI0s0Js9=Wqx8duTSm-o3rQN(&?8C5_Z_^T~n4LmXK!L)PR9SvJIG#O=gAV}vm)>&j
zpK(+Igx9hW5QMlJqZ2CTm(2zK8&J%wM-yekM0}jf=DpWT?X}yMY#tAT;jY*F=8`!8
zkISzKA!nig>TR9NufAiPnhjx;aOFo}WhMXUeo1OLn4
z;sEJ5U!ph|kg*X31~P?Gk-#698wgtAx!2B@v#8p*=<~WU(bLPF<$_1c{PzaT
zM|H~NBw
zvkvvCKm-Zprm`?<3()>MmqsP{V(8yeOV@({boy*_SkB1Jm5)wbNB~0R6w{yM{;uMf
z0-@{xVE^ay();-7;*2RJye}L=YBzOmDi<%c
z^DIyW^LwyjEN4Qw;Qj7AGDM$X=ehvZ_)~Not1B=oHcl
zfiw?)
zedN)v9M<$*2_S;3kwCfrb4I40c0$+m>KQ^DYC}L^1q?{`mNn2S_I8=+K>p*NNB_E`
zx%2UOSd9gM-c034?5FL`kmuvsN6W(_^q4>i&g)3sP<&=_;1NVtVu|3(Ur|BnxI_Q<
zm1B+f6RoE=%XYq4zqfKu58SR-U;OPy#vx0=w%h)dIzJX`xt(xPbMlS$%i7+y5=YS1
zEiKusR;$FMkR6m(-?%B#Gz>g53jKO@=j~Iz*?!)yS^rVWB7paj^Leu#SWhcR__-Ld
z)j4q!^3zaepuW8Sxmd79=k@1`>DBA>^5aUW7zD6;{^Dc!A>04?y5bLl15vU8r;TF^
zDyZ3?SDxY1iTx_ssO5|_n>x4SBW(sU;&V;Vgz~=EbCRqCQgPs(?KfAB4y)INu%KR9)2qceMg7{LGK%iG@e<-y{e2}vPePA`x~s30`};u
zr-}rhJu~ajz?|w~Vp-W#^w+Dd`3XW+TK27V--GkC2uA=IolT;gQAifobvx_|a5)Xf
z6mXF7a=SJ+$*vbOlB>?H=WV4`1xW|aG1t1X6zRm-hjinGg69vtfhHZ~RlS}hx*
zjXk#>4F#tSigKg11Zb-(I?-5Ud;Lxl$DUp57~OKI$F(*8JukN0i}z!@{;L(Vduo`8
z=k00z`E*b~|G8A3ug&EO4}uZgxPmVW)qyXQjgtV=F6El7p^vk<4xdOb
zmMd9kVDo)Zwm!G5_HnXy_EPX{+`QCcVZGPli<~{p)5pPZSdAX)iJjVWKqg1HoH($GeWqkJL
z4B*I|YCp5{{WWSmYb_igAKRr
zZ=0rwxj_(E>yxqEo)z+Ll`qrz&9Ke1ALGN(G~lO0Xm6;Dr^=y9D-Ee?USU$qfKLn4
zap(Qv@r%zZm{6HsnTllv8OYBUI2_Px8t5_9JYh*&dspzOYtAsOaiQU&KUYEy#V$Ts
z5T(fz*c&Te4Y{`pbhxd?wbbNaWKDlg)_SU
z^#Um9jrw-g>2JL@HwS#b|9g%V49%(8_wjb5$V)l%d2(7a3?qSaQ9v6k_<7h&`}a*;
zwjjXFCJF>3xkYdi!CJ!OyU)J2a5G0DF8ev}#(jVvt`Exqo7(UBw%=T=_47O
za6iaPdfstvZNtx{TxGFadZ^U&KKv#Qj3_N#A3!wI>d>nbTE|a!TBGK|!XWsD-^|2P
zAZRulbOi@8pn^uYt~!tN2TF*~Y(T>v?mSzdNk7hch&RwHSF2tE4XPIw7OpI*T&Wo|
z9Z1rqgiKq+Mn!ex+vqsPukae6aoKT}e79Zr)-`lsY
zu2rZq9|>h#Y!5fLI6^@Jz;A|KFjyK1xZIvB<7wUsm$I13qLu*kdNp-%a_A8=h-@~=
z#?7M3+e6(T@4WalH8;`{2sjzhusK0YP*_P4`~M;9tD~ZRzQ317kdjboQ0bCx5SNth
zkdj8aJ4H%5Bm|^mX;?Z$L`p!qyIV;CfoB#!-*bNFJo|^>@{XB1cW%D!ovlWv^%7`VR}-
zWJ2$K#rPUNU0&yQCWIsrw>9dv!BI5u85Imm(hotg@B_>~&wF&kpf?uIO=
z&nxm?={YG-9L
zv1uLvo_{(j4_r7<^yb@mr)-?Zq56n%X_Ws!pY35xH#Rpdt((9
z6`w=796cWwrXS|VKT^4hk6}Jdu1aGJhnkIDS``MZppNb2ucJ38>}1VuQ=G&=F#ocT>{M)(i~hc8rH*h9aNxg<#s
zgxSO~C2hbSs85mIhYzH1NHJwSlk6#$fvHTsD|oun^u)aPT2A0iIbDdc_0d;=ihjQA
zxHg;HaQeZ(f%1vHf4T+RyFXeVI~qUcF4=S8^Z2J6$_0
zw9&bs7CYXd`?H`@Wp*##xK7_5X;?MdqFOQbw2C8+7lPY>jDfdi<(pa0jOq(@j=|c%~m~
z`?b7T2$E8tGV@C8+kBjH0Ug`*NLvUfgSnT?-bI(nYu`wY25AmAXBby)!c_E91aNUQRGyf1mDUD@FLngl>v6w#t|=JIbO1
z{f9943RZMb>ZNT-W-_uRV{o{gi~y)H6O59}P%;}q<>VoBzlv)x+>%}aGP8q>6Q)^q
z`KuGpmRHYc1mDnBTYCyrqx1UT{Eb@}dv5ZbpB(q-isR|ujmMUoe^EN``aX1ELwaQc
zy$rrIo0}$nvPX@8Meo!chtSZ_fSwbKc#NQf!U)p9+Koy4wH;nex3ge|sG*w?6OSBf
zsYHHXRaF(JI_p?#K`Va3B!LfIQuD^Xe?;bi87RKwbYZXJe0{nYOM*C_#?#{;{
z4BP7yVhzSK`Tk*hXD7${FD37cMZAcWwtx(1333k=xukQRzLL3D&4`iHCVoimuzwqP
zA70khpXIMRs@=biMW;Qx2l>mX4BA8R@bJh!1%-50(;YE7nD2{iju>d}mph2rQzwjA
z5TLDkp41d*lP{{*Ge#OvJo0&<
zmRQ1I+J*t!$%|l<@do*M7hiqRB^umPwNd)|{#F@$p7arAz?4
z9-g_eu~Dp6xbWfo+#XjK$A`Z?LagPZJ~)(<&eZ7xQq8}QT{XR#nYXvL9)Xhm$n`yX
zjEa_<`?=*HDu6v&sASRWpJCgue2VId{z<^uM^3kMoZYK4>Dy0{m%e9YFYx{3
zN-}xJmA3o0M_X=hu3|*aQj{6)yHp-+@fx4Rp=lhxp;?X@}Svg
zU;=H$=qD7_EjyOEg!g6p?@rLxYD2XP*lBnbXnbqtL>bNb#^R^)eqQr*h;&t1QpZ`!
zuwsp^=o`r%U#-@4=W6v=uRYdT9HJVud^#}q;N5SZ&9q&`6ZNAIqK|}SvTdTovNx#_
zzH%*e2Tn&xRUL1~I*Uw{_lDM(6WE{XMEmv}yk<<18EZz}%TIw%n-3zc
z&V_Tz>nbaMWwOV*&ws~7fj}S$U5<0VemUqoW(z=BL%&U*-KF%o
znW}z&(8<32@RbNDL?&Xj!QOXQ57B&N3?=ZMJg2#AatQ~O-=Ae^u}c+E!iQdkM`z?-
zCf^rCoo=)@2dOAe-?p9nX!pM{i71TJ7>c>hqTwS3m{f=4%M;u*+@vmHZ>D_!^tmr)r
zS)WR(QtHw-q@F~#oUVJ*710cEos;c1u6t^$uJ9*Mo{)DF7mSl;#kkuGK_KExQ|u0e
zZgVJ*$HvDG*cqf}H3L53V};~VBFldWdH8saIz15ODgG~13JNh{!NQ$m!%I2eBgNg=
z$8D6y6|-ULx97$B?A$D!TDSsVHjkePqEQ|yP~Jx$9kd1jrx6m;bRCtQK!|w6--G(g
zd8oDP`a8lzqyX}FoyNtXkXUJCi>s@7N?Q13CRX12QJO6=zdaAJIH_jE!mnDmjWqa}
zm=-VKMB3|cXe=vXho}J^&;ZI`c!uBt;S4x_1mAb>Yo8Zjje+E#Upz&wa
zt2+DYJJ!P}h*7)kPO!$0-dBX+@B7@|*iT(PnJlKN(x&Y2&44r_iQq%386N#2ud_S+
zEm^K_HDf)J^olE|h42w&TMmoA$EsAB?oKvDULr?rX{$-s6b^xK3``+mLQG*kzRjy+
zQr%?TA`g+xB6)V4%(h>{%^OJvtdX41*us!aRdh5~1O4X7qH|GRBAJEgY8Vu-lr&jkdIY+AX!Y{3g-@S3+w@;rWOJJu2tA<%UzEosm35fM&32)
z2JEROYn4qa!=-kN*PgEjIT1bY{P29i)Q#!d+A~wST^GP>@Vux2lbaG>j)O*WJqlpV
zzXOO}!I-J!ww9CVfZy^$^;w{Ie@R}VzPq}3V!U9!4+2ODMt*l|57XW2)*`}6
zlQ%t2#KHYm->Y>K{~|_r@89UKhK>m*$D==d$jcaiWA_}4o@@ufX4P1Tw*<`Pja4MC
zHk|aCcxHCn@&Hz${J&FZOl>GXn<$+cr@K!QC@&K((ywyJ^6bwZT@8W&U0>1B$E^
zpTQ*m(MFN_?&*9LU=TfW@h_dB`upl$gE@%lmaGAg)<$pw;gdg*DBk+GF9`HIYwXy5#
z@~^$=8ZPdqU?%MUQ5?aV5GO82JXFMcAhQ3~6o!}vWDN`PSL)~g=Rtr8;D)xKK!4^+
z9I@!zFaE7I4dUh=;=6~2he*(XjSW~nVOjZZsTogU3eDwUO%czFr9`YOq=#v)1-yfr
zS$N0Y+2;c=n
zTaHJcHm+|u`vjGy_OTfC&H4P
zdzg>>ZzJ8^``>2pc`C+cQDDe=$KWF&l$eN#g#?4(PJqwT!$6MA@%IwhCr6rf!9hVm
z_YChrAo$pKtfv$560w#xH{P&pl_i2Iu$%A5}sPTMC
zF!?VZLi`YHb@W_81m7VI2e65(-)C@h0Heg6q@YUJ3JgF@I>pa22LLoBEIT1bx(v`VA03k5%V9Np0aC^;#oYef3(K^vA=6Y
zt75W$MT_PWv2?APx|-TcC}_N8K3wK;1v;SVRGZ2wDB!5-2lRfu2R5R(=JEzHc+`cF
zErc)w)1tJTqzv77#>Lf@LcrFeu*=2zhn7Ew~G03u^X{J!Zm
z+sA&m{i0~{)WqZ@p^Ak?VJZ4OYGx3sS@)#;#~1(hjc7d8$8Ho&ux%QFTc!#dKRrFI
zdSMIN#aUR;4;^P@WJu+zXg;lLiAG@ky=T##PZDr-yg<>au@(oYQ!s*x-Y#(3ASmpMqL`mh2Zo3F(gbPyk
zNCOg+F}<>BHK@=nGan!axciGhMBmf`%)4P8b?6c)2h}VMOUx)Ua3_@}D{En=j*g6c
zHQ#!}ScY#`{Xq8~Msj9a8mXpLe-mvbMAr`f1v2u#6}yvaRC^6S9AER3M`sAL8uzcB)3(&d+IgZ
zOcG#BAIl#F;K(^yQ-u*`jiT0TW;#avy%;|b8t$uKu<{oPrTR;N1C^c;zM`xdviKj5-
zH#h`YeGH7zlz8Zve+DtwcOG_CQx-t#J$P$n<=FVzMnEY@_xrXBmLn_aq(mlmh(vff
z1GwqM=4c9-XFx3w4Qu_or`6FIQn_*KNn9qRRqB=MX};PM$jH?8H2Hv;dzk(w
zX`yo0);bD=)w(I>Hcn+=J?moWiQ&jea&W6rI$F?rF2&;h(PN=#>MF!-=x)4^?xW}5
z2}=C@1r!=!>VT<;NT{&x2L7C$PStwB3b-Ul5fdU~h0s%>n-^cdO8g@rv_u5IKoy~a
z0iJA>l6H;W@tKvgbIq`2q76g5wVfRuZN+5dh;EPGWVt?eXs3(~;tBt0IupTa-OSDY
zzP=G~BTaTua+o@k2G|;TopJHQmZk!yU`s3OVqeG>s~5PN0FepFTLH~2#r$tt#&W^`
zAharXYXFqd2>xVjb!ORxJVTk;J~iYB=gx349nXLDQCSa|IMjp*MF@K%g%?zJ8e@AJ
zsP`ACGfluX|L8)FfO$pUEh_@HO2A|6e@ghtcw<+AwpQe<7LDr)vMRugZELRu5kGWy
zbUdvutd6T*MpXLf5L`R6y%lt%ywveaKMha3^V!{?NkLH?P6<)R*b^KI5kTH;f
ztp=lkg$NU2qJoVFgb%`UMF?fV4U#8g<6liN-7hWb)XKcISovf(D2Hd2nq^@V-|XB!
z*H6k8(37nSCkaT{w1Vz^HU}8u(f?UnSXfy9tO7dF#s(oC{8kW}J7l|ql-jgKt;}4H
zioG&D%w>YmC(n*B&TjMbuLj@iKeO|c7_5;A<1<1mxMkzDq8s7zCcQj6)--tkN!Ytx
zdZ!G(-hJ-vSToMI5bLnYN5o2+*A=<-&1EL!MMha;%-DFlOw8v>!p~UA>z^5GYTI&o`6
zyGj??l2N~rgr_S+^-CL;EWR1ve;y`8Z;Ibg9VmTs>6W>vr0ps(gxVY~lWU+jX2QGo+|_-n+pxK%%C~T^AVvsH2fb%Q2v8jo
zkc#q8C_WDdD?L&{;vCtL*HixfB(b~WXxh=OX2E=>xK2`0T621hy83bT=)FKQ_*Nj~
zf{q+F_n!;!-zb2Y<0;^@hDpGxQw`k-gjQ134VzOie|F7sOVHe(ep5*>oyHP9Ft5Uu
z_}h(ezp9GvUTO%!(1JiXaqsMRpQl6C#Cu|Dsd^tYBmj#LRZ(KHva&v2u!~6@oz0oh
z(R4_j(&E){QuvCJaSAnDb*kTdx;+uC$g*+9Z<$(L6>PsJQ(18;NI30<*GTrU`-dbt
zB;g@$C^1$SaDtnCE{?(taX9xv_aXJ|KO@KGyB3UFW<
zy+?{UmqTX2`#pXzU)Np@fIG+HDnd%^=MtV
zy!3LD#wGwpfI>Pifrs=oWhOp~ljg8t%Qmhxoz5>vQ~
z&}_c-OXg4NuWJ1vR)ECRH9NbzA-u>diJEiwUfd~OvZ(L(>gnt(g>a^#fd>B|Wn%N@
z)wHE2UM$8x|4tP6RiKMDNN3HJx^aKX@X}`mx;xLbF<|Uq>SYr=q1QUx(o;VpIjm>d
z(ZWobFixXeJYf4>A_;YHt*)(>*2Bm*B9ux%+V`&mQ^^{x+p@6wJMY1moJ_tdytbA<
zLlIaZwRLq^k{ys(!T;2?5CgrSV9fg(wL>i-e*To3M@XD~t^MXt5*WRZtL`nD)$uFL
z`_x;-1dhTL{0*4PUDM7$=>%Rl>lY%=R)aFlU8hE%Vk|5yUcJKB%N@7YDMY4e9mqz(
zd1V9o_ry+BZ#{m(fR#;tiuW|MH
zHoae&OpK1l$IL^zL8FEI^8P+WgExHWA_aWSy^B`)<)=z8?HgZ2z?r?H0$XEtIJei5Gs-7V3A9<3O3=BVD;sWj^
z&KgWf)coLDY1VI0;r`xNFo1=Tg=OtOW0|v~V6p(!qJuyLe`HrHvXq3*41m#DK7t<5
zrM?F5c}uNaenkG%N)@JQT&T5D`_2cv0ET0vlpL_B5%dHf
z<8e4sk~R7{22eb|J%mf|*}r|ue6^&Chb2BCk*#cOOoQ8ikhTQ4p*pXHUf`i-Zi0AM
zL7>NkRdx#Z5a5g-amb#`q5%BVkL&1Bj?Ci|P;ZnK7tG|DY
zeNHwu=GFs^Si7GS8U1sHsj2o@QXV9-MV*Vw2*D)vS>=-*kfB956Oev~>2(XWv$
zj}hpwelZ0N%q9+uK;zvNUQE@Pw}QYBIg{0KEgS1ZiWRyGNW{jJp2N(J9V
z@z3ep`%I1-3i?8mUF~_`p~}@8?)UWc5WUB^C;}{j`K;BzKGR*oh<@sR+PDNIQ2OFt
zQ?6~mWORGvDwwgt_R=#aKR>^*aeDVO0}+N49)
zujJ%}DV0ctjCdL$2|238Kko%X6Wj?2n&Swm7cmQ?5i63REY(Lx(!cr2S7KGyr3_6b
z=ksUCo&M&G`^)0df+0bT->{kKJ+go#mt$0&BS^Dm{F+%_fB$a3Q!X@Pi9@rM!^X#H
z!Nc^`RmxmY;?UfB2{Z5^@#1&Ur3)EXyDA6%PX>6XGB@Y(zG*vFP3}t4qXsk}>6ruXf)zTI5wY;Sw=dwC6&Ik?EQG
zKTY;^XGpRAvExBMJcTp4ECyQ;{o`9tH9?vaJviF~Se0W=9v?h(SctHIKLS-)B@}|i
zc=*OA?OA~C+SK?mhY5=Qt`aIL!ypJpub2Ap>Z-Al&f37s*mq1~E>uMooR@UhgNLh6HIPi{o$N#(XZANShYI08s=_18dkbnO$E$4`AGna86Z0#P3$)o<6%
zoPJ90eX%X3xQgAkO6f02Kh~Jlcgv>Hn*{07UKx{Fl@*kt!MP{U15d?`j?2s(Eg%A6
zFY!=gv-lQ$j;E?j?Ax!;K%!)Gd3hO_zrg=iRaFJLPW$eoto(%pz}6Gx%(BBIvcq&W
zKM=4p6GebluZ!T23zkB{H_b)s4Pv<@+PbRB$_}6}kYpsuJoppz+p#dc9LF^0Rfgu@
ze%m=im#&I-Q1k<$Eq|WdvsJOn)Pfz`g}3OrnE|N4w*x{e1ri?XNz7X2Lzx40
zB=FoKM=1h>Eb{aLi558Y@Ikw{aLQBfdm$2%l9E;aWFZ8qEev7@oKy*W-E1`#yHvM-
zx+mMt;1&MYyK?#Q*8}!eRu}V!ejjIU892C}-G#&o<|}{`SRld=p<^mP^=h?-H`UZQ
zzbF1w{YckCTTLzaGYR#D3KrzSbJD2C?M20hOBd0@X6NNIeRK+Zi{%g7-+V4}3y~zO
zmvH;CN~f>CCk#iitrIb==KFrnk;@v$y3!&L5f>-Q+yoKYpidyuvp-vZ2%2tOHM#Br
z>jShIH-hUjgQ^_Qjnvy?|C^YxT$CP;nC|1&L*GNStjn?U_S8qR(|ef^ECV@l!%l
zjX-n`A4z67?uqo&R}kXKr;x2xqr|Wp>(AAi6+#&5xzh*YdaPQrys=5w*zFRSh6Io>
zz*nX#(*Osso&Pb@`=HT%!P5fF3Mf3WKOJ~~FtwXJ@oUA&+ItG!GYAubLOH4@^LE
zwI=KOLzw98#ZVT%qr{G5V4`Z8-iPt;1(OwZd3jIgtB{~Zj<GR%RHRnE@6;*SYsb
zQF1Z~O_3QDVNC~w<7@E~pkGyKUVlv-Dx*Kphm&_ZdXdEqP}f?_zM%)MlHhJ~)y&*_
zRKWml{{*i)$@^>|nhx=}HkjDO(eH+hpz?X!d8^^=%Vn4kV*i|K?eHo@`??7kL|1);>0fORn5e_-?)
z=*0$|vdLYLs-3ik3kd3AG8YmTv0+VS7q3PgmI0!pyu$FQFOPKqCHu!W76(Cq(o5x7=wUM2kbl+c1AH
zY3T0@&PD(#-6+jxgDdNz1JCc*;qA>K=zH#UKJT6ngd}5r(@gOYD|+k;rlh{r-#{xy
z6uM4W!$BBMAaSbtIW{8*OMqC}mNx`HWPmcDu?s>c={NZoII=8R#AP#Pb+Pb4wZ{S$
z)g4l6mYf#3gh6|k5|AFL7cYU7&<(^U1#@?^J1Y+1!~Q7
zLkaB)4+c}HFl3FsjMV^kj3u|5t1G;&uG(h;oFntP95^{Z?}H$bY43wN>fUfz|N5JA
zT~N*Ro;5Pd*3NG5yYM`O>mEp(^}O@evNcP1;*`<(W%`Zb_Rq-4w5AKUYsQ#hZn}wG
zBZLT;pZR`f8+FeuzgQ1e!!58!;pH2ol_$-=w9V}cXM9*+TZnV%
zvb04WYD%`>NiG!ttRDbKL)PCl`g}@knP4<{;^#M3K>DKxtL+5_jN0@tB~Mi+p7q2%
zVWA|xq^Y#4WXw$I^3{nrG*cbd&Ekc0cZE?(2o(D-PQUi
zF_*)VGMf>X<9!0h9dW&VtjO<_z9jpiAb&q^sj;8E@OzFI?u#T|2MqF_Z-t=A7#cRo
z-dy8xIyhCLRHx=mraR%bjaT$kpgBVDbrD!_t;(;4K5b6r`qua*WyyCKgFnFP1e7DF3$cxdkg2AIY#>XWKyMf$<1ok4{1>wXkV{WDsUdDXsSQHvJOAP1omxqJyIHiu5b
z5(Wq3-e=%Kq;5~8@>V0+N*pY_#MLz?-3?g!JU}NxaGE0E8&qRn=K6CuW#&75AVuUf
znttL0G8AN{dOAQC%4d)Ypr?jlt5leaeedihw
z7{1{5y>ww9(rs`s&Uv^cgkDC^>UZNWt}!$U5Usk%m-_j_1S!Y6nyo!i5WOv;v$P*Fj0T~8gZ1*HtB<&X#9Ld
zWi2w?P!M2n(kIn_3l3j6^}n9Gj)e!>er!c8}U|Ou1mUFjy67iy+?x@nj;$1xC!sh-xtg
zax9l-g@J-e>r9Q=UG$MBp;=nJ{X6LCZ8s#&`IzQ
zP8O#y?duCbM9qo2o)avVg_3CW(
zzygM>Rp^*EO1(w(_e^lUK6-F6&5fL2^rYkGPmGt>q2A}B8WUHRlBW{}x8KrZLzayV
zj~+4*MYMzU8RdEuOOuW96;%g>AH@Bd{H`wn_Z1ZXrhpUH+|$@$1&Jv=tZI
zLLQl$o^7*G)9Pd4q2qfIveF5!0~e%Bxg4sP*CiW2xA-WI2`>KB`Tf%WP;FPBka2haS*}Gt)7TEG
zFO6y8?N{Zahu|C@e-}zTPbfcw($9+IYZy-w1!OG;9Mh8c@nie(ppYsaDz(xwH8O`z
zsY!GHy0xwC{@&h?KWB*TLz!{zTLgYR5PeeO-%*f
zA0dqa^Wx!@nAC*8Bs5%IRED=y#n|Ts*3A%a*NdirU@g*ZR`gkuFCQ|WT+&Wb%+?rI
zj(9+h7Z2)RKc>i0THZ$vLgzDv&UkUEH4AS>lCKs89bPxS_*i6O_2tlS*YXx4&riAD
zscNLgBDbUF_n*OUwkvptl`Xnv&U~7T589p%iVk|sT~q32>lKqyR9zQe;6wy>Ht+UK
zw!5E4w^j&7>ucD1xE^we8fEuJP
zmq)m=OC7BLJiD0Lyn4d7iFKIchlgf)GyC{`RJ%yG}35R$F7-;fqortnFr$(vV|GX!khd6i2`D1^1pC
zWu=G(ul@Y2(hW+*9*%IGV`osJ*l>B&IC1{*Az#Yblv`=V-#^zeRQw81${{OStdsVJ
zDlf^?v&?|YxPA8h@?-x|FQeKvt|dR$tEQ5Rd}gw`b|bjkROVun-60ti8eOoX3K!sn
zy3e+$SN+xy8~%O0=+ovN;G>wXW2oor?|OQ@`=k8?KW@m%`>!)qg?5AWDqopUrgpyP
z8yB$~zAVGS@*?kv=Pk-L)@IW`BgvRuXT90+35Hi9oCFuH5?a08Q*oHZ%Ka*YUB)zgU{CFc=yWqa}~O1hcqA!F~Hx*T;|
z%W7T2%4s)VJDn&WSr>{N#mM#lLU}E<_5iTg6Idrydu*
z-JujLVAgto7GvA)pL7+Ir&r`g&N+GDa=)Bo|Ib3SYst;N273P|i@#!iH-A=eX8oU!
z6|okI*;E*K+HA3C5X6ab_>7nv7(0GADE{%g`m`Y^EYbC5A-Lj?YrD*ASF+#XdmB7B
zSDQAq6m3<;7hCi@iyAQRJEG5F(&L-s`heswX;;!Dey>+MN)K5pZ`4g{Ih
z2I+4D^=Ct=xb)m8=Ce#WtJY>8U-L#!w^jLe$56C6DQHYP-1brw+`89Zmzzi{scv`1
zk+G>I>u-r%qT70S1hHP>>Fl&s``>8UMw4CsR-F|O2-{mMebi2UE#2%YBMT)Rh-&EE
zGniTY$-zbjAOnV-IYo{7L>^)-vL
zO;*Vs;#T{6`Dr9|{_30R8Vs!C9U0FbOYQMrDn{2cGC5y%Yc0a8-w~64PnT{Yt_1Zf
zbN%xWC#SqZ&Mc1JA<0KY5q7~_SpjUF459suY|GL9T$QtrV0l7Tb-DgG3Xk<
z&+Gcp;rgpt7V?GA}(GU4sF^gXKFj53kF
zg&x;4hKDJ7&q1L`SWNvIj!g<5QZ0p`C-mvd!~2;f`&S|uu1wGLr`~G+X}DU$M+=RP
zYjRXrE7W^wtjD_3O;Z?)sedx@nJAY?oIo$3qHYg*3$h5if*BJ{l9Ds
z3=AwRrjF3~j~((|SV;i+RtnDQY^7K#!V}mBz`L%lE<_MUB=i0}sP{~uNxT52laXo~
z$mN73O~{Fm=-~6g29!N;I0!Krwl=eD
zRFuNn+M9GOINZ+&t;Tdm<|`sSseoYkK_dFL%8u-c#A^1Mk(Z
z9`cAa&EFDt52Hj0PLxmb@g_`)SW|3kTCMt~!8!?TrVq)E)O0w0-kKi-XZX??3?#_Luf-DS9|@~=v+fwQ85Zstu(k#(zJ?;eGSbooRzEG!+
z;l?sQRrHKWRcg`|9c8clv|8Xe+;Uy>?W$Oe$oei^c%;(MYUb+ed(IphTHPk=b>CHc
z4JNtpogK>Po!g&>RJ61?Vo^8lKdNZ(2xc3k^d;ATHydH7x9cpr_{)A?gPm%ziesrT
zsR_eGSKcF37@NDvEU`$i*Wa~a$4c+2_&tS2(}=$T{OE=2dS6rlF~&JBMJ1yq&%`7_
z9OoIH0+DnH)oaGIO!;CKTCaw3MdFJ~v5U~lYuucshtZUOV@!QHa^U7sH{r8wNLs+GvE0eL80o}+C6G<{Q7Xs||!`i#vX)e;5XIciPxIJ-O#h@k<9{xN$%<{#2^yDJdMkqSP
zJo|nf@5HXwK;bf4)Bdfdv~!}hV;M66+NJH{4Ev5wrPF+;1{C_j@HVp>rd7IIHnku$
z;qztR@uxm9u}ZUE$#4;!fCQ~>%kVGlRFlHSeWmA0Xmc2K6PmUH2Iu{`LbXt{K2D}6
zT#aDSE5-Ceg9@daGUgI_F#*wUcr*0fTW4K&`^n#`t%q-U_e6JYx}*e+ij*hQ3|(WC
zx_CO=d|C$=s;^+s-_I#(sL1F2Qma3_M_ExKJUR92oXMEM?4LN;I=+AWGT8U*b?q*8Bbm8+&cCE2{(*1|qrqBM^2?@Fx2~B+;c>?FU!uwVoz(Ov4d~FoUnJuYVPw
zgvb}Jx%)%JA4pjJ3m6W~JvC{RwnR1NCLM+#iOb&sBV&V^waOEdlj$yIw?xNwsWDi0
zH-xxt1taasein?IFTGnpBl6~Jn_Z>0wHDDQ#QfwsmBym!4&R@8MV9>F0;ZM8lggWz
zMydI6B#LGeNiSygJJ!71g9tC*;82?(cm5{Bn)8IgTJN8iMRbn-8b>}3pAJeQWw?iM
zaR?ZnaQ*Z^?9JN-i(E4W_$mx1%8*l??K92!i*9zjDKGT#m^y(=z7Gc;jv02>ZnJ}R
zC5612Y}8F0Tl^bSw@=bHGbygzcf(c^pL7>