You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I'm not sure if it is a bug as I know little about the Kafka spec; I just read the code of sarama to find out why my sarama producer hangs. I still can't find the root cause, but I found some differences between brokerProducer.handleSuccess() and brokerProducer.handleError()
// handleSuccess processes a ProduceResponse for the request set `sent`.
// It walks every partition that was in the REQUEST (not the response) so a
// block missing from the response is noticed and surfaced as
// ErrIncompleteResponse. Successful partitions have offsets (and, on
// v0.10.0.0+, timestamps) stamped onto their messages; retriable errors are
// collected and replayed in a second pass so that metadata can be refreshed
// first when the producer is idempotent.
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
	// we iterate through the blocks in the request set, not the response, so that we notice
	// if the response is missing a block completely
	var retryTopics []string
	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
		if response == nil {
			// this only happens when RequiredAcks is NoResponse, so we have to assume success
			bp.parent.returnSuccesses(pSet.msgs)
			return
		}

		block := response.GetBlock(topic, partition)
		if block == nil {
			// The broker answered, but omitted this partition entirely.
			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
			return
		}

		switch block.Err {
		// Success
		case ErrNoError:
			// Brokers >= 0.10.0.0 may report a log-append timestamp; copy it
			// onto every message in the set when present (non-zero).
			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
				for _, msg := range pSet.msgs {
					msg.Timestamp = block.Timestamp
				}
			}
			// The broker returns the base offset; messages in the set are
			// assigned consecutive offsets from it.
			for i, msg := range pSet.msgs {
				msg.Offset = block.Offset + int64(i)
			}
			bp.parent.returnSuccesses(pSet.msgs)
		// Duplicate
		case ErrDuplicateSequenceNumber:
			// The batch was already written (idempotent dedup) — treat as success.
			bp.parent.returnSuccesses(pSet.msgs)
		// Retriable errors
		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
			if bp.parent.conf.Producer.Retry.Max <= 0 {
				// Retries are disabled: drop the broker and fail the messages.
				bp.parent.abandonBrokerConnection(bp.broker)
				bp.parent.returnErrors(pSet.msgs, block.Err)
			} else {
				// Defer the actual retry to the second pass below.
				retryTopics = append(retryTopics, topic)
			}
		// Other non-retriable errors
		default:
			if bp.parent.conf.Producer.Retry.Max <= 0 {
				bp.parent.abandonBrokerConnection(bp.broker)
			}
			bp.parent.returnErrors(pSet.msgs, block.Err)
		}
	})
	if len(retryTopics) > 0 {
		if bp.parent.conf.Producer.Idempotent {
			// Refresh leadership info before retrying so idempotent batches
			// are routed to the new leader; a failure here is only logged —
			// the retry proceeds regardless.
			err := bp.parent.client.RefreshMetadata(retryTopics...)
			if err != nil {
				Logger.Printf("Failed refreshing metadata because of %v\n", err)
			}
		}
		// Second pass: re-visit the partitions whose blocks carried a
		// retriable error and push their messages back into the retry path.
		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
			block := response.GetBlock(topic, partition)
			if block == nil {
				// handled in the previous "eachPartition" loop
				return
			}
			switch block.Err {
			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
					bp.broker.ID(), topic, partition, block.Err)
				// Record the per-partition retry state before replaying.
				if bp.currentRetries[topic] == nil {
					bp.currentRetries[topic] = make(map[int32]error)
				}
				bp.currentRetries[topic][partition] = block.Err
				if bp.parent.conf.Producer.Idempotent {
					// Idempotent mode retries the whole batch asynchronously
					// to preserve sequence numbers.
					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
				} else {
					bp.parent.retryMessages(pSet.msgs, block.Err)
				}
				// dropping the following messages has the side effect of incrementing their retry count
				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
			}
		})
	}
}
func (bp*brokerProducer) handleError(sent*produceSet, errerror) {
vartargetPacketEncodingErroriferrors.As(err, &target) {
sent.eachPartition(func(topicstring, partitionint32, pSet*partitionSet) {
bp.parent.returnErrors(pSet.msgs, err)
})
} else {
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_=bp.broker.Close()
bp.closing=err// should we check bp.parent.conf.Producer.Idempotent and call go bp.parent.retryBatch(topic, partition, pSet, block.Err) here ???/* if bp.parent.conf.Producer.Idempotent { sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { go bp.parent.retryBatch(topic, partition, pSet, block.Err) }) } else { ... } */sent.eachPartition(func(topicstring, partitionint32, pSet*partitionSet) {
bp.parent.retryMessages(pSet.msgs, err)
})
bp.buffer.eachPartition(func(topicstring, partitionint32, pSet*partitionSet) {
bp.parent.retryMessages(pSet.msgs, err)
})
bp.rollOver()
}
}
Versions
Sarama
Kafka
Go
main
1.1.1
1.21.0
Configuration
Logs
logs: CLICK ME
Additional Context
The text was updated successfully, but these errors were encountered:
Description
I'm not sure if it is a bug as I know little about the Kafka spec; I just read the code of sarama to find out why my sarama producer hangs. I still can't find the root cause, but I found some differences between
brokerProducer.handleSuccess()
and brokerProducer.handleError()
Versions
Configuration
Logs
logs: CLICK ME
Additional Context
The text was updated successfully, but these errors were encountered: