Bump github.com/hashicorp/terraform-plugin-sdk/v2 from 2.26.1 to 2.27.0

Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.26.1 to 2.27.0.
- [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases)
- [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.26.1...v2.27.0)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-07-03 20:21:30 +00:00, committed by GitHub
parent b2403e2569
commit 910ccdb092
722 changed files with 31260 additions and 8125 deletions
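For context, a bump like this is typically reproduced locally (assuming this module vendors its dependencies, as the third-party sources in the diff below suggest) by running go get github.com/hashicorp/terraform-plugin-sdk/v2@v2.27.0 followed by go mod tidy and go mod vendor, which is why go.mod and the vendored gRPC files change together in one commit.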

View File

@ -28,8 +28,13 @@ import (
"google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
var grpclogLogger = grpclog.Component("binarylog")
// Logger specifies MethodLoggers for method names with a Log call that
// takes a context.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type Logger interface {
GetMethodLogger(methodName string) MethodLogger
}
@ -40,8 +45,6 @@ type Logger interface {
// It is used to get a MethodLogger for each individual method.
var binLogger Logger
var grpclogLogger = grpclog.Component("binarylog")
// SetLogger sets the binary logger.
//
// Only call this at init time.

View File

@ -19,6 +19,7 @@
package binarylog
import (
"context"
"net"
"strings"
"sync/atomic"
@ -26,7 +27,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -48,8 +49,11 @@ func (g *callIDGenerator) reset() {
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type MethodLogger interface {
Log(LogEntryConfig)
Log(context.Context, LogEntryConfig)
}
// TruncatingMethodLogger is a method logger that truncates headers and messages
@ -64,6 +68,9 @@ type TruncatingMethodLogger struct {
}
// NewTruncatingMethodLogger returns a new truncating method logger.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
return &TruncatingMethodLogger{
headerMaxLen: h,
@ -79,7 +86,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other libraries to reuse as much
// logic in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
@ -87,22 +94,22 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
switch pay := m.Payload.(type) {
case *pb.GrpcLogEntry_ClientHeader:
case *binlogpb.GrpcLogEntry_ClientHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
case *pb.GrpcLogEntry_ServerHeader:
case *binlogpb.GrpcLogEntry_ServerHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
case *pb.GrpcLogEntry_Message:
case *binlogpb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
return m
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@ -121,7 +128,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
// but not counted towards the size limit.
continue
}
currentEntryLen := uint64(len(entry.Value))
currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
if currentEntryLen > bytesLimit {
break
}
@ -132,7 +139,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
return truncated
}
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}
@ -144,8 +151,11 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated
}
// LogEntryConfig represents the configuration for binary log entry.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type LogEntryConfig interface {
toProto() *pb.GrpcLogEntry
toProto() *binlogpb.GrpcLogEntry
}
// ClientHeader configs the binary log entry to be a ClientHeader entry.
@ -159,10 +169,10 @@ type ClientHeader struct {
PeerAddr net.Addr
}
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
// This function doesn't need to set all the fields (e.g. seq ID). The Log
// function will set the fields when necessary.
clientHeader := &pb.ClientHeader{
clientHeader := &binlogpb.ClientHeader{
Metadata: mdToMetadataProto(c.Header),
MethodName: c.MethodName,
Authority: c.Authority,
@ -170,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
if c.Timeout > 0 {
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
Payload: &pb.GrpcLogEntry_ClientHeader{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
Payload: &binlogpb.GrpcLogEntry_ClientHeader{
ClientHeader: clientHeader,
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -195,19 +205,19 @@ type ServerHeader struct {
PeerAddr net.Addr
}
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
Payload: &pb.GrpcLogEntry_ServerHeader{
ServerHeader: &pb.ServerHeader{
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
Payload: &binlogpb.GrpcLogEntry_ServerHeader{
ServerHeader: &binlogpb.ServerHeader{
Metadata: mdToMetadataProto(c.Header),
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -223,7 +233,7 @@ type ClientMessage struct {
Message interface{}
}
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@ -238,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
Payload: &binlogpb.GrpcLogEntry_Message{
Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -263,7 +273,7 @@ type ServerMessage struct {
Message interface{}
}
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@ -278,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
Payload: &binlogpb.GrpcLogEntry_Message{
Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -300,15 +310,15 @@ type ClientHalfClose struct {
OnClientSide bool
}
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
Payload: nil, // No payload here.
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -324,7 +334,7 @@ type ServerTrailer struct {
PeerAddr net.Addr
}
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
@ -340,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
Payload: &pb.GrpcLogEntry_Trailer{
Trailer: &pb.Trailer{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
Payload: &binlogpb.GrpcLogEntry_Trailer{
Trailer: &binlogpb.Trailer{
Metadata: mdToMetadataProto(c.Trailer),
StatusCode: uint32(st.Code()),
StatusMessage: st.Message(),
@ -352,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -367,15 +377,15 @@ type Cancel struct {
OnClientSide bool
}
func (c *Cancel) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
Payload: nil,
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -392,15 +402,15 @@ func metadataKeyOmit(key string) bool {
return strings.HasPrefix(key, "grpc-")
}
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
ret := &pb.Metadata{}
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
ret := &binlogpb.Metadata{}
for k, vv := range md {
if metadataKeyOmit(k) {
continue
}
for _, v := range vv {
ret.Entry = append(ret.Entry,
&pb.MetadataEntry{
&binlogpb.MetadataEntry{
Key: k,
Value: []byte(v),
},
@ -410,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
return ret
}
func addrToProto(addr net.Addr) *pb.Address {
ret := &pb.Address{}
func addrToProto(addr net.Addr) *binlogpb.Address {
ret := &binlogpb.Address{}
switch a := addr.(type) {
case *net.TCPAddr:
if a.IP.To4() != nil {
ret.Type = pb.Address_TYPE_IPV4
ret.Type = binlogpb.Address_TYPE_IPV4
} else if a.IP.To16() != nil {
ret.Type = pb.Address_TYPE_IPV6
ret.Type = binlogpb.Address_TYPE_IPV6
} else {
ret.Type = pb.Address_TYPE_UNKNOWN
ret.Type = binlogpb.Address_TYPE_UNKNOWN
// Do not set address and port fields.
break
}
ret.Address = a.IP.String()
ret.IpPort = uint32(a.Port)
case *net.UnixAddr:
ret.Type = pb.Address_TYPE_UNIX
ret.Type = binlogpb.Address_TYPE_UNIX
ret.Address = a.String()
default:
ret.Type = pb.Address_TYPE_UNKNOWN
ret.Type = binlogpb.Address_TYPE_UNKNOWN
}
return ret
}
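The hunks above change MethodLogger.Log and TruncatingMethodLogger.Log to take a context.Context. Below is a minimal sketch of how an in-repo caller might use the updated signature; the package name binarylogexample is hypothetical, and the import path assumes grpc-go's internal/binarylog layout, so it only builds inside grpc-go.

```go
package binarylogexample // hypothetical in-repo package, for illustration only

import (
	"context"
	"time"

	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

// logClientHeader builds a truncating method logger (headers capped at 1 KiB,
// messages at 4 KiB) and logs a ClientHeader entry using the new
// context-aware Log signature; the entry is written to the package's
// configured sink.
func logClientHeader(ctx context.Context) {
	ml := binarylog.NewTruncatingMethodLogger(1024, 4096)
	ml.Log(ctx, &binarylog.ClientHeader{
		OnClientSide: true,
		Header:       metadata.Pairs("user-agent", "example"),
		MethodName:   "/pkg.Service/Method",
		Authority:    "example.com",
		Timeout:      5 * time.Second,
	})
}
```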

View File

@ -26,7 +26,7 @@ import (
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)
var (
@ -42,15 +42,15 @@ type Sink interface {
// Write will be called to write the log entry into the sink.
//
// It should be thread-safe so it can be called in parallel.
Write(*pb.GrpcLogEntry) error
Write(*binlogpb.GrpcLogEntry) error
// Close will be called when the Sink is replaced by a new Sink.
Close() error
}
type noopSink struct{}
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
// newWriterSink creates a binary log sink with the given writer.
//
@ -66,7 +66,7 @@ type writerSink struct {
out io.Writer
}
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
@ -96,7 +96,7 @@ type bufferedSink struct {
done chan struct{}
}
func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
fs.mu.Lock()
defer fs.mu.Unlock()
if !fs.flusherStarted {

View File

@ -35,6 +35,7 @@ import "sync"
// internal/transport/transport.go for an example of this.
type Unbounded struct {
c chan interface{}
closed bool
mu sync.Mutex
backlog []interface{}
}
@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
// Put adds t to the unbounded buffer.
func (b *Unbounded) Put(t interface{}) {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
if len(b.backlog) == 0 {
select {
case b.c <- t:
b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, t)
b.mu.Unlock()
}
// Load sends the earliest buffered data, if any, onto the read channel
@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
default:
}
}
b.mu.Unlock()
}
// Get returns a read channel on which values added to the buffer, via Put(),
@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
//
// Upon reading a value from this channel, users are expected to call Load() to
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
// is closed.
func (b *Unbounded) Get() <-chan interface{} {
return b.c
}
// Close closes the unbounded buffer.
func (b *Unbounded) Close() {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
b.closed = true
close(b.c)
}
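The Close method added above pairs with the existing Put/Load/Get contract. A short consumer sketch under that contract follows; the package name is hypothetical, and internal/buffer is only importable from within grpc-go.

```go
package bufferexample // hypothetical in-repo package, for illustration only

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

// consume drains an Unbounded: after each value read from Get(), Load() is
// called so the buffer can push the next backlogged value onto the channel.
// The range loop ends once Close() is called, because Close() closes the
// channel returned by Get(); Put() and Load() become no-ops at that point.
func consume(b *buffer.Unbounded) {
	for v := range b.Get() {
		b.Load()
		fmt.Println("received:", v)
	}
}
```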

View File

@ -21,19 +21,46 @@ package envconfig
import (
"os"
"strconv"
"strings"
)
const (
prefix = "GRPC_GO_"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS"
)
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
// AdvertiseCompressors is set if registered compressor should be advertised
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false")
AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
// PickFirstLBConfig is set if we should support configuration of the
// pick_first LB policy, which can be enabled by setting the environment
// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
)
func boolFromEnv(envVar string, def bool) bool {
if def {
// The default is true; return true unless the variable is "false".
return !strings.EqualFold(os.Getenv(envVar), "false")
}
// The default is false; return false unless the variable is "true".
return strings.EqualFold(os.Getenv(envVar), "true")
}
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
if err != nil {
return def
}
if v < min {
return min
}
if v > max {
return max
}
return v
}
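For reference, a tiny sketch of how in-repo code reads the values computed by these helpers; the package name is hypothetical and internal/envconfig is not importable from outside grpc-go. The printed defaults follow the doc comments above and assume none of the variables are set in the environment.

```go
package envexample // hypothetical in-repo package, for illustration only

import (
	"fmt"

	"google.golang.org/grpc/internal/envconfig"
)

// printDefaults shows the values computed once at package init when none of
// the environment variables are set: TXTErrIgnore=true,
// AdvertiseCompressors=true, RingHashCap=4096, PickFirstLBConfig=false.
func printDefaults() {
	fmt.Println(envconfig.TXTErrIgnore, envconfig.AdvertiseCompressors,
		envconfig.RingHashCap, envconfig.PickFirstLBConfig)
}
```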

View File

@ -28,9 +28,15 @@ const (
var (
// ObservabilityConfig is the json configuration for the gcp/observability
// package specified directly in the envObservabilityConfig env var.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ObservabilityConfig = os.Getenv(envObservabilityConfig)
// ObservabilityConfigFile is the json configuration for the
// gcp/observability specified in a file with the location specified in
// envObservabilityConfigFile env var.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
)

View File

@ -20,7 +20,6 @@ package envconfig
import (
"os"
"strings"
)
const (
@ -36,16 +35,6 @@ const (
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
)
var (
@ -64,38 +53,43 @@ var (
// XDSRingHash indicates whether ring hash support is enabled, which can be
// disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
// XDSClientSideSecurity is used to control processing of security
// configuration on the client-side.
//
// Note that there is no env var protection for the server-side because we
// have a brand new API on the server-side and users explicitly need to use
// the new API to get security integration on the server.
XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
// and DNS cluster is enabled, which can be enabled by setting the
// environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
// "true".
XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
// DNS cluster is enabled, which can be disabled by setting the environment
// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
// to "false".
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
// which can be disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
// XDSOutlierDetection indicates whether outlier detection support is
// enabled, which can be disabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
// XDSFederation indicates whether federation support is enabled, which can
// be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
// XDSRLS indicates whether processing of Cluster Specifier plugins and
// support for the RLS Cluster Specifier is enabled, which can be enabled by
// support for the RLS Cluster Specifier is enabled, which can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
// "true".
XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
// "false".
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
// can be disabled by setting the environment variable
// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)

View File

@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
if !Logger.V(2) {
return
}
@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
return Logger.V(l)
}
// NewPrefixLogger creates a prefix logger with the given prefix.

View File

@ -72,3 +72,17 @@ func Uint64() uint64 {
defer mu.Unlock()
return r.Uint64()
}
// Uint32 implements rand.Uint32 on the grpcrand global source.
func Uint32() uint32 {
mu.Lock()
defer mu.Unlock()
return r.Uint32()
}
// Shuffle implements rand.Shuffle on the grpcrand global source.
var Shuffle = func(n int, f func(int, int)) {
mu.Lock()
defer mu.Unlock()
r.Shuffle(n, f)
}

View File

@ -0,0 +1,119 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcsync
import (
"context"
"sync"
"google.golang.org/grpc/internal/buffer"
)
// CallbackSerializer provides a mechanism to schedule callbacks in a
// synchronized manner. It provides a FIFO guarantee on the order of execution
// of scheduled callbacks. New callbacks can be scheduled by invoking the
// Schedule() method.
//
// This type is safe for concurrent access.
type CallbackSerializer struct {
// Done is closed once the serializer is shut down completely, i.e all
// scheduled callbacks are executed and the serializer has deallocated all
// its resources.
Done chan struct{}
callbacks *buffer.Unbounded
closedMu sync.Mutex
closed bool
}
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
// context will be passed to the scheduled callbacks. Users should cancel the
// provided context to shutdown the CallbackSerializer. It is guaranteed that no
// callbacks will be added once this context is canceled, and any pending un-run
// callbacks will be executed before the serializer is shut down.
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
t := &CallbackSerializer{
Done: make(chan struct{}),
callbacks: buffer.NewUnbounded(),
}
go t.run(ctx)
return t
}
// Schedule adds a callback to be scheduled after existing callbacks are run.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
//
// Return value indicates if the callback was successfully added to the list of
// callbacks to be executed by the serializer. It is not possible to add
// callbacks once the context passed to NewCallbackSerializer is cancelled.
func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
t.closedMu.Lock()
defer t.closedMu.Unlock()
if t.closed {
return false
}
t.callbacks.Put(f)
return true
}
func (t *CallbackSerializer) run(ctx context.Context) {
var backlog []func(context.Context)
defer close(t.Done)
for ctx.Err() == nil {
select {
case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil.
case callback, ok := <-t.callbacks.Get():
if !ok {
return
}
t.callbacks.Load()
callback.(func(ctx context.Context))(ctx)
}
}
// Fetch pending callbacks if any, and execute them before returning from
// this method and closing t.Done.
t.closedMu.Lock()
t.closed = true
backlog = t.fetchPendingCallbacks()
t.callbacks.Close()
t.closedMu.Unlock()
for _, b := range backlog {
b(ctx)
}
}
func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
var backlog []func(context.Context)
for {
select {
case b := <-t.callbacks.Get():
backlog = append(backlog, b.(func(context.Context)))
t.callbacks.Load()
default:
return backlog
}
}
}
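A minimal usage sketch for the new CallbackSerializer; the package name is hypothetical and internal/grpcsync is in-repo only. It exercises the documented contract: callbacks run one at a time in FIFO order, cancelling the constructor's context stops further scheduling, and Done is closed once all pending callbacks have run.

```go
package grpcsyncexample // hypothetical in-repo package, for illustration only

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func example() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	for i := 0; i < 3; i++ {
		i := i
		ok := cs.Schedule(func(ctx context.Context) {
			// Blocking work should watch ctx and return early on cancellation.
			fmt.Println("callback", i)
		})
		fmt.Println("scheduled:", ok)
	}

	cancel()  // no further callbacks can be scheduled after this
	<-cs.Done // closed once all previously scheduled callbacks have run
}
```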

View File

@ -58,6 +58,12 @@ var (
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString interface{} // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
@ -66,26 +72,54 @@ var (
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalServerOptions func()
// AddGlobalDialOptions adds an array of DialOption that will be effective
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions).
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
DisableGlobalDialOptions interface{} // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
// JoinServerOptions combines the server options passed as arguments into a
// single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
// WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a
// server.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
@ -127,6 +161,9 @@ var (
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

View File

@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
return addr
}
// Validate returns an error if the input md contains invalid keys or values.
//
// If the header is not a pseudo-header, the following items are checked:
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
// Validate validates every pair in md with ValidatePair.
func Validate(md metadata.MD) error {
for k, vals := range md {
// pseudo-header will be ignored
if k[0] == ':' {
continue
}
// check key, for i that saving a conversion if not using for range
for i := 0; i < len(k); i++ {
r := k[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
}
}
if strings.HasSuffix(k, "-bin") {
continue
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
}
if err := ValidatePair(k, vals...); err != nil {
return err
}
}
return nil
@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
}
return false
}
// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
//
// - key must contain one or more characters.
// - the characters in the key must be contained in [0-9 a-z _ - .].
// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
// - the characters in every value must be printable (in [%x20-%x7E]).
func ValidatePair(key string, vals ...string) error {
// key should not be empty
if key == "" {
return fmt.Errorf("there is an empty key in the header")
}
// pseudo-header will be ignored
if key[0] == ':' {
return nil
}
// check key; an index-based loop avoids the rune conversion a range loop would do
for i := 0; i < len(key); i++ {
r := key[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
}
}
if strings.HasSuffix(key, "-bin") {
return nil
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
}
}
return nil
}
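A few illustrative calls against the rules stated above; the alias and package name are hypothetical, and internal/metadata is in-repo only.

```go
package metadataexample // hypothetical in-repo package, for illustration only

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
)

func example() {
	fmt.Println(imetadata.ValidatePair("content-type", "application/grpc")) // nil
	fmt.Println(imetadata.ValidatePair("trace-bin", "\x00\x01"))            // nil: "-bin" values are not inspected
	fmt.Println(imetadata.ValidatePair("Bad Key", "v"))                     // error: uppercase and space are illegal in keys
	fmt.Println(imetadata.ValidatePair("key", "bad\x7fvalue"))              // error: non-printable byte in value
}
```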

View File

@ -116,7 +116,7 @@ type dnsBuilder struct{}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
host, port, err := parseTarget(target.Endpoint, defaultPort)
host, port, err := parseTarget(target.Endpoint(), defaultPort)
if err != nil {
return nil, err
}
@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
disableServiceConfig: opts.DisableServiceConfig,
}
if target.Authority == "" {
if target.URL.Host == "" {
d.resolver = defaultResolver
} else {
d.resolver, err = customAuthorityResolver(target.Authority)
d.resolver, err = customAuthorityResolver(target.URL.Host)
if err != nil {
return nil, err
}

View File

@ -20,13 +20,20 @@
// name without scheme back to gRPC as resolved address.
package passthrough
import "google.golang.org/grpc/resolver"
import (
"errors"
"google.golang.org/grpc/resolver"
)
const scheme = "passthrough"
type passthroughBuilder struct{}
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
if target.Endpoint() == "" && opts.Dialer == nil {
return nil, errors.New("passthrough: received empty target in Build()")
}
r := &passthroughResolver{
target: target,
cc: cc,
@ -45,7 +52,7 @@ type passthroughResolver struct {
}
func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}

View File

@ -34,8 +34,8 @@ type builder struct {
}
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
if target.Authority != "" {
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
if target.URL.Host != "" {
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
}
// gRPC was parsing the dial target manually before PR #4817, and we

View File

@ -0,0 +1,130 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package serviceconfig
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
)
// Duration defines JSON marshal and unmarshal methods to conform to the
// protobuf JSON spec defined [here].
//
// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
type Duration time.Duration
func (d Duration) String() string {
return fmt.Sprint(time.Duration(d))
}
// MarshalJSON converts from d to a JSON string output.
func (d Duration) MarshalJSON() ([]byte, error) {
ns := time.Duration(d).Nanoseconds()
sec := ns / int64(time.Second)
ns = ns % int64(time.Second)
var sign string
if sec < 0 || ns < 0 {
sign, sec, ns = "-", -1*sec, -1*ns
}
// Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision.
str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
str = strings.TrimSuffix(str, "000")
str = strings.TrimSuffix(str, "000")
str = strings.TrimSuffix(str, ".000")
return []byte(fmt.Sprintf("\"%ss\"", str)), nil
}
// UnmarshalJSON unmarshals b as a duration JSON string into d.
func (d *Duration) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
if !strings.HasSuffix(s, "s") {
return fmt.Errorf("malformed duration %q: missing seconds unit", s)
}
neg := false
if s[0] == '-' {
neg = true
s = s[1:]
}
ss := strings.SplitN(s[:len(s)-1], ".", 3)
if len(ss) > 2 {
return fmt.Errorf("malformed duration %q: too many decimals", s)
}
// hasDigits is set if either the whole or fractional part of the number is
// present, since both are optional but one is required.
hasDigits := false
var sec, ns int64
if len(ss[0]) > 0 {
var err error
if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
return fmt.Errorf("malformed duration %q: %v", s, err)
}
// Maximum seconds value per the durationpb spec.
const maxProtoSeconds = 315_576_000_000
if sec > maxProtoSeconds {
return fmt.Errorf("out of range: %q", s)
}
hasDigits = true
}
if len(ss) == 2 && len(ss[1]) > 0 {
if len(ss[1]) > 9 {
return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
}
var err error
if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
return fmt.Errorf("malformed duration %q: %v", s, err)
}
for i := 9; i > len(ss[1]); i-- {
ns *= 10
}
hasDigits = true
}
if !hasDigits {
return fmt.Errorf("malformed duration %q: contains no numbers", s)
}
if neg {
sec *= -1
ns *= -1
}
// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
const maxSeconds = math.MaxInt64 / int64(time.Second)
const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
const minSeconds = math.MinInt64 / int64(time.Second)
const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
*d = Duration(math.MaxInt64)
} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
*d = Duration(math.MinInt64)
} else {
*d = Duration(sec*int64(time.Second) + ns)
}
return nil
}
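A round-trip sketch of the JSON form implemented above: durations marshal as decimal seconds with an "s" suffix and 0, 3, 6, or 9 fractional digits, and unmarshal back with clamping to time.Duration's range. The package name is hypothetical, and the import path assumes grpc-go keeps this file under internal/serviceconfig, so it only builds in-repo.

```go
package durationexample // hypothetical in-repo package, for illustration only

import (
	"encoding/json"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/serviceconfig"
)

func roundTrip() error {
	d := serviceconfig.Duration(1500 * time.Millisecond)
	b, err := json.Marshal(d)
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // "1.500s"

	var parsed serviceconfig.Duration
	if err := json.Unmarshal([]byte(`"0.25s"`), &parsed); err != nil {
		return err
	}
	fmt.Println(time.Duration(parsed)) // 250ms
	return nil
}
```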

View File

@ -22,6 +22,7 @@ import (
"bytes"
"errors"
"fmt"
"net"
"runtime"
"strconv"
"sync"
@ -29,6 +30,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/status"
)
@ -191,7 +193,7 @@ type goAway struct {
code http2.ErrCode
debugData []byte
headsUp bool
closeConn bool
closeConn error // if set, loopyWriter will exit, resulting in conn closure
}
func (*goAway) isTransportResponseFrame() bool { return false }
@ -209,6 +211,14 @@ type outFlowControlSizeRequest struct {
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
// closeConnection is an instruction to tell the loopy writer to flush the
// framer and exit, which will cause the transport's connection to be closed
// (by the client or server). The transport itself will close after the reader
// encounters the EOF caused by the connection closure.
type closeConnection struct{}
func (closeConnection) isTransportResponseFrame() bool { return false }
type outStreamState int
const (
@ -408,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
select {
case <-c.ch:
case <-c.done:
return nil, ErrConnClosing
return nil, errors.New("transport closed by client")
}
}
}
@ -478,12 +488,14 @@ type loopyWriter struct {
hEnc *hpack.Encoder // HPACK encoder.
bdpEst *bdpEstimator
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@ -496,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
bdpEst: bdpEst,
conn: conn,
logger: logger,
}
return l
}
@ -513,23 +527,26 @@ const minBatchSize = 1000
// 2. Stream level flow control quota available.
//
// In each iteration of run loop, other than processing the incoming control
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
// This results in writing of HTTP2 frames into an underlying write buffer.
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
// frame, loopy calls processData, which processes one node from the
// activeStreams linked-list. This results in writing of HTTP2 frames into an
// underlying write buffer. When there's no more control frames to read from
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
// the batch size for each flush, loopy yields the processor, once if the batch
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
// flushes and closes the underlying connection. Otherwise, the connection is
// left open to allow the I/O error to be encountered by the reader instead.
func (l *loopyWriter) run() (err error) {
defer func() {
if err == ErrConnClosing {
// Don't log ErrConnClosing as error since it happens
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter.run returning. %v", err)
}
err = nil
if l.logger.V(logLevel) {
l.logger.Infof("loopyWriter exiting with error: %v", err)
}
if !isIOError(err) {
l.framer.writer.Flush()
l.conn.Close()
}
l.cbuf.finish()
}()
for {
it, err := l.cbuf.get(true)
@ -574,7 +591,6 @@ func (l *loopyWriter) run() (err error) {
}
l.framer.writer.Flush()
break hasdata
}
}
}
@ -583,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
// Otherwise update the quota.
if w.streamID == 0 {
l.sendQuota += w.increment
return nil
return
}
// Find the stream and update it.
if str, ok := l.estdStreams[w.streamID]; ok {
@ -595,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
str.state = active
l.activeStreams.enqueue(str)
return nil
return
}
}
return nil
}
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
@ -606,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
}
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
if err := l.applySettings(s.ss); err != nil {
return err
}
l.applySettings(s.ss)
return l.framer.fr.WriteSettingsAck()
}
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
id: h.streamID,
state: empty,
@ -620,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
wq: h.wq,
}
l.estdStreams[h.streamID] = str
return nil
}
func (l *loopyWriter) headerHandler(h *headerFrame) error {
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
if logger.V(logLevel) {
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
if l.logger.V(logLevel) {
l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
}
return nil
}
@ -655,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
itl: &itemList{},
wq: h.wq,
}
str.itl.enqueue(h)
return l.originateStream(str)
return l.originateStream(str, h)
}
func (l *loopyWriter) originateStream(str *outStream) error {
hdr := str.itl.dequeue().(*headerFrame)
if err := hdr.initStream(str.id); err != nil {
if err == ErrConnClosing {
return err
}
// Other errors(errStreamDrain) need not close transport.
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
// l.draining is set when handling GoAway. In which case, we want to avoid
// creating new streams.
if l.draining {
// TODO: provide a better error with the reason we are in draining.
hdr.onOrphaned(errStreamDrain)
return nil
}
if err := hdr.initStream(str.id); err != nil {
return err
}
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
return err
}
@ -682,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
if logger.V(logLevel) {
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
if l.logger.V(logLevel) {
l.logger.Warningf("Encountered error while encoding headers: %v", err)
}
}
}
@ -721,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
return nil
}
func (l *loopyWriter) preprocessData(df *dataFrame) error {
func (l *loopyWriter) preprocessData(df *dataFrame) {
str, ok := l.estdStreams[df.streamID]
if !ok {
return nil
return
}
// If we got data for a stream it means that
// stream was originated and the headers were sent out.
@ -733,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
str.state = active
l.activeStreams.enqueue(str)
}
return nil
}
func (l *loopyWriter) pingHandler(p *ping) error {
@ -744,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
}
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
o.resp <- l.sendQuota
return nil
}
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
@ -763,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
return err
}
}
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
return ErrConnClosing
if l.draining && len(l.estdStreams) == 0 {
// Flush and close the connection; we are done with it.
return errors.New("finished processing active streams while in draining mode")
}
return nil
}
@ -799,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
if len(l.estdStreams) == 0 {
return ErrConnClosing
// Flush and close the connection; we are done with it.
return errors.New("received GOAWAY with no active streams")
}
}
return nil
@ -820,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
func (l *loopyWriter) handle(i interface{}) error {
switch i := i.(type) {
case *incomingWindowUpdate:
return l.incomingWindowUpdateHandler(i)
l.incomingWindowUpdateHandler(i)
case *outgoingWindowUpdate:
return l.outgoingWindowUpdateHandler(i)
case *incomingSettings:
@ -830,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error {
case *headerFrame:
return l.headerHandler(i)
case *registerStream:
return l.registerStreamHandler(i)
l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
case *earlyAbortStream:
@ -838,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error {
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
return l.preprocessData(i)
l.preprocessData(i)
case *ping:
return l.pingHandler(i)
case *goAway:
return l.goAwayHandler(i)
case *outFlowControlSizeRequest:
return l.outFlowControlSizeRequestHandler(i)
l.outFlowControlSizeRequestHandler(i)
case closeConnection:
// Just return a non-I/O error and run() will flush and close the
// connection.
return ErrConnClosing
default:
return fmt.Errorf("transport: unknown control message type %T", i)
}
return nil
}
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
func (l *loopyWriter) applySettings(ss []http2.Setting) {
for _, s := range ss {
switch s.ID {
case http2.SettingInitialWindowSize:
@ -869,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
updateHeaderTblSize(l.hEnc, s.Val)
}
}
return nil
}
// processData removes the first stream from active streams, writes out at most 16KB
@ -903,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
return false, err
}
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
return false, nil
return false, err
}
} else {
l.activeStreams.enqueue(str)

View File

@ -47,3 +47,9 @@ const (
defaultClientMaxHeaderListSize = uint32(16 << 20)
defaultServerMaxHeaderListSize = uint32(16 << 20)
)
// MaxStreamID is the upper bound for the stream ID before the current
// transport gracefully closes and a new transport is created for subsequent RPCs.
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
// integer. It's exported so that tests can override it.
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)

View File

@ -39,6 +39,7 @@ import (
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -46,24 +47,32 @@ import (
"google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
if r.ProtoMajor != 2 {
return nil, errors.New("gRPC requires HTTP/2")
msg := "gRPC requires HTTP/2"
http.Error(w, msg, http.StatusBadRequest)
return nil, errors.New(msg)
}
if r.Method != "POST" {
return nil, errors.New("invalid gRPC request method")
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
http.Error(w, msg, http.StatusBadRequest)
return nil, errors.New(msg)
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
if !validContentType {
return nil, errors.New("invalid gRPC request content-type")
msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
http.Error(w, msg, http.StatusUnsupportedMediaType)
return nil, errors.New(msg)
}
if _, ok := w.(http.Flusher); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
http.Error(w, msg, http.StatusInternalServerError)
return nil, errors.New(msg)
}
st := &serverHandlerTransport{
@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentSubtype: contentSubtype,
stats: stats,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
if v := r.Header.Get("grpc-timeout"); v != "" {
to, err := decodeTimeout(v)
if err != nil {
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
http.Error(w, msg, http.StatusBadRequest)
return nil, status.Error(codes.Internal, msg)
}
st.timeoutSet = true
st.timeout = to
@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
for _, v := range vv {
v, err := decodeMetadataHeader(k, v)
if err != nil {
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
http.Error(w, msg, http.StatusBadRequest)
return nil, status.Error(codes.Internal, msg)
}
metakv = append(metakv, k, v)
}
@ -138,15 +152,19 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
stats []stats.Handler
stats []stats.Handler
logger *grpclog.PrefixLogger
}
func (ht *serverHandlerTransport) Close() {
ht.closeOnce.Do(ht.closeCloseChanOnce)
func (ht *serverHandlerTransport) Close(err error) {
ht.closeOnce.Do(func() {
if ht.logger.V(logLevel) {
ht.logger.Infof("Closing: %v", err)
}
close(ht.closedCh)
})
}
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
@ -236,7 +254,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
})
}
}
ht.Close()
ht.Close(errors.New("finished writing status"))
return err
}
@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
case <-ht.req.Context().Done():
}
cancel()
ht.Close()
ht.Close(errors.New("request is done processing"))
}()
req := ht.req
@ -435,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
func (ht *serverHandlerTransport) Drain() {
func (ht *serverHandlerTransport) Drain(debugData string) {
panic("Drain() is not implemented")
}

View File

@ -38,6 +38,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
@ -59,11 +60,15 @@ var clientConnectionCounter uint64
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
// address contains the resolver returned address for this transport.
// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
// passed to `NewStream`, when determining the :authority header.
address resolver.Address
md metadata.MD
conn net.Conn // underlying communication channel
loopy *loopyWriter
@ -136,12 +141,12 @@ type http2Client struct {
channelzID *channelz.Identifier
czData *channelzData
onGoAway func(GoAwayReason)
onClose func()
onClose func(GoAwayReason)
bufferPool *bufferPool
connectionID uint64
logger *grpclog.PrefixLogger
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
@ -193,7 +198,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@ -213,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
}
// Any further errors will close the underlying connection
@ -238,8 +243,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
go func(conn net.Conn) {
defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
if connectCtx.Err() != nil {
if err := connectCtx.Err(); err != nil {
// connectCtx expired before exiting the function. Hard close the connection.
if logger.V(logLevel) {
logger.Infof("Aborting due to connect deadline expiring: %v", err)
}
conn.Close()
}
}(conn)
@ -314,6 +322,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
cancel: cancel,
userAgent: opts.UserAgent,
registeredCompressors: grpcutil.RegisteredCompressors(),
address: addr,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@ -335,11 +344,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
czData: new(channelzData),
onGoAway: onGoAway,
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
onClose: onClose,
}
t.logger = prefixLoggerForClientTransport(t)
// Add peer information to the http2client context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@ -438,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
}
// Do not close the transport. Let reader goroutine handle it since
// there might be data in the buffers.
t.conn.Close()
t.controlBuf.finish()
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.run()
close(t.writerDone)
}()
return t, nil
@ -702,6 +702,18 @@ func (e NewStreamError) Error() string {
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
// ServerName field of the resolver returned address takes precedence over
// Host field of CallHdr to determine the :authority header. This is because
// the ServerName field takes precedence for server authentication during
// TLS handshake, and the :authority header should match the value used
// for server authentication.
if t.address.ServerName != "" {
newCallHdr := *callHdr
newCallHdr.Host = t.address.ServerName
callHdr = &newCallHdr
}
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
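A small sketch of the precedence rule described in the comment above; authorityFor is a hypothetical helper, not a transport method. The resolver-returned ServerName, when set, wins over the per-call host so that :authority matches the name used for TLS server authentication.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func authorityFor(addr resolver.Address, callHost string) string {
	if addr.ServerName != "" {
		return addr.ServerName // must match the name verified during the TLS handshake
	}
	return callHost
}

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend.example.com"}
	fmt.Println(authorityFor(addr, "lb.example.com")) // backend.example.com
	fmt.Println(authorityFor(resolver.Address{Addr: "10.0.0.2:443"}, "lb.example.com"))
}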
@ -726,15 +738,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
endStream: false,
initStream: func(id uint32) error {
t.mu.Lock()
if state := t.state; state != reachable {
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
if t.state == closing {
t.mu.Unlock()
// Do a quick cleanup.
err := error(errStreamDrain)
if state == closing {
err = ErrConnClosing
}
cleanup(err)
return err
cleanup(ErrConnClosing)
return ErrConnClosing
}
if channelz.IsOn() {
atomic.AddInt64(&t.czData.streamsStarted, 1)
@ -752,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
}
firstTry := true
var ch chan struct{}
transportDrainRequired := false
checkForStreamQuota := func(it interface{}) bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry {
@ -767,10 +777,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
h := it.(*headerFrame)
h.streamID = t.nextID
t.nextID += 2
// Drain client transport if nextID > MaxStreamID which signals gRPC that
// the connection is closed and a new one must be created for subsequent RPCs.
transportDrainRequired = t.nextID > MaxStreamID
s.id = h.streamID
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.mu.Lock()
if t.activeStreams == nil { // Can be niled from Close().
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
t.mu.Unlock()
return false // Don't create a stream if the transport is already closed.
}
@ -846,6 +861,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
sh.HandleRPC(s.ctx, outHeader)
}
}
if transportDrainRequired {
if t.logger.V(logLevel) {
t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
}
t.GracefulClose()
}
return s, nil
}
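The drain above exists because HTTP/2 stream IDs are 31-bit and client-initiated IDs are odd, so a single connection can never carry more than roughly 1<<30 RPCs before nextID would pass MaxStreamID. gRPC's own MaxStreamID constant may sit below the full 31-bit ceiling, but the arithmetic is the same; the sketch below assumes the full ceiling purely for illustration.

package main

import "fmt"

func main() {
	// Assumed ceiling: the full 31-bit HTTP/2 stream ID space.
	const maxStreamID = 1<<31 - 1
	const firstClientStreamID, step = 1, 2 // client-initiated IDs are odd and increase by 2
	fmt.Println((maxStreamID-firstClientStreamID)/step + 1) // 1073741824, i.e. 1<<30 streams per connection
}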
@ -934,9 +955,14 @@ func (t *http2Client) Close(err error) {
t.mu.Unlock()
return
}
if t.logger.V(logLevel) {
t.logger.Infof("Closing: %v", err)
}
// Call t.onClose ASAP to prevent the client from attempting to create new
// streams.
t.onClose()
if t.state != draining {
t.onClose(GoAwayInvalid)
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
@ -986,11 +1012,15 @@ func (t *http2Client) GracefulClose() {
t.mu.Unlock()
return
}
if t.logger.V(logLevel) {
t.logger.Infof("GracefulClose called")
}
t.onClose(GoAwayInvalid)
t.state = draining
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
t.Close(ErrConnClosing)
t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
return
}
t.controlBuf.put(&incomingGoAway{})
@ -1147,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
if t.logger.V(logLevel) {
t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
}
statusCode = codes.Unknown
}
@ -1230,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
return
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if logger.V(logLevel) {
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
// enabled by default and double the configured KEEPALIVE_TIME used for new connections
// on that channel.
logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
}
id := f.LastStreamID
if id > 0 && id%2 == 0 {
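The usual way to avoid the ENHANCE_YOUR_CALM / "too_many_pings" GOAWAY handled above is to keep client keepalive pings no more frequent than the server's enforcement policy allows. A configuration sketch using the public keepalive API; the function names, addresses, and durations are placeholders, not recommended defaults.

package keepaliveconfig

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

// dialWithKeepalive pings after 30s of inactivity and waits 10s for the ack;
// 30s stays above the server's 20s MinTime below, so the server never answers
// with GOAWAY "too_many_pings".
func dialWithKeepalive(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second,
			Timeout: 10 * time.Second,
		}),
	)
}

func newServerWithEnforcement() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             20 * time.Second, // reject pings arriving more often than this
			PermitWithoutStream: false,            // pings without active RPCs count as strikes
		}),
	)
}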
@ -1266,8 +1298,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// Notify the clientconn about the GOAWAY before we set the state to
// draining, to allow the client to stop attempting to create streams
// before disallowing new streams on this connection.
t.onGoAway(t.goAwayReason)
t.state = draining
if t.state != draining {
t.onClose(t.goAwayReason)
t.state = draining
}
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@ -1303,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on transport's mutext to be held by
// It expects a lock on transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = GoAwayNoReason
@ -1756,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
return -2
}
}
func (t *http2Client) stateForTesting() transportState {
t.mu.Lock()
defer t.mu.Unlock()
return t.state
}

View File

@ -21,6 +21,7 @@ package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
@ -34,13 +35,16 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -101,13 +105,13 @@ type http2Server struct {
mu sync.Mutex // guard the following
// drainChan is initialized when Drain() is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
// drainEvent is initialized when Drain() is called the first time. After
// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
// an independent goroutine will be launched to later send the second
// GoAway. During this time we don't want to write another first GoAway(with
// ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
// already initialized since draining is already underway.
drainEvent *grpcsync.Event
state transportState
activeStreams map[uint32]*Stream
// idle is the time instant when the connection went idle.
@ -127,6 +131,8 @@ type http2Server struct {
// This lock may not be taken if mu is already held.
maxStreamMu sync.Mutex
maxStreamID uint32 // max stream ID ever seen
logger *grpclog.PrefixLogger
}
// NewServerTransport creates a http2 transport with conn and configuration
@ -265,6 +271,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
t.logger = prefixLoggerForServerTransport(t)
// Add peer information to the http2server context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@ -293,7 +300,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
defer func() {
if err != nil {
t.Close()
t.Close(err)
}
}()
@ -329,23 +336,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
}
t.conn.Close()
t.controlBuf.finish()
t.loopy.run()
close(t.writerDone)
}()
go t.keepalive()
return t, nil
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeFrameSize,
onWrite: func() {},
})
return false
return nil
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
// illegal gRPC stream id.
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
}
return true
return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
}
t.maxStreamID = streamID
@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var (
// If a gRPC Response-Headers has already been received, then it means
// that the peer is speaking gRPC and we are in gRPC mode.
isGRPC = false
mdata = make(map[string][]string)
httpMethod string
// headerError is set if an error is encountered while parsing the headers
headerError bool
// if false, content-type was missing or invalid
isGRPC = false
contentType = ""
mdata = make(metadata.MD, len(frame.Fields))
httpMethod string
// these are set if an error is encountered while parsing the headers
protocolError bool
headerError *status.Status
timeoutSet bool
timeout time.Duration
@ -398,11 +398,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
case "content-type":
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
if !validContentType {
contentType = hf.Value
break
}
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
s.contentSubtype = contentSubtype
isGRPC = true
case "grpc-accept-encoding":
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
if hf.Value == "" {
continue
}
compressors := hf.Value
if s.clientAdvertisedCompressors != "" {
compressors = s.clientAdvertisedCompressors + "," + compressors
}
s.clientAdvertisedCompressors = compressors
case "grpc-encoding":
s.recvCompress = hf.Value
case ":method":
@ -413,23 +425,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
timeoutSet = true
var err error
if timeout, err = decodeTimeout(hf.Value); err != nil {
headerError = true
headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
}
// "Transports must consider requests containing the Connection header
// as malformed." - A41
case "connection":
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
if t.logger.V(logLevel) {
t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
}
headerError = true
protocolError = true
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
}
v, err := decodeMetadataHeader(hf.Name, hf.Value)
if err != nil {
headerError = true
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
break
}
mdata[hf.Name] = append(mdata[hf.Name], v)
@ -443,27 +455,47 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// error, this takes precedence over a client not speaking gRPC.
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
if logger.V(logLevel) {
logger.Errorf("transport: %v", errMsg)
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
httpStatus: 400,
httpStatus: http.StatusBadRequest,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
rst: !frame.StreamEnded(),
})
return false
return nil
}
if !isGRPC || headerError {
if protocolError {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeProtocol,
onWrite: func() {},
})
return false
return nil
}
if !isGRPC {
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusUnsupportedMediaType,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
rst: !frame.StreamEnded(),
})
return nil
}
if headerError != nil {
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusBadRequest,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: headerError,
rst: !frame.StreamEnded(),
})
return nil
}
// "If :authority is missing, Host must be renamed to :authority." - A41
@ -503,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
if t.state != reachable {
t.mu.Unlock()
s.cancel()
return false
return nil
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
@ -514,13 +546,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
onWrite: func() {},
})
s.cancel()
return false
return nil
}
if httpMethod != http.MethodPost {
t.mu.Unlock()
errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
if logger.V(logLevel) {
logger.Infof("transport: %v", errMsg)
errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
httpStatus: 405,
@ -530,14 +562,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rst: !frame.StreamEnded(),
})
s.cancel()
return false
return nil
}
if t.inTapHandle != nil {
var err error
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
t.mu.Unlock()
if logger.V(logLevel) {
logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
}
stat, ok := status.FromError(err)
if !ok {
@ -550,7 +582,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
status: stat,
rst: !frame.StreamEnded(),
})
return false
return nil
}
}
t.activeStreams[streamID] = s
@ -574,7 +606,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Header: mdata.Copy(),
}
sh.HandleRPC(s.ctx, inHeader)
}
@ -597,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
wq: s.wq,
})
handle(s)
return false
return nil
}
// HandleStreams receives incoming streams using the given handler. This is
@ -611,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
if t.logger.V(logLevel) {
t.logger.Warningf("Encountered http2.StreamError: %v", se)
}
t.mu.Lock()
s := t.activeStreams[se.StreamID]
@ -630,19 +662,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
continue
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
t.Close(err)
return
}
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
}
t.Close()
t.Close(err)
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
if t.operateHeaders(frame, handle, traceCtx) {
t.Close()
if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
t.Close(err)
break
}
case *http2.DataFrame:
@ -658,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
if t.logger.V(logLevel) {
t.logger.Infof("Received unsupported frame type %T", frame)
}
}
}
@ -843,8 +872,8 @@ const (
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() {
if f.Data == goAwayPing.data && t.drainChan != nil {
close(t.drainChan)
if f.Data == goAwayPing.data && t.drainEvent != nil {
t.drainEvent.Fire()
return
}
// Maybe it's a BDP ping.
@ -886,10 +915,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
if logger.V(logLevel) {
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
}
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
}
}
@ -921,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
if logger.V(logLevel) {
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
if t.logger.V(logLevel) {
t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
}
return false
}
@ -1035,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
@ -1140,20 +1166,20 @@ func (t *http2Server) keepalive() {
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.Drain()
t.Drain("max_idle")
return
}
idleTimer.Reset(val)
case <-ageTimer.C:
t.Drain()
t.Drain("max_age")
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-ageTimer.C:
// Close the connection after grace period.
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to maximum connection age.")
if t.logger.V(logLevel) {
t.logger.Infof("Closing server transport due to maximum connection age")
}
t.Close()
t.controlBuf.put(closeConnection{})
case <-t.done:
}
return
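The Drain("max_idle") and Drain("max_age") calls above are driven by the server keepalive parameters. A configuration sketch using the public API; newServer is a placeholder and the durations are illustrative, not gRPC's defaults.

package serverkeepalive

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle:     15 * time.Minute, // Drain("max_idle") after this much inactivity
		MaxConnectionAge:      30 * time.Minute, // Drain("max_age") once the connection is this old
		MaxConnectionAgeGrace: 5 * time.Minute,  // then force-close if streams are still running
		Time:                  2 * time.Hour,    // ping an idle client after this long
		Timeout:               20 * time.Second, // close if the ping is not acked in time
	}))
}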
@ -1169,10 +1195,7 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to idleness.")
}
t.Close()
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
return
}
if !outstandingPing {
@ -1199,20 +1222,23 @@ func (t *http2Server) keepalive() {
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() {
func (t *http2Server) Close(err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
}
if t.logger.V(logLevel) {
t.logger.Infof("Closing: %v", err)
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
logger.Infof("transport: error closing conn during Close: %v", err)
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
}
channelz.RemoveEntry(t.channelzID)
// Cancel all active streams.
@ -1292,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
return t.remoteAddr
}
func (t *http2Server) Drain() {
func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
if t.drainEvent != nil {
return
}
t.drainChan = make(chan struct{})
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
t.drainEvent = grpcsync.NewEvent()
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@ -1319,19 +1345,17 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
// Stop accepting more streams now.
t.state = draining
sid := t.maxStreamID
retErr := g.closeConn
if len(t.activeStreams) == 0 {
g.closeConn = true
retErr = errors.New("second GOAWAY written and no active streams left to process")
}
t.mu.Unlock()
t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
if g.closeConn {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return false, fmt.Errorf("transport: Connection closing")
if retErr != nil {
return false, retErr
}
return true, nil
}
@ -1343,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
@ -1353,7 +1377,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.drainChan:
case <-t.drainEvent.Done():
case <-timer.C:
case <-t.done:
return
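From application code, this two-step GOAWAY drain is what grpc.Server.GracefulStop relies on. A minimal shutdown sketch, assuming srv is an already-serving *grpc.Server; the timeout fallback to Stop is an application-level choice, not part of gRPC.

package shutdown

import (
	"time"

	"google.golang.org/grpc"
)

func shutdown(srv *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // drains server transports (GOAWAY) and waits for in-flight RPCs
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		srv.Stop() // force-close remaining connections and streams
	}
}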

View File

@ -21,6 +21,7 @@ package transport
import (
"bufio"
"encoding/base64"
"errors"
"fmt"
"io"
"math"
@ -37,7 +38,6 @@ import (
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
@ -85,7 +85,6 @@ var (
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
logger = grpclog.Component("transport")
)
// isReservedHeader checks whether hdr belongs to HTTP2 headers
@ -330,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
return w.conn.Write(b)
n, err = w.conn.Write(b)
return n, toIOError(err)
}
for len(b) > 0 {
nn := copy(w.buf[w.offset:], b)
@ -352,10 +352,30 @@ func (w *bufWriter) Flush() error {
return nil
}
_, w.err = w.conn.Write(w.buf[:w.offset])
w.err = toIOError(w.err)
w.offset = 0
return w.err
}
type ioError struct {
error
}
func (i ioError) Unwrap() error {
return i.error
}
func isIOError(err error) bool {
return errors.As(err, &ioError{})
}
func toIOError(err error) error {
if err == nil {
return nil
}
return ioError{error: err}
}
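Restated as a standalone check (same helpers, illustrative usage only): wrapping connection-level write errors in ioError lets callers tell them apart from protocol errors with errors.As.

package main

import (
	"errors"
	"fmt"
)

type ioError struct{ error }

func (i ioError) Unwrap() error { return i.error }

func isIOError(err error) bool { return errors.As(err, &ioError{}) }

func toIOError(err error) error {
	if err == nil {
		return nil
	}
	return ioError{error: err}
}

func main() {
	writeErr := toIOError(errors.New("connection reset by peer"))
	fmt.Println(isIOError(writeErr))                         // true: tagged as a connection-level I/O error
	fmt.Println(isIOError(errors.New("protocol violation"))) // false
}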
type framer struct {
writer *bufWriter
fr *http2.Framer

View File

@ -0,0 +1,40 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"fmt"
"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
var logger = grpclog.Component("transport")
func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
}
func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
}
func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
}
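An illustrative stand-in for what this new file wires up, built on the standard library rather than the internal grpclog.PrefixLogger: each transport logs through a prefix embedding the transport's pointer, so interleaved lines from different connections stay distinguishable. All names below are hypothetical.

package main

import (
	"fmt"
	"log"
)

type prefixLogger struct{ prefix string }

func (p *prefixLogger) Infof(format string, args ...interface{}) {
	log.Printf(p.prefix+format, args...)
}

type fakeTransport struct{}

func main() {
	t := &fakeTransport{} // stand-in for an *http2Client
	pl := &prefixLogger{prefix: fmt.Sprintf("[client-transport %p] ", t)}
	pl.Infof("Closing: %v", "connection error")
}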

View File

@ -257,6 +257,9 @@ type Stream struct {
fc *inFlow
wq *writeQuota
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client. This is empty for the client side stream.
clientAdvertisedCompressors string
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if needed.
requestRead func(int)
@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string {
}
// SetSendCompress sets the compression algorithm to the stream.
func (s *Stream) SetSendCompress(str string) {
s.sendCompress = str
func (s *Stream) SetSendCompress(name string) error {
if s.isHeaderSent() || s.getState() == streamDone {
return errors.New("transport: set send compressor called after headers sent or stream done")
}
s.sendCompress = name
return nil
}
// SendCompress returns the send compressor name.
func (s *Stream) SendCompress() string {
return s.sendCompress
}
// ClientAdvertisedCompressors returns the compressor names advertised by the
// client via grpc-accept-encoding header.
func (s *Stream) ClientAdvertisedCompressors() string {
return s.clientAdvertisedCompressors
}
// Done returns a channel which is closed when it receives the final status
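One way server-side code might combine ClientAdvertisedCompressors with SetSendCompress is to pick the first client-advertised compressor it also supports. pickSendCompressor below is a hypothetical helper sketching that selection, not gRPC's actual negotiation logic.

package main

import (
	"fmt"
	"strings"
)

func pickSendCompressor(clientAdvertised string, serverSupported []string) string {
	supported := make(map[string]bool, len(serverSupported))
	for _, name := range serverSupported {
		supported[name] = true
	}
	for _, name := range strings.Split(clientAdvertised, ",") {
		if name = strings.TrimSpace(name); supported[name] {
			return name
		}
	}
	return "" // empty means identity: send the response uncompressed
}

func main() {
	fmt.Println(pickSendCompressor("gzip, snappy", []string{"zstd", "gzip"})) // gzip
}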
@ -583,8 +602,8 @@ type ConnectOptions struct {
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
}
// Options provides additional hints and information for message
@ -701,13 +720,13 @@ type ServerTransport interface {
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
Close()
Close(err error)
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain()
Drain(debugData string)
// IncrMsgSent increments the number of message sent through this transport.
IncrMsgSent()