updated GHA
Update to v2 SDK; update dependencies
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go (generated, vendored; new file, 384 lines)

@@ -0,0 +1,384 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch

import (
	"errors"
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)

// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
	return &Balancer{
		cc:    cc,
		bOpts: opts,
	}
}

// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
	bOpts balancer.BuildOptions
	cc    balancer.ClientConn

	// mu protects the following fields and all fields within balancerCurrent
	// and balancerPending. mu does not need to be held when calling into the
	// child balancers, as all calls into these children happen only as a direct
	// result of a call into the gracefulSwitchBalancer, which are also
	// guaranteed to be synchronous. There is one exception: an UpdateState call
	// from a child balancer when current and pending are populated can lead to
	// calling Close() on the current. To prevent that racing with an
	// UpdateSubConnState from the channel, we hold currentMu during Close and
	// UpdateSubConnState calls.
	mu              sync.Mutex
	balancerCurrent *balancerWrapper
	balancerPending *balancerWrapper
	closed          bool // set to true when this balancer is closed

	// currentMu must be locked before mu. This mutex guards against this
	// sequence of events: UpdateSubConnState() called, finds the
	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
	// balancerCurrent before the UpdateSubConnState is called on the
	// balancerCurrent.
	currentMu sync.Mutex
}

// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
	gsb.cc.UpdateState(gsb.balancerPending.lastState)
	cur := gsb.balancerCurrent
	gsb.balancerCurrent = gsb.balancerPending
	gsb.balancerPending = nil
	go func() {
		gsb.currentMu.Lock()
		defer gsb.currentMu.Unlock()
		cur.Close()
	}()
}

// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}

// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
	gsb.mu.Lock()
	if gsb.closed {
		gsb.mu.Unlock()
		return errBalancerClosed
	}
	bw := &balancerWrapper{
		gsb: gsb,
		lastState: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		},
		subconns: make(map[balancer.SubConn]bool),
	}
	balToClose := gsb.balancerPending // nil if there is no pending balancer
	if gsb.balancerCurrent == nil {
		gsb.balancerCurrent = bw
	} else {
		gsb.balancerPending = bw
	}
	gsb.mu.Unlock()
	balToClose.Close()
	// This function takes a builder instead of a balancer because builder.Build
	// can call back inline, and this utility needs to handle the callbacks.
	newBalancer := builder.Build(bw, gsb.bOpts)
	if newBalancer == nil {
		// This is illegal and should never happen; we clear the balancerWrapper
		// we were constructing if it happens to avoid a potential panic.
		gsb.mu.Lock()
		if gsb.balancerPending != nil {
			gsb.balancerPending = nil
		} else {
			gsb.balancerCurrent = nil
		}
		gsb.mu.Unlock()
		return balancer.ErrBadResolverState
	}

	// This write doesn't need to take gsb.mu because this field never gets read
	// or written to on any calls from the current or pending. Calls from grpc
	// to this balancer are guaranteed to be called synchronously, so this
	// bw.Balancer field will never be forwarded to until this SwitchTo()
	// function returns.
	bw.Balancer = newBalancer
	return nil
}

// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	if gsb.balancerPending != nil {
		return gsb.balancerPending
	}
	return gsb.balancerCurrent
}

// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return errBalancerClosed
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	return balToUpdate.UpdateClientConnState(state)
}

// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	for sc := range balToUpdate.subconns {
		sc.Connect()
	}
}

// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	gsb.currentMu.Lock()
	defer gsb.currentMu.Unlock()
	gsb.mu.Lock()
	// Forward update to the appropriate child. Even if there is a pending
	// balancer, the current balancer should continue to get SubConn updates to
	// maintain the proper state while the pending is still connecting.
	var balToUpdate *balancerWrapper
	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
		balToUpdate = gsb.balancerCurrent
	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
		balToUpdate = gsb.balancerPending
	}
	gsb.mu.Unlock()
	if balToUpdate == nil {
		// SubConn belonged to a stale lb policy that has not yet fully closed,
		// or the balancer was already closed.
		return
	}
	balToUpdate.UpdateSubConnState(sc, state)
}

// Close closes any active child balancers.
func (gsb *Balancer) Close() {
	gsb.mu.Lock()
	gsb.closed = true
	currentBalancerToClose := gsb.balancerCurrent
	gsb.balancerCurrent = nil
	pendingBalancerToClose := gsb.balancerPending
	gsb.balancerPending = nil
	gsb.mu.Unlock()

	currentBalancerToClose.Close()
	pendingBalancerToClose.Close()
}

// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
	balancer.Balancer
	gsb *Balancer

	lastState balancer.State
	subconns  map[balancer.SubConn]bool // subconns created by this balancer
}

func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	if state.ConnectivityState == connectivity.Shutdown {
		bw.gsb.mu.Lock()
		delete(bw.subconns, sc)
		bw.gsb.mu.Unlock()
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	bw.Balancer.UpdateSubConnState(sc, state)
}

// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
	// before Close is called.
	if bw == nil {
		return
	}
	// There is no need to protect this read with a mutex, as Close() is
	// impossible to be called concurrently with the write in SwitchTo(). The
	// callsites of Close() for this balancer in Graceful Switch Balancer will
	// never be called until SwitchTo() returns.
	bw.Balancer.Close()
	bw.gsb.mu.Lock()
	for sc := range bw.subconns {
		bw.gsb.cc.RemoveSubConn(sc)
	}
	bw.gsb.mu.Unlock()
}

func (bw *balancerWrapper) UpdateState(state balancer.State) {
	// Hold the mutex for this entire call to ensure it cannot occur
	// concurrently with other updateState() calls. This causes updates to
	// lastState and calls to cc.UpdateState to happen atomically.
	bw.gsb.mu.Lock()
	defer bw.gsb.mu.Unlock()
	bw.lastState = state

	if !bw.gsb.balancerCurrentOrPending(bw) {
		return
	}

	if bw == bw.gsb.balancerCurrent {
		// In the case that the current balancer exits READY, and there is a pending
		// balancer, you can forward the pending balancer's cached State up to
		// ClientConn and swap the pending into the current. This is because there
		// is no reason to gracefully switch from and keep using the old policy as
		// the ClientConn is not connected to any backends.
		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
			bw.gsb.swap()
			return
		}
		// Even if there is a pending balancer waiting to be gracefully switched to,
		// continue to forward current balancer updates to the Client Conn. Ignoring
		// state + picker from the current would cause undefined behavior/cause the
		// system to behave incorrectly from the current LB policies perspective.
		// Also, the current LB is still being used by grpc to choose SubConns per
		// RPC, and thus should use the most updated form of the current balancer.
		bw.gsb.cc.UpdateState(state)
		return
	}
	// This method is now dealing with a state update from the pending balancer.
	// If the current balancer is currently in a state other than READY, the new
	// policy can be swapped into place immediately. This is because there is no
	// reason to gracefully switch from and keep using the old policy as the
	// ClientConn is not connected to any backends.
	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
		bw.gsb.swap()
	}
}

func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.gsb.mu.Unlock()

	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
		bw.gsb.cc.RemoveSubConn(sc)
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.subconns[sc] = true
	bw.gsb.mu.Unlock()
	return sc, nil
}

func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
	// Ignore ResolveNow requests from anything other than the most recent
	// balancer, because older balancers were already removed from the config.
	if bw != bw.gsb.latestBalancer() {
		return
	}
	bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
	return bw.gsb.cc.Target()
}
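Editor's note: the following is a minimal sketch, not part of this commit, showing how a parent LB policy might drive the vendored gracefulswitch.Balancer above. The names parentBalancer, curPolicy and switchPolicyIfNeeded are hypothetical, and the internal import path is only reachable from inside the grpc module itself; only NewBalancer, SwitchTo and UpdateClientConnState come from the vendored file.

package parent

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

type parentBalancer struct {
	child     *gracefulswitch.Balancer
	curPolicy string
}

func newParentBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *parentBalancer {
	return &parentBalancer{child: gracefulswitch.NewBalancer(cc, opts)}
}

// switchPolicyIfNeeded starts a graceful switch when the configured policy name
// changes; the old child keeps serving picks until the new child reports READY.
func (pb *parentBalancer) switchPolicyIfNeeded(name string, state balancer.ClientConnState) error {
	if name != pb.curPolicy {
		builder := balancer.Get(name)
		if builder == nil {
			return fmt.Errorf("LB policy %q not registered", name)
		}
		if err := pb.child.SwitchTo(builder); err != nil {
			return err
		}
		pb.curPolicy = name
	}
	// Resolver state always goes to the most recently created child.
	return pb.child.UpdateClientConnState(state)
}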
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go (generated, vendored; 100 lines changed)

@@ -25,12 +25,13 @@ import (
 	"os"
 
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
 )
 
 // Logger is the global binary logger. It can be used to get binary logger for
 // each method.
 type Logger interface {
-	getMethodLogger(methodName string) *MethodLogger
+	GetMethodLogger(methodName string) MethodLogger
 }
 
 // binLogger is the global binary logger for the binary. One of this should be
@@ -39,24 +40,33 @@ type Logger interface {
 // It is used to get a methodLogger for each individual method.
 var binLogger Logger
 
-// SetLogger sets the binarg logger.
+var grpclogLogger = grpclog.Component("binarylog")
+
+// SetLogger sets the binary logger.
 //
 // Only call this at init time.
 func SetLogger(l Logger) {
 	binLogger = l
 }
 
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
 // GetMethodLogger returns the methodLogger for the given methodName.
 //
 // methodName should be in the format of "/service/method".
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
 	if binLogger == nil {
 		return nil
 	}
-	return binLogger.getMethodLogger(methodName)
+	return binLogger.GetMethodLogger(methodName)
 }
 
 func init() {
@@ -65,17 +75,29 @@ func init() {
 	binLogger = NewLoggerFromConfigString(configStr)
 }
 
-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the setting for logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
 	// Max length of header and message.
-	hdr, msg uint64
+	Header, Message uint64
 }
 
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
+}
+
 type logger struct {
-	all       *methodLoggerConfig
-	services  map[string]*methodLoggerConfig
-	methods   map[string]*methodLoggerConfig
-
-	blacklist map[string]struct{}
+	config LoggerConfig
 }
 
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
+}
+
 // newEmptyLogger creates an empty logger. The map fields need to be filled in
@@ -85,57 +107,57 @@ func newEmptyLogger() *logger {
 }
 
 // Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
-	if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
 		return fmt.Errorf("conflicting global rules found")
 	}
-	l.all = ml
+	l.config.All = ml
 	return nil
 }
 
 // Set method logger for "service/*".
 //
 // New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
-	if _, ok := l.services[service]; ok {
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
 		return fmt.Errorf("conflicting service rules for service %v found", service)
 	}
-	if l.services == nil {
-		l.services = make(map[string]*methodLoggerConfig)
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
 	}
-	l.services[service] = ml
+	l.config.Services[service] = ml
 	return nil
 }
 
 // Set method logger for "service/method".
 //
 // New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
-	if _, ok := l.blacklist[method]; ok {
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
 		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
+	if _, ok := l.config.Methods[method]; ok {
 		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.methods == nil {
-		l.methods = make(map[string]*methodLoggerConfig)
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
 	}
-	l.methods[method] = ml
+	l.config.Methods[method] = ml
 	return nil
 }
 
 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
-	if _, ok := l.blacklist[method]; ok {
+	if _, ok := l.config.Blacklist[method]; ok {
 		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
+	if _, ok := l.config.Methods[method]; ok {
 		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.blacklist == nil {
-		l.blacklist = make(map[string]struct{})
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
 	}
-	l.blacklist[method] = struct{}{}
+	l.config.Blacklist[method] = struct{}{}
 	return nil
 }
 
@@ -145,23 +167,23 @@ func (l *logger) setBlacklist(method string) error {
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
-	s, m, err := parseMethodName(methodName)
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
 	if err != nil {
-		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
 		return nil
 	}
-	if ml, ok := l.methods[s+"/"+m]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return newMethodLogger(ml.Header, ml.Message)
 	}
-	if _, ok := l.blacklist[s+"/"+m]; ok {
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
 		return nil
 	}
-	if ml, ok := l.services[s]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Services[s]; ok {
+		return newMethodLogger(ml.Header, ml.Message)
 	}
-	if l.all == nil {
+	if l.config.All == nil {
 		return nil
 	}
-	return newMethodLogger(l.all.hdr, l.all.msg)
+	return newMethodLogger(l.config.All.Header, l.config.All.Message)
 }
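Editor's note: a minimal sketch (not from this commit) exercising the config types this file now exports. The function name and the health-check method used for the blacklist key are illustrative only.

package main

import "google.golang.org/grpc/internal/binarylog"

func configureBinaryLogging() binarylog.MethodLogger {
	logger := binarylog.NewLoggerFromConfig(binarylog.LoggerConfig{
		// Default rule: log every method, truncating headers to 256 bytes
		// and messages to 2048 bytes.
		All: &binarylog.MethodLoggerConfig{Header: 256, Message: 2048},
		// Never log this particular method (keys are "service/method").
		Blacklist: map[string]struct{}{"grpc.health.v1.Health/Check": {}},
	})
	binarylog.SetLogger(logger) // only meant to be called at init time

	// Returns nil for blacklisted (or unmatched) methods.
	return logger.GetMethodLogger("/grpc.health.v1.Health/Check")
}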
vendor/google.golang.org/grpc/internal/binarylog/env_config.go (generated, vendored; 10 lines changed)

@@ -24,8 +24,6 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-
-	"google.golang.org/grpc/grpclog"
 )
 
 // NewLoggerFromConfigString reads the string and build a logger. It can be used
@@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger {
 	methods := strings.Split(s, ",")
 	for _, method := range methods {
 		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
-			grpclog.Warningf("failed to parse binary log config: %v", err)
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
 			return nil
 		}
 	}
@@ -91,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 		if err != nil {
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
-		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 		return nil
@@ -106,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
 	}
 	if m == "*" {
-		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	} else {
-		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	}
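Editor's note: a small sketch (not from this commit) of the kind of config string this parser accepts; the format follows the gRPC binary-logging spec as I understand it, and the method names are made up.

package main

import "google.golang.org/grpc/internal/binarylog"

func main() {
	// "*{h:256;m:2048}"  -> default rule: headers capped at 256 B, messages at 2048 B
	// "-service/method"  -> blacklist a single method
	l := binarylog.NewLoggerFromConfigString("*{h:256;m:2048},-grpc.health.v1.Health/Check")
	if l == nil {
		panic("invalid binary log config string")
	}
	binarylog.SetLogger(l)
}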
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go (generated, vendored; 43 lines changed)

@@ -27,7 +27,6 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 )
@@ -49,7 +48,11 @@ func (g *callIDGenerator) reset() {
 var idGen callIDGenerator
 
 // MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+type MethodLogger interface {
+	Log(LogEntryConfig)
+}
+
+type methodLogger struct {
 	headerMaxLen, messageMaxLen uint64
 
 	callID uint64
@@ -58,20 +61,22 @@ type MethodLogger struct {
 	sink Sink // TODO(blog): make this plugable.
 }
 
-func newMethodLogger(h, m uint64) *MethodLogger {
-	return &MethodLogger{
+func newMethodLogger(h, m uint64) *methodLogger {
	return &methodLogger{
 		headerMaxLen:  h,
 		messageMaxLen: m,
 
 		callID:          idGen.next(),
 		idWithinCallGen: &callIDGenerator{},
 
-		sink: defaultSink, // TODO(blog): make it plugable.
+		sink: DefaultSink, // TODO(blog): make it plugable.
 	}
 }
 
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other library to reuse as much logic
+// in methodLogger as possible.
+func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
 	m := c.toProto()
 	timestamp, _ := ptypes.TimestampProto(time.Now())
 	m.Timestamp = timestamp
@@ -86,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
 	case *pb.GrpcLogEntry_Message:
 		m.PayloadTruncated = ml.truncateMessage(pay.Message)
 	}
 
-	ml.sink.Write(m)
+	return m
 }
 
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *methodLogger) Log(c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
+}
+
+func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
 	if ml.headerMaxLen == maxUInt {
 		return false
 	}
@@ -120,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
 	return truncated
 }
 
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
 	if ml.messageMaxLen == maxUInt {
 		return false
 	}
@@ -219,12 +228,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
 	ret := &pb.GrpcLogEntry{
 		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@@ -259,12 +268,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
 	ret := &pb.GrpcLogEntry{
 		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@@ -315,7 +324,7 @@ type ServerTrailer struct {
 func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
 	st, ok := status.FromError(c.Err)
 	if !ok {
-		grpclog.Info("binarylogging: error in trailer is not a status error")
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
 	}
 	var (
 		detailsBytes []byte
@@ -325,7 +334,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
 	if stProto != nil && len(stProto.Details) != 0 {
 		detailsBytes, err = proto.Marshal(stProto)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
 		}
 	}
 	ret := &pb.GrpcLogEntry{
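Editor's note: a minimal sketch (not from this commit) of logging a client header through the new MethodLogger interface; the method name and metadata are placeholders.

package main

import (
	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

func logClientHeader() {
	ml := binarylog.GetMethodLogger("/pkg.Service/Method")
	if ml == nil {
		return // binary logging not configured for this method
	}
	md := metadata.Pairs("user-agent", "sketch/0.1")
	ml.Log(&binarylog.ClientHeader{
		OnClientSide: true,
		Header:       md,
		MethodName:   "/pkg.Service/Method",
	})
}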
vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh (generated, vendored; file deleted, 33 lines)

@@ -1,33 +0,0 @@
#!/bin/bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -eux -o pipefail

TMP=$(mktemp -d)

function finish {
  rm -rf "$TMP"
}
trap finish EXIT

pushd "$TMP"
mkdir -p grpc/binarylog/grpc_binarylog_v1
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto

protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
popd
rm -f ./grpc_binarylog_v1/*.pb.go
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
vendor/google.golang.org/grpc/internal/binarylog/sink.go (generated, vendored; 106 lines changed)

@@ -21,32 +21,23 @@ package binarylog
 import (
 	"bufio"
 	"encoding/binary"
-	"fmt"
 	"io"
-	"io/ioutil"
 	"sync"
 	"time"
 
 	"github.com/golang/protobuf/proto"
 	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
 )
 
 var (
-	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+	// DefaultSink is the sink where the logs will be written to. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
 )
 
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
-	if defaultSink != nil {
-		defaultSink.Close()
-	}
-	defaultSink = s
-}
-
 // Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
 type Sink interface {
 	// Write will be called to write the log entry into the sink.
 	//
@@ -67,7 +58,7 @@ func (ns *noopSink) Close() error { return nil }
 // message is prefixed with a 4 byte big endian unsigned integer as the length.
 //
 // No buffer is done, Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
 	return &writerSink{out: w}
 }
 
@@ -78,7 +69,8 @@ type writerSink struct {
 func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
 	b, err := proto.Marshal(e)
 	if err != nil {
-		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
 	}
 	hdr := make([]byte, 4)
 	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
 
 func (ws *writerSink) Close() error { return nil }
 
-type bufWriteCloserSink struct {
-	mu     sync.Mutex
-	closer io.Closer
-	out    *writerSink   // out is built on buf.
-	buf    *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
 
-	writeStartOnce sync.Once
-	writeTicker    *time.Ticker
+	writeTicker *time.Ticker
+	done        chan struct{}
 }
 
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
-	// Start the write loop when Write is called.
-	fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
 	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
+		fs.startFlushGoroutine()
+		fs.flusherStarted = true
+	}
 	if err := fs.out.Write(e); err != nil {
-		fs.mu.Unlock()
 		return err
 	}
-	fs.mu.Unlock()
 	return nil
 }
 
@@ -119,44 +114,57 @@ const (
 	bufFlushDuration = 60 * time.Second
 )
 
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
 	fs.writeTicker = time.NewTicker(bufFlushDuration)
 	go func() {
-		for range fs.writeTicker.C {
+		for {
+			select {
+			case <-fs.done:
+				return
+			case <-fs.writeTicker.C:
+			}
 			fs.mu.Lock()
-			fs.buf.Flush()
+			if err := fs.buf.Flush(); err != nil {
+				grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+			}
 			fs.mu.Unlock()
 		}
 	}()
 }
 
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
 	if fs.writeTicker != nil {
 		fs.writeTicker.Stop()
 	}
-	fs.mu.Lock()
-	fs.buf.Flush()
-	fs.closer.Close()
-	fs.out.Close()
-	fs.mu.Unlock()
+	close(fs.done)
+	if err := fs.buf.Flush(); err != nil {
+		grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+	}
+	if err := fs.closer.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
	}
+	if err := fs.out.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the Sink: %v", err)
+	}
 	return nil
 }
 
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
 	bufW := bufio.NewWriter(o)
-	return &bufWriteCloserSink{
+	return &bufferedSink{
 		closer: o,
 		out:    newWriterSink(bufW),
 		buf:    bufW,
+		done:   make(chan struct{}),
 	}
 }
 
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
-	tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
-	if err != nil {
-		return nil, fmt.Errorf("failed to create temp file: %v", err)
-	}
-	return newBufWriteCloserSink(tempFile), nil
-}
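Editor's note: a short sketch (not from this commit) wiring the renamed buffered sink in as the package default; the file path is illustrative.

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/internal/binarylog"
)

func main() {
	f, err := os.Create("/tmp/grpc_binarylog.bin")
	if err != nil {
		log.Fatal(err)
	}
	// Entries are buffered and flushed every 60s; Close() flushes and closes f.
	binarylog.DefaultSink = binarylog.NewBufferedSink(f)
}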
vendor/google.golang.org/grpc/internal/binarylog/util.go (generated, vendored; file deleted, 41 lines)

@@ -1,41 +0,0 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
	"errors"
	"strings"
)

// parseMethodName splits service and method from the input. It expects format
// "/service/method".
//
// TODO: move to internal/grpcutil.
func parseMethodName(methodName string) (service, method string, _ error) {
	if !strings.HasPrefix(methodName, "/") {
		return "", "", errors.New("invalid method name: should start with /")
	}
	methodName = methodName[1:]

	pos := strings.LastIndex(methodName, "/")
	if pos < 0 {
		return "", "", errors.New("invalid method name: suffix /method is missing")
	}
	return methodName[:pos], methodName[pos+1:], nil
}
vendor/google.golang.org/grpc/internal/channelz/funcs.go (generated, vendored; 232 lines changed)

@@ -24,6 +24,8 @@
 package channelz
 
 import (
+	"context"
+	"errors"
 	"fmt"
 	"sort"
 	"sync"
@@ -49,7 +51,8 @@ var (
 // TurnOn turns on channelz data collection.
 func TurnOn() {
 	if !IsOn() {
-		NewChannelzStorage()
+		db.set(newChannelMap())
+		idGen.reset()
 		atomic.StoreInt32(&curState, 1)
 	}
 }
@@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap {
 	return d.DB
 }
 
-// NewChannelzStorage initializes channelz data storage and id generator.
+// NewChannelzStorageForTesting initializes channelz data storage and id
+// generator for testing purposes.
 //
-// This function returns a cleanup function to wait for all channelz state to be reset by the
-// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
-// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
-// to remove some entity just register by the new test, since the id space is the same.
-//
-// Note: This function is exported for testing purpose only. User should not call
-// it in most cases.
-func NewChannelzStorage() (cleanup func() error) {
-	db.set(&channelMap{
-		topLevelChannels: make(map[int64]struct{}),
-		channels:         make(map[int64]*channel),
-		listenSockets:    make(map[int64]*listenSocket),
-		normalSockets:    make(map[int64]*normalSocket),
-		servers:          make(map[int64]*server),
-		subChannels:      make(map[int64]*subChannel),
-	})
+// Returns a cleanup function to be invoked by the test, which waits for up to
+// 10s for all channelz state to be reset by the grpc goroutines when those
+// entities get closed. This cleanup function helps with ensuring that tests
+// don't mess up each other.
+func NewChannelzStorageForTesting() (cleanup func() error) {
+	db.set(newChannelMap())
 	idGen.reset()
+
 	return func() error {
-		var err error
 		cm := db.get()
 		if cm == nil {
 			return nil
 		}
-		for i := 0; i < 1000; i++ {
-			cm.mu.Lock()
-			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
-				cm.mu.Unlock()
-				// all things stored in the channelz map have been cleared.
+
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			cm.mu.RLock()
+			topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
+			cm.mu.RUnlock()
+
+			if err := ctx.Err(); err != nil {
+				return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
+			}
+			if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
 				return nil
 			}
-			cm.mu.Unlock()
-			time.Sleep(10 * time.Millisecond)
+			<-ticker.C
 		}
-
-		cm.mu.Lock()
-		err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
-		cm.mu.Unlock()
-		return err
 	}
 }
@@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
 	return db.get().GetServer(id)
 }
 
-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
 	id := idGen.genID()
+	var parent int64
+	isTopChannel := true
+	if pid != nil {
+		isTopChannel = false
+		parent = pid.Int()
+	}
+
+	if !IsOn() {
+		return newIdentifer(RefChannel, id, pid)
+	}
+
 	cn := &channel{
 		refName:     ref,
 		c:           c,
 		subChans:    make(map[int64]string),
 		nestedChans: make(map[int64]string),
 		id:          id,
-		pid:         pid,
+		pid:         parent,
 		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid, ref)
-	} else {
-		db.get().addChannel(id, cn, false, pid, ref)
-	}
-	return id
+	db.get().addChannel(id, cn, isTopChannel, parent)
+	return newIdentifer(RefChannel, id, pid)
 }
 
-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a SubChannel's parent id cannot be 0")
-		return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
 	}
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
+	}
+
 	sc := &subChannel{
 		refName: ref,
 		c:       c,
 		sockets: make(map[int64]string),
 		id:      id,
-		pid:     pid,
+		pid:     pid.Int(),
 		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid, ref)
-	return id
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
 }
 
 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
 	svr := &server{
 		refName: ref,
 		s:       s,
@@ -244,61 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
 		id:      id,
 	}
 	db.get().addServer(id, svr)
-	return id
+	return newIdentifer(RefServer, id, nil)
 }
 
 // RegisterListenSocket registers the given listen socket s in channelz database
 // with ref as its reference name, and add it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a ListenSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be 0")
 	}
 	id := idGen.genID()
-	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid, ref)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
+	}
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
 }
 
 // RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a NormalSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be 0")
 	}
 	id := idGen.genID()
-	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid, ref)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
+	}
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
 }
 
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
-func RemoveEntry(id int64) {
-	db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
 }
 
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
 type TraceEventDesc struct {
 	Desc     string
 	Severity Severity
 	Parent   *TraceEventDesc
 }
 
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, desc *TraceEventDesc) {
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottom most entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
+	}
+
 	if getMaxTraceEntry() == 0 {
 		return
 	}
-	db.get().traceEvent(id, desc)
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
 }
 
 // channelMap is the storage data structure for channelz.
@@ -316,6 +367,17 @@ type channelMap struct {
 	normalSockets map[int64]*normalSocket
 }
 
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	}
+}
+
 func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Lock()
 	s.cm = c
@@ -323,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
 	c.mu.Lock()
 	cn.cm = c
 	cn.trace.cm = c
@@ -336,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
 	c.mu.Lock()
 	sc.cm = c
 	sc.trace.cm = c
@@ -345,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
 	c.mu.Lock()
 	ls.cm = c
 	c.listenSockets[id] = ls
@@ -353,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
 	c.mu.Lock()
 	ns.cm = c
 	c.normalSockets[id] = ns
@@ -620,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64)
 	if count == 0 {
 		end = true
 	}
-	var s []*SocketMetric
+	s := make([]*SocketMetric, 0, len(sks))
 	for _, ns := range sks {
 		sm := &SocketMetric{}
 		sm.SocketData = ns.s.ChannelzMetric()
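Editor's note: a minimal sketch (not from this commit) of the new *Identifier-based registration flow; ch, sub and target are placeholders for real channelz.Channel implementations, and the component name is made up.

package main

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("example")

func register(ch, sub channelz.Channel, target string) {
	chanID := channelz.RegisterChannel(ch, nil, target) // nil parent => top-level channel
	subID, err := channelz.RegisterSubChannel(sub, chanID, "")
	if err != nil {
		logger.Error(err)
		return
	}
	channelz.Infof(logger, subID, "created subchannel for %s", target)

	// On teardown, entries are removed by identifier.
	channelz.RemoveEntry(subID)
	channelz.RemoveEntry(chanID)
}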
vendor/google.golang.org/grpc/internal/channelz/id.go (generated, vendored; new file, 75 lines)

@@ -0,0 +1,75 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package channelz

import "fmt"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier struct {
	typ RefChannelType
	id  int64
	str string
	pid *Identifier
}

// Type returns the entity type corresponding to id.
func (id *Identifier) Type() RefChannelType {
	return id.typ
}

// Int returns the integer identifier corresponding to id.
func (id *Identifier) Int() int64 {
	return id.id
}

// String returns a string representation of the entity corresponding to id.
//
// This includes some information about the parent as well. Examples:
// Top-level channel: [Channel #channel-number]
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
func (id *Identifier) String() string {
	return id.str
}

// Equal returns true if other is the same as id.
func (id *Identifier) Equal(other *Identifier) bool {
	if (id != nil) != (other != nil) {
		return false
	}
	if id == nil && other == nil {
		return true
	}
	return id.typ == other.typ && id.id == other.id && id.pid == other.pid
}

// NewIdentifierForTesting returns a new opaque identifier to be used only for
// testing purposes.
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
	return newIdentifer(typ, id, pid)
}

func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
	str := fmt.Sprintf("%s #%d", typ, id)
	if pid != nil {
		str = fmt.Sprintf("%s %s", pid, str)
	}
	return &Identifier{typ: typ, id: id, str: str, pid: pid}
}
79
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
79
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
func withParens(id *Identifier) string {
|
||||
return "[" + id.String() + "] "
|
||||
}
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
56
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
56
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
@ -26,7 +26,6 @@ import (
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// the addrConn will create a new transport. And when registering the new transport in
|
||||
// channelz, its parent addrConn could have already been torn down and deleted
|
||||
// from channelz tracking, and thus reach the code here.
|
||||
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) {
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
@ -493,11 +492,11 @@ type listenSocket struct {
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() {
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
@ -522,11 +521,11 @@ type normalSocket struct {
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() {
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) {
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
@ -673,10 +672,10 @@ func (c *channelTrace) clear() {
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUNKNOWN indicates unknown severity of a trace event.
|
||||
CtUNKNOWN Severity = iota
|
||||
// CtINFO indicates info level severity of a trace event.
|
||||
CtINFO
|
||||
// CtUnknown indicates unknown severity of a trace event.
|
||||
CtUnknown Severity = iota
|
||||
// CtInfo indicates info level severity of a trace event.
|
||||
CtInfo
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
@ -687,12 +686,33 @@ const (
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefUnknown indicates an unknown entity type, the zero value for this type.
|
||||
RefUnknown RefChannelType = iota
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel RefChannelType = iota
|
||||
RefChannel
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
// RefServer indicates the referenced entity is a Server.
|
||||
RefServer
|
||||
// RefListenSocket indicates the referenced entity is a ListenSocket.
|
||||
RefListenSocket
|
||||
// RefNormalSocket indicates the referenced entity is a NormalSocket.
|
||||
RefNormalSocket
|
||||
)
|
||||
|
||||
var refChannelTypeToString = map[RefChannelType]string{
|
||||
RefUnknown: "Unknown",
|
||||
RefChannel: "Channel",
|
||||
RefSubChannel: "SubChannel",
|
||||
RefServer: "Server",
|
||||
RefListenSocket: "ListenSocket",
|
||||
RefNormalSocket: "NormalSocket",
|
||||
}
|
||||
|
||||
func (r RefChannelType) String() string {
|
||||
return refChannelTypeToString[r]
|
||||
}
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
|
2
vendor/google.golang.org/grpc/internal/channelz/types_linux.go
generated
vendored
2
vendor/google.golang.org/grpc/internal/channelz/types_linux.go
generated
vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
7
vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
generated
vendored
7
vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
generated
vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
@ -22,8 +23,6 @@ package channelz
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
@ -39,6 +38,6 @@ type SocketOptionData struct {
|
||||
// Windows OS doesn't support Socket Option
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
once.Do(func() {
|
||||
grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
logger.Warning("Channelz: socket options are not supported on non-linux environments")
|
||||
})
|
||||
}
|
||||
|
2
vendor/google.golang.org/grpc/internal/channelz/util_linux.go
generated
vendored
2
vendor/google.golang.org/grpc/internal/channelz/util_linux.go
generated
vendored
@ -1,5 +1,3 @@
|
||||
// +build linux,!appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
3
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
generated
vendored
3
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
generated
vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
|
49
vendor/google.golang.org/grpc/internal/credentials/credentials.go
generated
vendored
Normal file
49
vendor/google.golang.org/grpc/internal/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
|
||||
// context.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// NewRequestInfoContext creates a context with ri.
|
||||
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from ctx.
|
||||
func RequestInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(requestInfoKey{})
|
||||
}
|
||||
|
||||
// clientHandshakeInfoKey is a struct used as the key to store
|
||||
// ClientHandshakeInfo in a context.
|
||||
type clientHandshakeInfoKey struct{}
|
||||
|
||||
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
|
||||
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(clientHandshakeInfoKey{})
|
||||
}
|
||||
|
||||
// NewClientHandshakeInfoContext creates a context with chi.
|
||||
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
|
||||
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
|
||||
}
|
75
vendor/google.golang.org/grpc/internal/credentials/spiffe.go
generated
vendored
Normal file
75
vendor/google.golang.org/grpc/internal/credentials/spiffe.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package credentials defines APIs for parsing SPIFFE ID.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
|
||||
// is invalid, return nil with warning.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return SPIFFEIDFromCert(state.PeerCertificates[0])
|
||||
}
|
||||
|
||||
// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
|
||||
// ID format is invalid, return nil with warning.
|
||||
func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
|
||||
if cert == nil || cert.URIs == nil {
|
||||
return nil
|
||||
}
|
||||
var spiffeID *url.URL
|
||||
for _, uri := range cert.URIs {
|
||||
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
|
||||
continue
|
||||
}
|
||||
// From this point, we assume the uri is intended for a SPIFFE ID.
|
||||
if len(uri.String()) > 2048 {
|
||||
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) == 0 || len(uri.Path) == 0 {
|
||||
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) > 255 {
|
||||
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
|
||||
return nil
|
||||
}
|
||||
// A valid SPIFFE certificate can only have exactly one URI SAN field.
|
||||
if len(cert.URIs) > 1 {
|
||||
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
|
||||
return nil
|
||||
}
|
||||
spiffeID = uri
|
||||
}
|
||||
return spiffeID
|
||||
}
|
58
vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
generated
vendored
Normal file
58
vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type sysConn = syscall.Conn
|
||||
|
||||
// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
|
||||
// SyscallConn() (the method in interface syscall.Conn) is explicitly
|
||||
// implemented on this type,
|
||||
//
|
||||
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
|
||||
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
|
||||
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
|
||||
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
|
||||
// help here).
|
||||
type syscallConn struct {
|
||||
net.Conn
|
||||
// sysConn is a type alias of syscall.Conn. It's necessary because the name
|
||||
// `Conn` collides with `net.Conn`.
|
||||
sysConn
|
||||
}
|
||||
|
||||
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
|
||||
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
|
||||
// will be used for read/write.
|
||||
//
|
||||
// This function returns newConn if rawConn doesn't implement syscall.Conn.
|
||||
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||
sysConn, ok := rawConn.(syscall.Conn)
|
||||
if !ok {
|
||||
return newConn
|
||||
}
|
||||
return &syscallConn{
|
||||
Conn: newConn,
|
||||
sysConn: sysConn,
|
||||
}
|
||||
}
|
52
vendor/google.golang.org/grpc/internal/credentials/util.go
generated
vendored
Normal file
52
vendor/google.golang.org/grpc/internal/credentials/util.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
// AppendH2ToNextProtos appends h2 to next protos.
|
||||
func AppendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// CloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
5
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
5
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
@ -26,13 +26,10 @@ import (
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||||
)
|
||||
|
101
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
Normal file
101
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
|
||||
// Do not use this and read from env directly. Its value is read and kept in
|
||||
// variable XDSBootstrapFileName.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
|
||||
// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
|
||||
// content. Do not use this and read from env directly. Its value is read
|
||||
// and kept in variable XDSBootstrapFileContent.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
||||
|
||||
ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
|
||||
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
|
||||
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||||
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
|
||||
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
|
||||
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
|
||||
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
|
||||
|
||||
c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
|
||||
)
|
||||
|
||||
var (
|
||||
// XDSBootstrapFileName holds the name of the file which contains xDS
|
||||
// bootstrap configuration. Users can specify the location of the bootstrap
|
||||
// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
|
||||
// XDSBootstrapFileContent holds the content of the xDS bootstrap
|
||||
// configuration. Users can specify the bootstrap config by setting the
|
||||
// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
|
||||
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
||||
// disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
||||
XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
|
||||
// XDSClientSideSecurity is used to control processing of security
|
||||
// configuration on the client-side.
|
||||
//
|
||||
// Note that there is no env var protection for the server-side because we
|
||||
// have a brand new API on the server-side and users explicitly need to use
|
||||
// the new API to get security integration on the server.
|
||||
XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
||||
// and DNS cluster is enabled, which can be enabled by setting the
|
||||
// environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
||||
// "true".
|
||||
XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
|
||||
|
||||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||||
// which can be disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
||||
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
|
||||
// XDSOutlierDetection indicates whether outlier detection support is
|
||||
// enabled, which can be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
|
||||
XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
|
||||
// XDSFederation indicates whether federation support is enabled.
|
||||
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
|
||||
|
||||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||||
// support for the RLS CLuster Specifier is enabled, which can be enabled by
|
||||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||||
// "true".
|
||||
XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
|
||||
|
||||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
|
||||
)
|
126
vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
generated
vendored
Normal file
126
vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclog (internal) defines depth logging for grpc.
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is the logger used for the non-depth log functions.
|
||||
var Logger LoggerV2
|
||||
|
||||
// DepthLogger is the logger used for the depth log functions.
|
||||
var DepthLogger DepthLoggerV2
|
||||
|
||||
// InfoDepth logs to the INFO log at the specified depth.
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.InfoDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Infoln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// WarningDepth logs to the WARNING log at the specified depth.
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.WarningDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Warningln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDepth logs to the ERROR log at the specified depth.
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.ErrorDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Errorln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// FatalDepth logs to the FATAL log at the specified depth.
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.FatalDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Fatalln(args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
// This is a copy of the LoggerV2 defined in the external grpclog package. It
|
||||
// is defined here to avoid a circular dependency.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
81
vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
generated
vendored
Normal file
81
vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// PrefixLogger does logging with a prefix.
|
||||
//
|
||||
// Logging method on a nil logs without any prefix.
|
||||
type PrefixLogger struct {
|
||||
logger DepthLoggerV2
|
||||
prefix string
|
||||
}
|
||||
|
||||
// Infof does info logging.
|
||||
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Warningf does warning logging.
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Errorf does error logging.
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
|
||||
return &PrefixLogger{logger: logger, prefix: prefix}
|
||||
}
|
29
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
29
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
@ -31,26 +31,37 @@ var (
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
// Int implements rand.Int on the grpcrand global source.
|
||||
func Int() int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int()
|
||||
}
|
||||
|
||||
// Int63n implements rand.Int63n on the grpcrand global source.
|
||||
func Int63n(n int64) int64 {
|
||||
mu.Lock()
|
||||
res := r.Int63n(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
defer mu.Unlock()
|
||||
return r.Int63n(n)
|
||||
}
|
||||
|
||||
// Intn implements rand.Intn on the grpcrand global source.
|
||||
func Intn(n int) int {
|
||||
mu.Lock()
|
||||
res := r.Intn(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
defer mu.Unlock()
|
||||
return r.Intn(n)
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
res := r.Float64()
|
||||
mu.Unlock()
|
||||
return res
|
||||
defer mu.Unlock()
|
||||
return r.Float64()
|
||||
}
|
||||
|
||||
// Uint64 implements rand.Uint64 on the grpcrand global source.
|
||||
func Uint64() uint64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
||||
|
63
vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
generated
vendored
Normal file
63
vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const maxTimeoutValue int64 = 100000000 - 1
|
||||
|
||||
// div does integer division and round-up the result. Note that this is
|
||||
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||
func div(d, r time.Duration) int64 {
|
||||
if d%r > 0 {
|
||||
return int64(d/r + 1)
|
||||
}
|
||||
return int64(d / r)
|
||||
}
|
||||
|
||||
// EncodeDuration encodes the duration to the format grpc-timeout header
|
||||
// accepts.
|
||||
//
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
func EncodeDuration(t time.Duration) string {
|
||||
// TODO: This is simplistic and not bandwidth efficient. Improve it.
|
||||
if t <= 0 {
|
||||
return "0n"
|
||||
}
|
||||
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "n"
|
||||
}
|
||||
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "u"
|
||||
}
|
||||
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "m"
|
||||
}
|
||||
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "S"
|
||||
}
|
||||
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "M"
|
||||
}
|
||||
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||
}
|
@ -1,8 +1,6 @@
|
||||
// +build go1.13
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -18,16 +16,5 @@
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
func init() {
|
||||
filterError = func(err error) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
|
||||
// The name does not exist; not an error.
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Package grpcutil provides utility functions used across the gRPC codebase.
|
||||
package grpcutil
|
40
vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
generated
vendored
Normal file
40
vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
type mdExtraKey struct{}
|
||||
|
||||
// WithExtraMetadata creates a new context with incoming md attached.
|
||||
func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
return context.WithValue(ctx, mdExtraKey{}, md)
|
||||
}
|
||||
|
||||
// ExtraMetadata returns the incoming metadata in ctx if it exists. The
|
||||
// returned MD should not be modified. Writing to it may cause races.
|
||||
// Modification should be made to copies of the returned MD.
|
||||
func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
|
||||
md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
|
||||
return
|
||||
}
|
84
vendor/google.golang.org/grpc/internal/grpcutil/method.go
generated
vendored
Normal file
84
vendor/google.golang.org/grpc/internal/grpcutil/method.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseMethod splits service and method from the input. It expects format
|
||||
// "/service/method".
|
||||
//
|
||||
func ParseMethod(methodName string) (service, method string, _ error) {
|
||||
if !strings.HasPrefix(methodName, "/") {
|
||||
return "", "", errors.New("invalid method name: should start with /")
|
||||
}
|
||||
methodName = methodName[1:]
|
||||
|
||||
pos := strings.LastIndex(methodName, "/")
|
||||
if pos < 0 {
|
||||
return "", "", errors.New("invalid method name: suffix /method is missing")
|
||||
}
|
||||
return methodName[:pos], methodName[pos+1:], nil
|
||||
}
|
||||
|
||||
const baseContentType = "application/grpc"
|
||||
|
||||
// ContentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func ContentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// ContentType builds full content type with the given sub-type.
|
||||
//
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func ContentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -16,29 +16,16 @@
|
||||
*
|
||||
*/
|
||||
|
||||
// This file contains wrappers for grpclog functions.
|
||||
// The transport package only logs to verbose level 2 by default.
|
||||
package grpcutil
|
||||
|
||||
package transport
|
||||
import "regexp"
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
const logLevel = 2
|
||||
|
||||
func infof(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Infof(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func warningf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Warningf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Errorf(format, args...)
|
||||
// FullMatchWithRegex returns whether the full text matches the regex provided.
|
||||
func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
|
||||
if len(text) == 0 {
|
||||
return re.MatchString(text)
|
||||
}
|
||||
re.Longest()
|
||||
rem := re.FindString(text)
|
||||
return len(rem) == len(text)
|
||||
}
|
113
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
113
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -37,17 +38,101 @@ var (
|
||||
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
||||
// default, but tests may wish to set it lower for convenience.
|
||||
KeepaliveMinPingTime = 10 * time.Second
|
||||
// StatusRawProto is exported by status/status.go. This func returns a
|
||||
// pointer to the wrapped Status proto for a given status.Status without a
|
||||
// call to proto.Clone(). The returned Status proto should not be mutated by
|
||||
// the caller.
|
||||
StatusRawProto interface{} // func (*status.Status) *spb.Status
|
||||
// NewRequestInfoContext creates a new context based on the argument context attaching
|
||||
// the passed in RequestInfo to the new context.
|
||||
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
|
||||
// ParseServiceConfigForTesting is for creating a fake
|
||||
// ClientConn for resolver testing only
|
||||
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
|
||||
// ParseServiceConfig parses a JSON representation of the service config.
|
||||
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
|
||||
// EqualServiceConfigForTesting is for testing service config generation and
|
||||
// parsing. Both a and b should be returned by ParseServiceConfig.
|
||||
// This function compares the config without rawJSON stripped, in case the
|
||||
// there's difference in white space.
|
||||
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
|
||||
// GetCertificateProviderBuilder returns the registered builder for the
|
||||
// given name. This is set by package certprovider for use from xDS
|
||||
// bootstrap code while parsing certificate provider configs in the
|
||||
// bootstrap file.
|
||||
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
|
||||
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
|
||||
// stored in the passed in attributes. This is set by
|
||||
// credentials/xds/xds.go.
|
||||
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
|
||||
// GetServerCredentials returns the transport credentials configured on a
|
||||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||
// is configured on the underlying gRPC server. This is set by server.go.
|
||||
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
||||
// DrainServerTransports initiates a graceful close of existing connections
|
||||
// on a gRPC server accepted on the provided listener address. An
|
||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
// listener moves to "not-serving" mode.
|
||||
DrainServerTransports interface{} // func(*grpc.Server, string)
|
||||
// AddExtraServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
AddExtraServerOptions interface{} // func(opt ...ServerOption)
|
||||
// ClearExtraServerOptions clears the array of extra ServerOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
ClearExtraServerOptions func()
|
||||
// AddExtraDialOptions adds an array of DialOption that will be effective
|
||||
// globally for newly created client channels. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
AddExtraDialOptions interface{} // func(opt ...DialOption)
|
||||
// ClearExtraDialOptions clears the array of extra DialOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
ClearExtraDialOptions func()
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
// the provided xds bootstrap config instead of the global configuration from
|
||||
// the supported environment variables. The resolver.Builder is meant to be
|
||||
// used in conjunction with the grpc.WithResolvers DialOption.
|
||||
//
|
||||
// Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
|
||||
|
||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||
// variable.
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
RegisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
|
||||
// Specifier Plugin for testing purposes. This is needed because there is no way
|
||||
// to unregister the RLS Cluster Specifier Plugin after registering it solely
|
||||
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
UnregisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
|
||||
// purposes, regardless of the RBAC environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
RegisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
|
||||
// testing purposes. This is needed because there is no way to unregister the
|
||||
// HTTP Filter after registering it solely for testing purposes using
|
||||
// RegisterRBACHTTPFilterForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
UnregisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// RegisterOutlierDetectionBalancerForTesting registers the Outlier
|
||||
// Detection Balancer for testing purposes, regardless of the Outlier
|
||||
// Detection environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the Outlier Detection env var is removed.
|
||||
RegisterOutlierDetectionBalancerForTesting func()
|
||||
|
||||
// UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier
|
||||
// Detection Balancer for testing purposes. This is needed because there is
|
||||
// no way to unregister the Outlier Detection Balancer after registering it
|
||||
// solely for testing purposes using
|
||||
// RegisterOutlierDetectionBalancerForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the Outlier Detection env var is removed.
|
||||
UnregisterOutlierDetectionBalancerForTesting func()
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
@ -70,3 +155,9 @@ const (
|
||||
// that supports backend returned by grpclb balancer.
|
||||
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
|
||||
)
|
||||
|
||||
// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
|
||||
//
|
||||
// It currently has an experimental suffix which would be removed once
|
||||
// end-to-end testing of the policy is completed.
|
||||
const RLSLoadBalancingPolicyName = "rls_experimental"
|
||||
|
120
vendor/google.golang.org/grpc/internal/metadata/metadata.go
generated
vendored
Normal file
120
vendor/google.golang.org/grpc/internal/metadata/metadata.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package metadata contains functions to set and get metadata from addresses.
|
||||
//
|
||||
// This package is experimental.
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
type mdKeyType string
|
||||
|
||||
const mdKey = mdKeyType("grpc.internal.address.metadata")
|
||||
|
||||
type mdValue metadata.MD
|
||||
|
||||
func (m mdValue) Equal(o interface{}) bool {
|
||||
om, ok := o.(mdValue)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if len(m) != len(om) {
|
||||
return false
|
||||
}
|
||||
for k, v := range m {
|
||||
ov := om[k]
|
||||
if len(ov) != len(v) {
|
||||
return false
|
||||
}
|
||||
for i, ve := range v {
|
||||
if ov[i] != ve {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get returns the metadata of addr.
|
||||
func Get(addr resolver.Address) metadata.MD {
|
||||
attrs := addr.Attributes
|
||||
if attrs == nil {
|
||||
return nil
|
||||
}
|
||||
md, _ := attrs.Value(mdKey).(mdValue)
|
||||
return metadata.MD(md)
|
||||
}
|
||||
|
||||
// Set sets (overrides) the metadata in addr.
|
||||
//
|
||||
// When a SubConn is created with this address, the RPCs sent on it will all
|
||||
// have this metadata.
|
||||
func Set(addr resolver.Address, md metadata.MD) resolver.Address {
|
||||
addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
|
||||
return addr
|
||||
}
|
||||
|
||||
// Validate returns an error if the input md contains invalid keys or values.
//
// If the header is not a pseudo-header, the following items are checked:
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
func Validate(md metadata.MD) error {
for k, vals := range md {
// pseudo-headers are ignored
if k[0] == ':' {
continue
}
// Check the key: iterate by byte index to avoid converting the string to runes.
for i := 0; i < len(k); i++ {
r := k[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
}
}
if strings.HasSuffix(k, "-bin") {
continue
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
}
}
}
return nil
}

// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
func hasNotPrintable(msg string) bool {
// Iterate by byte index to avoid converting the string to runes.
for i := 0; i < len(msg); i++ {
if msg[i] < 0x20 || msg[i] > 0x7E {
return true
}
}
return false
}
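
For context on how the new address-metadata helpers are meant to be used, here is a minimal sketch (illustrative only: the package lives under internal/, so only code inside the grpc-go module can import it; the address and key names are made up). A resolver validates metadata with Validate and attaches it to an address with Set; the transport later reads it back with Get.

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	md := metadata.Pairs("cluster", "backend-a")
	if err := imetadata.Validate(md); err != nil {
		// Keys outside [0-9a-z-_.] or values with non-printable ASCII are rejected.
		fmt.Println("invalid metadata:", err)
		return
	}
	// A SubConn created from addr will attach md to every RPC sent on it.
	addr := imetadata.Set(resolver.Address{Addr: "10.0.0.1:443"}, md)
	fmt.Println(imetadata.Get(addr))
}
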
82
vendor/google.golang.org/grpc/internal/pretty/pretty.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package pretty defines helper functions to pretty-print structs for logging.
|
||||
package pretty
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
protov1 "github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protov2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const jsonIndent = " "
|
||||
|
||||
// ToJSON marshals the input into a json string.
|
||||
//
|
||||
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
||||
func ToJSON(e interface{}) string {
|
||||
switch ee := e.(type) {
|
||||
case protov1.Message:
|
||||
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
||||
ret, err := mm.MarshalToString(ee)
|
||||
if err != nil {
|
||||
// Marshaling may fail for messages wrapped in proto.Any, e.g. xDS v2 LDS:
// if the v2 message types are not imported, the inner message cannot be
// resolved and is reported as not found.
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return ret
|
||||
case protov2.Message:
|
||||
mm := protojson.MarshalOptions{
|
||||
Multiline: true,
|
||||
Indent: jsonIndent,
|
||||
}
|
||||
ret, err := mm.Marshal(ee)
|
||||
if err != nil {
|
||||
// Marshaling may fail for messages wrapped in proto.Any, e.g. xDS v2 LDS:
// if the v2 message types are not imported, the inner message cannot be
// resolved and is reported as not found.
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
default:
|
||||
ret, err := json.MarshalIndent(ee, "", jsonIndent)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
}
|
||||
}
|
||||
|
||||
// FormatJSON formats the input json bytes with indentation.
|
||||
//
|
||||
// If Indent fails, it returns the unchanged input as string.
|
||||
func FormatJSON(b []byte) string {
|
||||
var out bytes.Buffer
|
||||
err := json.Indent(&out, b, "", jsonIndent)
|
||||
if err != nil {
|
||||
return string(b)
|
||||
}
|
||||
return out.String()
|
||||
}
|
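
A brief sketch of how this helper behaves (illustrative only; the package is internal to grpc-go): proto messages go through jsonpb/protojson, everything else falls back to encoding/json with indentation.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

// update is a stand-in for any non-proto value gRPC wants to log.
type update struct {
	Cluster string
	Weight  int
}

func main() {
	// Non-proto values are indented with encoding/json.
	fmt.Println(pretty.ToJSON(update{Cluster: "backend-a", Weight: 70}))

	// Raw JSON bytes can be re-indented for logs; on error the input is returned unchanged.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":[2,3]}`)))
}
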
167
vendor/google.golang.org/grpc/internal/resolver/config_selector.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package resolver provides internal resolver-related functionality.
|
||||
package resolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/serviceconfig"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// ConfigSelector controls what configuration to use for every RPC.
|
||||
type ConfigSelector interface {
|
||||
// Selects the configuration for the RPC, or terminates it using the error.
|
||||
// This error will be converted by the gRPC library to a status error with
|
||||
// code UNKNOWN if it is not returned as a status error.
|
||||
SelectConfig(RPCInfo) (*RPCConfig, error)
|
||||
}
|
||||
|
||||
// RPCInfo contains RPC information needed by a ConfigSelector.
|
||||
type RPCInfo struct {
|
||||
// Context is the user's context for the RPC and contains headers and
|
||||
// application timeout. It is passed for interception purposes and for
|
||||
// efficiency reasons. SelectConfig should not be blocking.
|
||||
Context context.Context
|
||||
Method string // i.e. "/Service/Method"
|
||||
}
|
||||
|
||||
// RPCConfig describes the configuration to use for each RPC.
|
||||
type RPCConfig struct {
|
||||
// The context to use for the remainder of the RPC; can pass info to LB
|
||||
// policy or affect timeout or metadata.
|
||||
Context context.Context
|
||||
MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
|
||||
OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
|
||||
Interceptor ClientInterceptor
|
||||
}
|
||||
|
||||
// ClientStream is the same as grpc.ClientStream, but defined here for circular
|
||||
// dependency reasons.
|
||||
type ClientStream interface {
|
||||
// Header returns the header metadata received from the server if there
|
||||
// is any. It blocks if the metadata is not ready to read.
|
||||
Header() (metadata.MD, error)
|
||||
// Trailer returns the trailer metadata from the server, if there is any.
|
||||
// It must only be called after stream.CloseAndRecv has returned, or
|
||||
// stream.Recv has returned a non-nil error (including io.EOF).
|
||||
Trailer() metadata.MD
|
||||
// CloseSend closes the send direction of the stream. It closes the stream
|
||||
// when a non-nil error is encountered. It is also not safe to call CloseSend
|
||||
// concurrently with SendMsg.
|
||||
CloseSend() error
|
||||
// Context returns the context for this stream.
|
||||
//
|
||||
// It should not be called until after Header or RecvMsg has returned. Once
|
||||
// called, subsequent client-side retries are disabled.
|
||||
Context() context.Context
|
||||
// SendMsg is generally called by generated code. On error, SendMsg aborts
|
||||
// the stream. If the error was generated by the client, the status is
|
||||
// returned directly; otherwise, io.EOF is returned and the status of
|
||||
// the stream may be discovered using RecvMsg.
|
||||
//
|
||||
// SendMsg blocks until:
|
||||
// - There is sufficient flow control to schedule m with the transport, or
|
||||
// - The stream is done, or
|
||||
// - The stream breaks.
|
||||
//
|
||||
// SendMsg does not wait until the message is received by the server. An
|
||||
// untimely stream closure may result in lost messages. To ensure delivery,
|
||||
// users should ensure the RPC completed successfully using RecvMsg.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines. It is also
|
||||
// not safe to call CloseSend concurrently with SendMsg.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the stream completes successfully. On
|
||||
// any other error, the stream is aborted and the error contains the RPC
|
||||
// status.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// ClientInterceptor is an interceptor for gRPC client streams.
|
||||
type ClientInterceptor interface {
|
||||
// NewStream produces a ClientStream for an RPC which may optionally use
|
||||
// the provided function to produce a stream for delegation. Note:
|
||||
// RPCInfo.Context should not be used (will be nil).
|
||||
//
|
||||
// done is invoked when the RPC is finished using its connection, or could
|
||||
// not be assigned a connection. RPC operations may still occur on
|
||||
// ClientStream after done is called, since the interceptor is invoked by
|
||||
// application-layer operations. done must never be nil when called.
|
||||
NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
|
||||
}
|
||||
|
||||
// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
|
||||
type ServerInterceptor interface {
|
||||
// AllowRPC checks if an incoming RPC is allowed to proceed based on
|
||||
// information about the connection the RPC was received on and its HTTP
// headers. This information will be piped into the context.
|
||||
AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
|
||||
}
|
||||
|
||||
type csKeyType string
|
||||
|
||||
const csKey = csKeyType("grpc.internal.resolver.configSelector")
|
||||
|
||||
// SetConfigSelector sets the config selector in state and returns the new
|
||||
// state.
|
||||
func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
|
||||
state.Attributes = state.Attributes.WithValue(csKey, cs)
|
||||
return state
|
||||
}
|
||||
|
||||
// GetConfigSelector retrieves the config selector from state, if present, and
|
||||
// returns it or nil if absent.
|
||||
func GetConfigSelector(state resolver.State) ConfigSelector {
|
||||
cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
|
||||
return cs
|
||||
}
|
||||
|
||||
// SafeConfigSelector allows for safe switching of ConfigSelector
|
||||
// implementations such that previous values are guaranteed to not be in use
|
||||
// when UpdateConfigSelector returns.
|
||||
type SafeConfigSelector struct {
|
||||
mu sync.RWMutex
|
||||
cs ConfigSelector
|
||||
}
|
||||
|
||||
// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
|
||||
// all uses of the previous ConfigSelector have completed.
|
||||
func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
|
||||
scs.mu.Lock()
|
||||
defer scs.mu.Unlock()
|
||||
scs.cs = cs
|
||||
}
|
||||
|
||||
// SelectConfig defers to the current ConfigSelector in scs.
|
||||
func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
|
||||
scs.mu.RLock()
|
||||
defer scs.mu.RUnlock()
|
||||
return scs.cs.SelectConfig(r)
|
||||
}
|
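
To illustrate the intended flow (a hypothetical selector; the package is internal to grpc-go): a ConfigSelector picks per-RPC configuration, and SafeConfigSelector lets a resolver swap selectors without racing in-flight RPCs.

package main

import (
	"context"
	"fmt"

	iresolver "google.golang.org/grpc/internal/resolver"
)

// passthroughSelector returns the same RPCConfig for every RPC.
type passthroughSelector struct{}

func (passthroughSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	// A real selector could attach LB data to the context or pick a method config here.
	return &iresolver.RPCConfig{Context: ri.Context}, nil
}

func main() {
	var scs iresolver.SafeConfigSelector
	scs.UpdateConfigSelector(passthroughSelector{})

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{
		Context: context.Background(),
		Method:  "/echo.Echo/UnaryEcho",
	})
	fmt.Println(cfg != nil, err) // true <nil>
}
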
73
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
@ -32,7 +32,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
@ -43,6 +45,15 @@ import (
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
var EnableSRVLookups = false
|
||||
|
||||
var logger = grpclog.Component("dns")
|
||||
|
||||
// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
|
||||
// single variable for testing the resolver?
|
||||
var (
|
||||
newTimer = time.NewTimer
|
||||
newTimerDNSResRate = time.NewTimer
|
||||
)
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
@ -140,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
d.ResolveNow(resolver.ResolveNowOptions{})
|
||||
return d, nil
|
||||
}
|
||||
|
||||
@ -198,28 +208,38 @@ func (d *dnsResolver) Close() {
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
defer d.wg.Done()
|
||||
backoffIndex := 1
|
||||
for {
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
case <-d.rn:
|
||||
}
|
||||
|
||||
state, err := d.lookup()
|
||||
if err != nil {
|
||||
// Report error to the underlying grpc.ClientConn.
|
||||
d.cc.ReportError(err)
|
||||
} else {
|
||||
d.cc.UpdateState(*state)
|
||||
err = d.cc.UpdateState(*state)
|
||||
}
|
||||
|
||||
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
|
||||
// will be queued in d.rn.
|
||||
t := time.NewTimer(minDNSResRate)
|
||||
var timer *time.Timer
|
||||
if err == nil {
|
||||
// Resolution succeeded; wait for the next ResolveNow. However, also wait at
// least 30 seconds to prevent constantly re-resolving.
|
||||
backoffIndex = 1
|
||||
timer = newTimerDNSResRate(minDNSResRate)
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-d.rn:
|
||||
}
|
||||
} else {
|
||||
// Poll on an error found in DNS Resolver or an error received from ClientConn.
|
||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
backoffIndex++
|
||||
}
|
||||
select {
|
||||
case <-t.C:
|
||||
case <-d.ctx.Done():
|
||||
t.Stop()
|
||||
timer.Stop()
|
||||
return
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -251,27 +271,22 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
var filterError = func(err error) error {
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
err = filterError(err)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
|
||||
grpclog.Infoln(err)
|
||||
logger.Info(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -294,7 +309,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
return nil
|
||||
}
|
||||
@ -303,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
}
|
||||
newAddrs := make([]resolver.Address, 0, len(addrs))
|
||||
for _, a := range addrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
@ -326,13 +341,15 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||
return nil, hostErr
|
||||
}
|
||||
state := &resolver.State{
|
||||
Addresses: append(addrs, srv...),
|
||||
|
||||
state := resolver.State{Addresses: addrs}
|
||||
if len(srv) > 0 {
|
||||
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
|
||||
}
|
||||
if !d.disableServiceConfig {
|
||||
state.ServiceConfig = d.lookupTXT()
|
||||
}
|
||||
return state, nil
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
@ -418,12 +435,12 @@ func canaryingSC(js string) string {
|
||||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("dns: error parsing service config json: %v", err)
|
||||
logger.Warningf("dns: error parsing service config json: %v", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
grpclog.Warningf("dns: error getting client hostname: %v", err)
|
||||
logger.Warningf("dns: error getting client hostname: %v", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
||||
|
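
The lookup rework above stops tagging SRV results with resolver.GRPCLB address types and instead attaches them via the grpclb state attribute. A small sketch of that shape (the addresses are made up):

package main

import (
	"fmt"

	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func main() {
	// A/AAAA results go into State.Addresses as before.
	s := resolver.State{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}}

	// SRV-discovered balancer addresses ride along as grpclb state.
	s = grpclbstate.Set(s, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{
			{Addr: "10.0.0.9:443", ServerName: "lb.example.com"},
		},
	})
	fmt.Println(len(s.Addresses), grpclbstate.Get(s).BalancerAddresses[0].Addr)
}
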
73
vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package unix implements a resolver for unix targets.
|
||||
package unix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/internal/transport/networktype"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const unixScheme = "unix"
|
||||
const unixAbstractScheme = "unix-abstract"
|
||||
|
||||
type builder struct {
|
||||
scheme string
|
||||
}
|
||||
|
||||
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
if target.Authority != "" {
|
||||
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
|
||||
}
|
||||
|
||||
// gRPC was parsing the dial target manually before PR #4817, and we
|
||||
// switched to using url.Parse() in that PR. To avoid breaking existing
|
||||
// resolver implementations we ended up stripping the leading "/" from the
|
||||
// endpoint. This obviously does not work for the "unix" scheme. Hence we
|
||||
// end up using the parsed URL instead.
|
||||
endpoint := target.URL.Path
|
||||
if endpoint == "" {
|
||||
endpoint = target.URL.Opaque
|
||||
}
|
||||
addr := resolver.Address{Addr: endpoint}
|
||||
if b.scheme == unixAbstractScheme {
|
||||
// prepend "\x00" to address for unix-abstract
|
||||
addr.Addr = "\x00" + addr.Addr
|
||||
}
|
||||
cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
|
||||
return &nopResolver{}, nil
|
||||
}
|
||||
|
||||
func (b *builder) Scheme() string {
|
||||
return b.scheme
|
||||
}
|
||||
|
||||
type nopResolver struct {
|
||||
}
|
||||
|
||||
func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
func (*nopResolver) Close() {}
|
||||
|
||||
func init() {
|
||||
resolver.Register(&builder{scheme: unixScheme})
|
||||
resolver.Register(&builder{scheme: unixAbstractScheme})
|
||||
}
|
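
For reference, this is how the registered schemes are exercised from the client side (the socket path is hypothetical, and a server must already be listening there for the connection to become usable):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// "unix:///abs/path" targets a filesystem socket; "unix-abstract:name"
	// targets an abstract socket (the resolver prepends the leading NUL byte).
	conn, err := grpc.DialContext(ctx, "unix:///tmp/echo.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
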
180
vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package serviceconfig contains utility functions to parse service config.
|
||||
package serviceconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
externalserviceconfig "google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// BalancerConfig wraps the name and config associated with one load balancing
|
||||
// policy. It corresponds to a single entry of the loadBalancingConfig field
|
||||
// from ServiceConfig.
|
||||
//
|
||||
// It implements the json.Unmarshaler interface.
|
||||
//
|
||||
// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
|
||||
type BalancerConfig struct {
|
||||
Name string
|
||||
Config externalserviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
type intermediateBalancerConfig []map[string]json.RawMessage
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
//
|
||||
// It marshals the balancer and config into a length-1 slice
|
||||
// ([]map[string]config).
|
||||
func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
|
||||
if bc.Config == nil {
|
||||
// If config is nil, return empty config `{}`.
|
||||
return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
|
||||
}
|
||||
c, err := json.Marshal(bc.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
//
|
||||
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
|
||||
// config. This method iterates through that list in order, and stops at the
|
||||
// first policy that is supported.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
var ir intermediateBalancerConfig
|
||||
err := json.Unmarshal(b, &ir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var names []string
|
||||
for i, lbcfg := range ir {
|
||||
if len(lbcfg) != 1 {
|
||||
return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||
}
|
||||
|
||||
var (
|
||||
name string
|
||||
jsonCfg json.RawMessage
|
||||
)
|
||||
// Get the key:value pair from the map. We have already made sure that
|
||||
// the map contains a single entry.
|
||||
for name, jsonCfg = range lbcfg {
|
||||
}
|
||||
|
||||
names = append(names, name)
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
// If the balancer is not registered, move on to the next config.
|
||||
// This is not an error.
|
||||
continue
|
||||
}
|
||||
bc.Name = name
|
||||
|
||||
parser, ok := builder.(balancer.ConfigParser)
|
||||
if !ok {
|
||||
if string(jsonCfg) != "{}" {
|
||||
logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
}
|
||||
// Stop at this policy even though the builder does not support parsing its config.
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg, err := parser.ParseConfig(jsonCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
|
||||
}
|
||||
bc.Config = cfg
|
||||
return nil
|
||||
}
|
||||
// This is reached when the for loop iterates over all entries, but didn't
|
||||
// return. This means we had a loadBalancingConfig slice but did not
|
||||
// encounter a registered policy. The config is considered invalid in this
|
||||
// case.
|
||||
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
|
||||
}
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
// particular method.
|
||||
type MethodConfig struct {
|
||||
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||
// the connection is ready by default (!failfast). The value specified via the
|
||||
// gRPC client API will override the value set here.
|
||||
WaitForReady *bool
|
||||
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||
// deadline used will be the minimum of the value specified here and the value
|
||||
// set by the application via the gRPC client API. If either one is not set,
|
||||
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||
Timeout *time.Duration
|
||||
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||
// stream (client->server) in bytes. The size which is measured is the serialized
|
||||
// payload after per-message compression (but before stream compression) in bytes.
|
||||
// The actual value used is the minimum of the value specified here and the value set
|
||||
// by the application via the gRPC client API. If either one is not set, then the other
|
||||
// will be used. If neither is set, then the built-in default is used.
|
||||
MaxReqSize *int
|
||||
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||
// stream (server->client) in bytes.
|
||||
MaxRespSize *int
|
||||
// RetryPolicy configures retry options for the method.
|
||||
RetryPolicy *RetryPolicy
|
||||
}
|
||||
|
||||
// RetryPolicy defines the go-native version of the retry policy defined by the
|
||||
// service config here:
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
|
||||
type RetryPolicy struct {
|
||||
// MaxAttempts is the maximum number of attempts, including the original RPC.
|
||||
//
|
||||
// This field is required and must be two or greater.
|
||||
MaxAttempts int
|
||||
|
||||
// Exponential backoff parameters. The initial retry attempt will occur at
|
||||
// random(0, initialBackoff). In general, the nth attempt will occur at
|
||||
// random(0,
|
||||
// min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
|
||||
//
|
||||
// These fields are required and must be greater than zero.
|
||||
InitialBackoff time.Duration
|
||||
MaxBackoff time.Duration
|
||||
BackoffMultiplier float64
|
||||
|
||||
// The set of status codes which may be retried.
|
||||
//
|
||||
// Status codes are specified as strings, e.g., "UNAVAILABLE".
|
||||
//
|
||||
// This field is required and must be non-empty.
|
||||
// Note: a set is used to store this for easy lookup.
|
||||
RetryableStatusCodes map[codes.Code]bool
|
||||
}
|
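
A sketch of the parsing rule described above (illustrative only; the package is internal to grpc-go, and the example assumes round_robin is registered by importing its package): unregistered policy names are skipped, and the first registered one wins.

package main

import (
	"encoding/json"
	"fmt"

	_ "google.golang.org/grpc/balancer/roundrobin" // registers the round_robin policy
	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	// The first entry is not a registered policy and is skipped; the first
	// registered policy in the list wins.
	raw := `[{"made_up_policy": {}}, {"round_robin": {}}]`

	var bc internalserviceconfig.BalancerConfig
	if err := json.Unmarshal([]byte(raw), &bc); err != nil {
		fmt.Println("invalid loadBalancingConfig:", err)
		return
	}
	fmt.Println(bc.Name) // round_robin

	out, _ := json.Marshal(&bc)
	fmt.Println(string(out)) // e.g. [{"round_robin": {}}]
}
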
166
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package status implements errors returned by gRPC. These errors are
|
||||
// serialized and transmitted on the wire between server and client, and allow
|
||||
// for additional data to be transmitted via the Details field in the status
|
||||
// proto. gRPC service handlers should return an error created by this
|
||||
// package, and gRPC clients should expect a corresponding error to be
|
||||
// returned from the RPC call.
|
||||
//
|
||||
// This package upholds the invariants that a non-nil error may not
|
||||
// contain an OK code, and an OK code must result in a nil error.
|
||||
package status
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// Status represents an RPC status code, message, and details. It is immutable
|
||||
// and should be created with New, Newf, or FromProto.
|
||||
type Status struct {
|
||||
s *spb.Status
|
||||
}
|
||||
|
||||
// New returns a Status representing c and msg.
|
||||
func New(c codes.Code, msg string) *Status {
|
||||
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
|
||||
}
|
||||
|
||||
// Newf returns New(c, fmt.Sprintf(format, a...)).
|
||||
func Newf(c codes.Code, format string, a ...interface{}) *Status {
|
||||
return New(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// FromProto returns a Status representing s.
|
||||
func FromProto(s *spb.Status) *Status {
|
||||
return &Status{s: proto.Clone(s).(*spb.Status)}
|
||||
}
|
||||
|
||||
// Err returns an error representing c and msg. If c is OK, returns nil.
|
||||
func Err(c codes.Code, msg string) error {
|
||||
return New(c, msg).Err()
|
||||
}
|
||||
|
||||
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
return Err(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// Code returns the status code contained in s.
|
||||
func (s *Status) Code() codes.Code {
|
||||
if s == nil || s.s == nil {
|
||||
return codes.OK
|
||||
}
|
||||
return codes.Code(s.s.Code)
|
||||
}
|
||||
|
||||
// Message returns the message contained in s.
|
||||
func (s *Status) Message() string {
|
||||
if s == nil || s.s == nil {
|
||||
return ""
|
||||
}
|
||||
return s.s.Message
|
||||
}
|
||||
|
||||
// Proto returns s's status as an spb.Status proto message.
|
||||
func (s *Status) Proto() *spb.Status {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return proto.Clone(s.s).(*spb.Status)
|
||||
}
|
||||
|
||||
// Err returns an immutable error representing s; returns nil if s.Code() is OK.
|
||||
func (s *Status) Err() error {
|
||||
if s.Code() == codes.OK {
|
||||
return nil
|
||||
}
|
||||
return &Error{s: s}
|
||||
}
|
||||
|
||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||
// If any errors are encountered, it returns nil and the first error encountered.
|
||||
func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
|
||||
if s.Code() == codes.OK {
|
||||
return nil, errors.New("no error details for status with code OK")
|
||||
}
|
||||
// s.Code() != OK implies that s.Proto() != nil.
|
||||
p := s.Proto()
|
||||
for _, detail := range details {
|
||||
any, err := ptypes.MarshalAny(detail)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Details = append(p.Details, any)
|
||||
}
|
||||
return &Status{s: p}, nil
|
||||
}
|
||||
|
||||
// Details returns a slice of details messages attached to the status.
|
||||
// If a detail cannot be decoded, the error is returned in place of the detail.
|
||||
func (s *Status) Details() []interface{} {
|
||||
if s == nil || s.s == nil {
|
||||
return nil
|
||||
}
|
||||
details := make([]interface{}, 0, len(s.s.Details))
|
||||
for _, any := range s.s.Details {
|
||||
detail := &ptypes.DynamicAny{}
|
||||
if err := ptypes.UnmarshalAny(any, detail); err != nil {
|
||||
details = append(details, err)
|
||||
continue
|
||||
}
|
||||
details = append(details, detail.Message)
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
func (s *Status) String() string {
|
||||
return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
|
||||
}
|
||||
|
||||
// Error wraps a pointer to a status proto. It implements error and Status,
|
||||
// and a nil *Error should never be returned by this package.
|
||||
type Error struct {
|
||||
s *Status
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return e.s.String()
|
||||
}
|
||||
|
||||
// GRPCStatus returns the Status represented by e.
|
||||
func (e *Error) GRPCStatus() *Status {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// Is implements future error.Is functionality.
|
||||
// An Error is equivalent if the code and message are identical.
|
||||
func (e *Error) Is(target error) bool {
|
||||
tse, ok := target.(*Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return proto.Equal(e.s.s, tse.s.s)
|
||||
}
|
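
The public google.golang.org/grpc/status package delegates to this internal one. A brief sketch of the New / WithDetails / Err flow implemented here (the code and field values are made up):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.InvalidArgument, "name is required")

	// Details are packed into the status proto as Any messages; on the reading
	// side, entries that fail to decode come back as errors in Details().
	st, err := st.WithDetails(&errdetails.BadRequest_FieldViolation{
		Field:       "name",
		Description: "must be non-empty",
	})
	if err != nil {
		panic(err)
	}

	rpcErr := st.Err() // non-OK code, so this is a non-nil error
	if got, ok := status.FromError(rpcErr); ok {
		fmt.Println(got.Code(), got.Message(), len(got.Details()))
	}
}
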
28
vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
generated
vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
@ -32,35 +30,35 @@ import (
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this process.
|
||||
func GetCPUTime() int64 {
|
||||
var ts unix.Timespec
|
||||
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
|
||||
grpclog.Fatal(err)
|
||||
logger.Fatal(err)
|
||||
}
|
||||
return ts.Nano()
|
||||
}
|
||||
|
||||
// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
|
||||
type Rusage syscall.Rusage
|
||||
// Rusage is an alias for syscall.Rusage under linux environment.
|
||||
type Rusage = syscall.Rusage
|
||||
|
||||
// GetRusage returns the resource usage of current process.
|
||||
func GetRusage() (rusage *Rusage) {
|
||||
rusage = new(Rusage)
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
|
||||
return
|
||||
func GetRusage() *Rusage {
|
||||
rusage := new(Rusage)
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
|
||||
return rusage
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
f := (*syscall.Rusage)(first)
|
||||
l := (*syscall.Rusage)(latest)
|
||||
var (
|
||||
utimeDiffs = l.Utime.Sec - f.Utime.Sec
|
||||
utimeDiffus = l.Utime.Usec - f.Utime.Usec
|
||||
stimeDiffs = l.Stime.Sec - f.Stime.Sec
|
||||
stimeDiffus = l.Stime.Usec - f.Stime.Usec
|
||||
utimeDiffs = latest.Utime.Sec - first.Utime.Sec
|
||||
utimeDiffus = latest.Utime.Usec - first.Utime.Usec
|
||||
stimeDiffs = latest.Stime.Sec - first.Stime.Sec
|
||||
stimeDiffus = latest.Stime.Usec - first.Stime.Usec
|
||||
)
|
||||
|
||||
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
|
||||
|
26
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
@ -18,6 +19,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
// Package syscall provides functionalities that grpc uses to get low-level
|
||||
// operating system stats/info.
|
||||
package syscall
|
||||
|
||||
import (
|
||||
@ -29,44 +32,45 @@ import (
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
func log() {
|
||||
once.Do(func() {
|
||||
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
|
||||
logger.Info("CPU time info is unavailable on non-linux environments.")
|
||||
})
|
||||
}
|
||||
|
||||
// GetCPUTime returns the how much CPU time has passed since the start of this process.
|
||||
// It always returns 0 under non-linux or appengine environment.
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this
|
||||
// process. It always returns 0 under non-linux environments.
|
||||
func GetCPUTime() int64 {
|
||||
log()
|
||||
return 0
|
||||
}
|
||||
|
||||
// Rusage is an empty struct under non-linux or appengine environment.
|
||||
// Rusage is an empty struct under non-linux environments.
|
||||
type Rusage struct{}
|
||||
|
||||
// GetRusage is a no-op function under non-linux or appengine environment.
|
||||
func GetRusage() (rusage *Rusage) {
|
||||
// GetRusage is a no-op function under non-linux environments.
|
||||
func GetRusage() *Rusage {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs. It a no-op function for non-linux or appengine environment.
|
||||
// between two Rusage structs. It is a no-op function for non-linux environments.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
log()
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
|
||||
// SetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
|
||||
// a negative return value indicates the operation is not supported
|
||||
// GetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
// A negative return value indicates the operation is not supported.
|
||||
func GetTCPUserTimeout(conn net.Conn) (int, error) {
|
||||
log()
|
||||
return -1, nil
|
||||
|
142
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
@ -20,13 +20,17 @@ package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
@ -128,6 +132,16 @@ type cleanupStream struct {
|
||||
|
||||
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
|
||||
|
||||
type earlyAbortStream struct {
|
||||
httpStatus uint32
|
||||
streamID uint32
|
||||
contentSubtype string
|
||||
status *status.Status
|
||||
rst bool
|
||||
}
|
||||
|
||||
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
@ -284,7 +298,7 @@ type controlBuffer struct {
|
||||
// closed and nilled when transportResponseFrames drops below the
|
||||
// threshold. Both fields are protected by mu.
|
||||
transportResponseFrames int
|
||||
trfChan atomic.Value // *chan struct{}
|
||||
trfChan atomic.Value // chan struct{}
|
||||
}
|
||||
|
||||
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
@ -298,10 +312,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
|
||||
// controlbuf.
|
||||
func (c *controlBuffer) throttle() {
|
||||
ch, _ := c.trfChan.Load().(*chan struct{})
|
||||
ch, _ := c.trfChan.Load().(chan struct{})
|
||||
if ch != nil {
|
||||
select {
|
||||
case <-*ch:
|
||||
case <-ch:
|
||||
case <-c.done:
|
||||
}
|
||||
}
|
||||
@ -335,8 +349,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
|
||||
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
|
||||
// We are adding the frame that puts us over the threshold; create
|
||||
// a throttling channel.
|
||||
ch := make(chan struct{})
|
||||
c.trfChan.Store(&ch)
|
||||
c.trfChan.Store(make(chan struct{}))
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
@ -377,9 +390,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
|
||||
// We are removing the frame that put us over the
|
||||
// threshold; close and clear the throttling channel.
|
||||
ch := c.trfChan.Load().(*chan struct{})
|
||||
close(*ch)
|
||||
c.trfChan.Store((*chan struct{})(nil))
|
||||
ch := c.trfChan.Load().(chan struct{})
|
||||
close(ch)
|
||||
c.trfChan.Store((chan struct{})(nil))
|
||||
}
|
||||
c.transportResponseFrames--
|
||||
}
|
||||
@ -395,7 +408,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
c.finish()
|
||||
return nil, ErrConnClosing
|
||||
}
|
||||
}
|
||||
@ -420,6 +432,14 @@ func (c *controlBuffer) finish() {
|
||||
hdr.onOrphaned(ErrConnClosing)
|
||||
}
|
||||
}
|
||||
// In case throttle() is currently in flight, it needs to be unblocked.
|
||||
// Otherwise, the transport may not close, since the transport is closed by
|
||||
// the reader encountering the connection error.
|
||||
ch, _ := c.trfChan.Load().(chan struct{})
|
||||
if ch != nil {
|
||||
close(ch)
|
||||
}
|
||||
c.trfChan.Store((chan struct{})(nil))
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
@ -505,7 +525,9 @@ func (l *loopyWriter) run() (err error) {
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
infof("transport: loopyWriter.run returning. %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: loopyWriter.run returning. %v", err)
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
@ -605,7 +627,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Case 1.A: Server is responding back with headers.
|
||||
@ -658,7 +682,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
var (
|
||||
@ -743,6 +769,32 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
|
||||
if l.side == clientSide {
|
||||
return errors.New("earlyAbortStream not handled on client")
|
||||
}
|
||||
// In case the caller forgets to set the http status, default to 200.
|
||||
if eas.httpStatus == 0 {
|
||||
eas.httpStatus = 200
|
||||
}
|
||||
headerFields := []hpack.HeaderField{
|
||||
{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
|
||||
{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
|
||||
{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
|
||||
{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
|
||||
}
|
||||
|
||||
if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if eas.rst {
|
||||
if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
@ -781,6 +833,8 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||||
return l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *earlyAbortStream:
|
||||
return l.earlyAbortStreamHandler(i)
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
@ -857,38 +911,45 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
var (
|
||||
idx int
|
||||
buf []byte
|
||||
)
|
||||
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
idx = 1
|
||||
buf = dataItem.d
|
||||
}
|
||||
size := http2MaxFrameLen
|
||||
if len(buf) < size {
|
||||
size = len(buf)
|
||||
}
|
||||
// Figure out the maximum size we can send
|
||||
maxSize := http2MaxFrameLen
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
||||
str.state = waitingOnStreamQuota
|
||||
return false, nil
|
||||
} else if strQuota < size {
|
||||
size = strQuota
|
||||
} else if maxSize > strQuota {
|
||||
maxSize = strQuota
|
||||
}
|
||||
if maxSize > int(l.sendQuota) { // connection-level flow control.
|
||||
maxSize = int(l.sendQuota)
|
||||
}
|
||||
// Compute how much of the header and data we can send within quota and max frame length
|
||||
hSize := min(maxSize, len(dataItem.h))
|
||||
dSize := min(maxSize-hSize, len(dataItem.d))
|
||||
if hSize != 0 {
|
||||
if dSize == 0 {
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
// We can add some data to the gRPC message header to distribute bytes more equally across frames.
|
||||
// Copy on the stack to avoid generating garbage
|
||||
var localBuf [http2MaxFrameLen]byte
|
||||
copy(localBuf[:hSize], dataItem.h)
|
||||
copy(localBuf[hSize:], dataItem.d[:dSize])
|
||||
buf = localBuf[:hSize+dSize]
|
||||
}
|
||||
} else {
|
||||
buf = dataItem.d
|
||||
}
|
||||
|
||||
if l.sendQuota < uint32(size) { // connection-level flow control.
|
||||
size = int(l.sendQuota)
|
||||
}
|
||||
size := hSize + dSize
|
||||
|
||||
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||
str.wq.replenish(size)
|
||||
var endStream bool
|
||||
// If this is the last data message on this stream and all of it can be written in this iteration.
|
||||
if dataItem.endStream && size == len(buf) {
|
||||
// buf contains either data or it contains header but data is empty.
|
||||
if idx == 1 || len(dataItem.d) == 0 {
|
||||
endStream = true
|
||||
}
|
||||
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
|
||||
endStream = true
|
||||
}
|
||||
if dataItem.onEachWrite != nil {
|
||||
dataItem.onEachWrite()
|
||||
@ -896,14 +957,10 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||
return false, err
|
||||
}
|
||||
buf = buf[size:]
|
||||
str.bytesOutStanding += size
|
||||
l.sendQuota -= uint32(size)
|
||||
if idx == 0 {
|
||||
dataItem.h = buf
|
||||
} else {
|
||||
dataItem.d = buf
|
||||
}
|
||||
dataItem.h = dataItem.h[hSize:]
|
||||
dataItem.d = dataItem.d[dSize:]
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||
str.itl.dequeue()
|
||||
@ -924,3 +981,10 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
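
The processData rework above sizes each DATA frame from three limits (max frame length, stream-level quota, connection-level quota) and then packs header bytes before message bytes. A simplified standalone sketch of that computation (the function and variable names are illustrative, not part of the diff):

package main

import "fmt"

const http2MaxFrameLen = 16384

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// frameChunk mirrors the reworked processData logic: cap the write by the
// frame limit, the stream quota, and the connection quota, then fill the
// frame with header bytes first and message bytes after.
func frameChunk(h, d []byte, streamQuota, connQuota int) (buf []byte, hUsed, dUsed int) {
	maxSize := http2MaxFrameLen
	if streamQuota < maxSize {
		maxSize = streamQuota
	}
	if connQuota < maxSize {
		maxSize = connQuota
	}
	hUsed = min(maxSize, len(h))
	dUsed = min(maxSize-hUsed, len(d))
	buf = append(append(buf, h[:hUsed]...), d[:dUsed]...)
	return buf, hUsed, dUsed
}

func main() {
	h := make([]byte, 5)     // gRPC message header
	d := make([]byte, 40000) // message payload
	buf, hUsed, dUsed := frameChunk(h, d, 30000, 20000)
	fmt.Println(len(buf), hUsed, dUsed) // 16384 5 16379
}
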
4
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
generated
vendored
@ -136,12 +136,10 @@ type inFlow struct {
|
||||
|
||||
// newLimit updates the inflow window to a new value n.
|
||||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||
func (f *inFlow) newLimit(n uint32) {
|
||||
f.mu.Lock()
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
|
101
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
@ -39,6 +39,7 @@ import (
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
@ -48,7 +49,7 @@ import (
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
@ -57,7 +58,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := contentSubtype(contentType)
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
||||
if !validContentType {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
@ -112,11 +113,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
@ -138,12 +138,11 @@ type serverHandlerTransport struct {
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
func (ht *serverHandlerTransport) Close() {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
@ -186,8 +185,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
ht.writeStatusMu.Lock()
|
||||
defer ht.writeStatusMu.Unlock()
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
@ -226,8 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
for _, sh := range ht.stats {
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
@ -236,14 +240,16 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
return err
|
||||
}
|
||||
|
||||
// writePendingHeaders sets common and custom headers on the first
|
||||
// write call (Write, WriteHeader, or WriteStatus)
|
||||
func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.writeCustomHeaders(s)
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", ht.contentType)
|
||||
@ -262,9 +268,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
}
|
||||
}
|
||||
|
||||
// writeCustomHeaders sets custom headers set on the stream via SetHeader
|
||||
// on the first write call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
|
||||
h := ht.rw.Header()
|
||||
|
||||
s.hdrMu.Lock()
|
||||
for k, vv := range s.header {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
h.Add(k, encodeMetadataHeader(k, v))
|
||||
}
|
||||
}
|
||||
|
||||
s.hdrMu.Unlock()
|
||||
}
|
||||
|
||||
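// Editor's illustrative sketch, not part of the vendored file: the lazy
// first-write header pattern shared by Write, WriteHeader and WriteStatus
// below. updateHeaderSent records that headers have been sent and reports
// whether they had already been sent, so writePendingHeaders (common plus
// custom headers) runs exactly once, on whichever write call happens first:
//
//	headersWritten := s.updateHeaderSent()
//	err := ht.do(func() {
//		if !headersWritten {
//			ht.writePendingHeaders(s)
//		}
//		// ... write the message body or the final status ...
//	})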
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
headersWritten := s.updateHeaderSent()
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
ht.rw.Write(hdr)
|
||||
ht.rw.Write(data)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
@ -272,27 +299,27 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if err := s.SetHeader(md); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v = encodeMetadataHeader(k, v)
|
||||
h.Add(k, v)
|
||||
}
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
for _, sh := range ht.stats {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
sh.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -338,18 +365,18 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}}
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
|
510
vendor/google.golang.org/grpc/internal/transport/http2_client.go
generated
vendored
@ -24,6 +24,8 @@ import (
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -32,15 +34,18 @@ import (
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/internal/transport/networktype"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@ -57,7 +62,7 @@ type http2Client struct {
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
userAgent string
|
||||
md interface{}
|
||||
md metadata.MD
|
||||
conn net.Conn // underlying communication channel
|
||||
loopy *loopyWriter
|
||||
remoteAddr net.Addr
|
||||
@ -85,7 +90,7 @@ type http2Client struct {
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
statsHandler stats.Handler
|
||||
statsHandlers []stats.Handler
|
||||
|
||||
initialWindowSize int32
|
||||
|
||||
@ -112,6 +117,9 @@ type http2Client struct {
|
||||
// goAwayReason records the http2.ErrCode and debug data received with the
|
||||
// GoAway frame.
|
||||
goAwayReason GoAwayReason
|
||||
// goAwayDebugMessage contains a detailed human readable string about a
|
||||
// GoAway frame, useful for error messages.
|
||||
goAwayDebugMessage string
|
||||
// A condition variable used to signal when the keepalive goroutine should
|
||||
// go dormant. The condition for dormancy is based on the number of active
|
||||
// streams and the `PermitWithoutStream` keepalive client parameter. And
|
||||
@ -124,7 +132,7 @@ type http2Client struct {
|
||||
kpDormant bool
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
|
||||
onGoAway func(GoAwayReason)
|
||||
@ -135,11 +143,34 @@ type http2Client struct {
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
|
||||
address := addr.Addr
|
||||
networkType, ok := networktype.Get(addr)
|
||||
if fn != nil {
|
||||
return fn(ctx, addr)
|
||||
// Special handling for unix scheme with custom dialer. Back in the day,
|
||||
// we did not have a unix resolver and therefore targets with a unix
|
||||
// scheme would end up using the passthrough resolver. So, users used a
|
||||
// custom dialer in this case and expected the original dial target to
|
||||
// be passed to the custom dialer. Now, we have a unix resolver. But if
|
||||
// a custom dialer is specified, we want to retain the old behavior in
|
||||
// terms of the address being passed to the custom dialer.
|
||||
if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
|
||||
// Supported unix targets are either "unix://absolute-path" or
|
||||
// "unix:relative-path".
|
||||
if filepath.IsAbs(address) {
|
||||
return fn(ctx, "unix://"+address)
|
||||
}
|
||||
return fn(ctx, "unix:"+address)
|
||||
}
|
||||
return fn(ctx, address)
|
||||
}
|
||||
return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
|
||||
if !ok {
|
||||
networkType, address = parseDialTarget(address)
|
||||
}
|
||||
if networkType == "tcp" && useProxy {
|
||||
return proxyDial(ctx, address, grpcUA)
|
||||
}
|
||||
return (&net.Dialer{}).DialContext(ctx, networkType, address)
|
||||
}
|
||||
|
||||
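// Editor's illustrative sketch, not part of the vendored file: the unix-scheme
// rewriting rule implemented by dial above, shown in isolation. A custom dialer
// receives the original-style target: abstract sockets (leading NUL byte) pass
// through unchanged, absolute paths become "unix://<path>", and relative paths
// become "unix:<path>". The helper name is hypothetical; it relies only on the
// strings and path/filepath imports already present in this file.
func unixTargetForCustomDialer(address string) string {
	if strings.HasPrefix(address, "\x00") {
		// Abstract socket: hand the raw address to the custom dialer as-is.
		return address
	}
	if filepath.IsAbs(address) {
		return "unix://" + address
	}
	return "unix:" + address
}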
func isTemporary(err error) bool {
|
||||
@ -161,7 +192,7 @@ func isTemporary(err error) bool {
|
||||
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
||||
// and starts to receive messages on it. Non-nil error returns if construction
|
||||
// fails.
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
scheme := "http"
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
@ -170,7 +201,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
|
||||
// gRPC, resolver, balancer etc. can specify arbitrary data in the
|
||||
// Attributes field of resolver.Address, which is shoved into connectCtx
|
||||
// and passed to the dialer and credential handshaker. This makes it possible for
|
||||
// address specific arbitrary data to reach custom dialers and credential handshakers.
|
||||
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
|
||||
|
||||
conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
|
||||
if err != nil {
|
||||
if opts.FailOnNonTempDialError {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
|
||||
@ -214,12 +251,34 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
}
|
||||
if transportCreds != nil {
|
||||
scheme = "https"
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
|
||||
rawConn := conn
|
||||
// Pull the deadline from the connectCtx, which will be used for
|
||||
// timeouts in the authentication protocol handshake. Can ignore the
|
||||
// boolean as the deadline will return the zero value, which will make
|
||||
// the conn not timeout on I/O operations.
|
||||
deadline, _ := connectCtx.Deadline()
|
||||
rawConn.SetDeadline(deadline)
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
||||
}
|
||||
for _, cd := range perRPCCreds {
|
||||
if cd.RequireTransportSecurity() {
|
||||
if ci, ok := authInfo.(interface {
|
||||
GetCommonAuthInfo() credentials.CommonAuthInfo
|
||||
}); ok {
|
||||
secLevel := ci.GetCommonAuthInfo().SecurityLevel
|
||||
if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
|
||||
return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
isSecure = true
|
||||
if transportCreds.Info().SecurityProtocol == "tls" {
|
||||
scheme = "https"
|
||||
}
|
||||
}
|
||||
dynamicWindow := true
|
||||
icwz := int32(initialWindowSize)
|
||||
@ -238,7 +297,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
ctxDone: ctx.Done(), // Cache Done chan.
|
||||
cancel: cancel,
|
||||
userAgent: opts.UserAgent,
|
||||
md: addr.Metadata,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
@ -253,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
isSecure: isSecure,
|
||||
perRPCCreds: perRPCCreds,
|
||||
kp: kp,
|
||||
statsHandler: opts.StatsHandler,
|
||||
statsHandlers: opts.StatsHandlers,
|
||||
initialWindowSize: initialWindowSize,
|
||||
onPrefaceReceipt: onPrefaceReceipt,
|
||||
nextID: 1,
|
||||
@ -266,6 +324,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
keepaliveEnabled: keepaliveEnabled,
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
|
||||
if md, ok := addr.Metadata.(*metadata.MD); ok {
|
||||
t.md = *md
|
||||
} else if md := imetadata.Get(addr); md != nil {
|
||||
t.md = md
|
||||
}
|
||||
t.controlBuf = newControlBuffer(t.ctxDone)
|
||||
if opts.InitialWindowSize >= defaultWindowSize {
|
||||
t.initialWindowSize = opts.InitialWindowSize
|
||||
@ -277,18 +341,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.statsHandlers {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t.keepaliveEnabled {
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
@ -302,12 +367,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
// Send connection preface to server.
|
||||
n, err := t.conn.Write(clientPreface)
|
||||
if err != nil {
|
||||
t.Close()
|
||||
return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
||||
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
if n != len(clientPreface) {
|
||||
t.Close()
|
||||
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
var ss []http2.Setting
|
||||
|
||||
@ -325,14 +392,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
err = t.framer.fr.WriteSettings(ss...)
|
||||
if err != nil {
|
||||
t.Close()
|
||||
return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
||||
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
||||
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
||||
t.Close()
|
||||
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||||
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@ -345,13 +414,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
err := t.loopy.run()
|
||||
if err != nil {
|
||||
errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
// If it's a connection error, let reader goroutine handle it
|
||||
// since there might be data in the buffers.
|
||||
if _, ok := err.(net.Error); !ok {
|
||||
t.conn.Close()
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
// Do not close the transport. Let reader goroutine handle it since
|
||||
// there might be data in the buffers.
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
return t, nil
|
||||
@ -367,6 +437,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
buf: newRecvBuffer(),
|
||||
headerChan: make(chan struct{}),
|
||||
contentSubtype: callHdr.ContentSubtype,
|
||||
doneFunc: callHdr.DoneFunc,
|
||||
}
|
||||
s.wq = newWriteQuota(defaultWriteQuota, s.done)
|
||||
s.requestRead = func(n int) {
|
||||
@ -406,7 +477,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
Method: callHdr.Method,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
|
||||
ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
|
||||
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -425,7 +496,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
|
||||
if callHdr.PreviousAttempts > 0 {
|
||||
@ -440,7 +511,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
|
||||
timeout := time.Until(dl)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
|
||||
}
|
||||
for k, v := range authData {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
@ -469,25 +540,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
for _, vv := range added {
|
||||
for i, v := range vv {
|
||||
if i%2 == 0 {
|
||||
k = v
|
||||
k = strings.ToLower(v)
|
||||
continue
|
||||
}
|
||||
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
}
|
||||
}
|
||||
if md, ok := t.md.(*metadata.MD); ok {
|
||||
for k, vv := range *md {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
for k, vv := range t.md {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
}
|
||||
return headerFields, nil
|
||||
@ -520,7 +589,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
|
||||
return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
|
||||
}
|
||||
for k, v := range data {
|
||||
// Capital header names are illegal in HTTP/2.
|
||||
@ -537,8 +606,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
// Note: if these credentials are provided both via dial options and call
|
||||
// options, then both sets of credentials will be applied.
|
||||
if callCreds := callHdr.Creds; callCreds != nil {
|
||||
if !t.isSecure && callCreds.RequireTransportSecurity() {
|
||||
return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
|
||||
if callCreds.RequireTransportSecurity() {
|
||||
ri, _ := credentials.RequestInfoFromContext(ctx)
|
||||
if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
|
||||
return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
|
||||
}
|
||||
}
|
||||
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
@ -554,13 +626,34 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
return callAuthData, nil
|
||||
}
|
||||
|
||||
// NewStreamError wraps an error and reports additional information. Typically
|
||||
// NewStream errors result in transparent retry, as they mean nothing went onto
|
||||
// the wire. However, there are two notable exceptions:
|
||||
//
|
||||
// 1. If the stream headers violate the max header list size allowed by the
|
||||
// server. It's possible this could succeed on another transport, even if
|
||||
// it's unlikely, but do not transparently retry.
|
||||
// 2. If the credentials errored when requesting their headers. In this case,
|
||||
// it's possible a retry can fix the problem, but indefinitely transparently
|
||||
// retrying is not appropriate as it is likely the credentials, if they can
|
||||
// eventually succeed, would need I/O to do so.
|
||||
type NewStreamError struct {
|
||||
Err error
|
||||
|
||||
AllowTransparentRetry bool
|
||||
}
|
||||
|
||||
func (e NewStreamError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
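// Editor's illustrative sketch, not part of the vendored file: how a retry
// layer might consume the NewStreamError contract documented above. NewStream
// returns *NewStreamError for every non-nil error, so a plain type assertion
// is enough; only errors that never reached the wire set AllowTransparentRetry.
// The helper name is hypothetical.
func shouldTransparentlyRetry(err error) bool {
	nse, ok := err.(*NewStreamError)
	return ok && nse.AllowTransparentRetry
}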
// NewStream creates a stream and registers it into the transport as "active"
|
||||
// streams.
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
|
||||
// streams. All non-nil errors returned will be *NewStreamError.
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
||||
ctx = peer.NewContext(ctx, t.getPeer())
|
||||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
||||
}
|
||||
s := t.newStream(ctx, callHdr)
|
||||
cleanup := func(err error) {
|
||||
@ -660,36 +753,47 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
return true
|
||||
}, hdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Connection closed.
|
||||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
|
||||
}
|
||||
if success {
|
||||
break
|
||||
}
|
||||
if hdrListSizeErr != nil {
|
||||
return nil, hdrListSizeErr
|
||||
return nil, &NewStreamError{Err: hdrListSizeErr}
|
||||
}
|
||||
firstTry = false
|
||||
select {
|
||||
case <-ch:
|
||||
case <-s.ctx.Done():
|
||||
return nil, ContextErr(s.ctx.Err())
|
||||
case <-ctx.Done():
|
||||
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
|
||||
case <-t.goAway:
|
||||
return nil, errStreamDrain
|
||||
return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
|
||||
case <-t.ctx.Done():
|
||||
return nil, ErrConnClosing
|
||||
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
header, _, _ := metadata.FromOutgoingContextRaw(ctx)
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header.Copy(),
|
||||
if len(t.statsHandlers) != 0 {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
for _, sh := range t.statsHandlers {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
// Note: Creating a new stats object to prevent pollution.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
@ -764,6 +868,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
|
||||
// This will unblock write.
|
||||
close(s.done)
|
||||
if s.doneFunc != nil {
|
||||
s.doneFunc()
|
||||
}
|
||||
}
|
||||
|
||||
// Close kicks off the shutdown process of the transport. This should be called
|
||||
@ -773,12 +880,12 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
// This method blocks until the addrConn that initiated this transport is
|
||||
// re-connected. This happens because t.onClose() begins reconnect logic at the
|
||||
// addrConn level and blocks until the addrConn is successfully connected.
|
||||
func (t *http2Client) Close() error {
|
||||
func (t *http2Client) Close(err error) {
|
||||
t.mu.Lock()
|
||||
// Make sure we only Close once.
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// Call t.onClose before setting the state to closing to prevent the client
|
||||
// from attempting to create new streams ASAP.
|
||||
@ -794,21 +901,30 @@ func (t *http2Client) Close() error {
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
err := t.conn.Close()
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
t.conn.Close()
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
// Append info about previous goaways if there were any, since this may be important
|
||||
// for understanding the root cause for this connection to be closed.
|
||||
_, goAwayDebugMessage := t.GetGoAwayReason()
|
||||
|
||||
var st *status.Status
|
||||
if len(goAwayDebugMessage) > 0 {
|
||||
st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
|
||||
err = st.Err()
|
||||
} else {
|
||||
st = status.New(codes.Unavailable, err.Error())
|
||||
}
|
||||
|
||||
// Notify all active streams.
|
||||
for _, s := range streams {
|
||||
t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
|
||||
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
for _, sh := range t.statsHandlers {
|
||||
connEnd := &stats.ConnEnd{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GracefulClose sets the state to draining, which prevents new streams from
|
||||
@ -827,7 +943,7 @@ func (t *http2Client) GracefulClose() {
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close()
|
||||
t.Close(ErrConnClosing)
|
||||
return
|
||||
}
|
||||
t.controlBuf.put(&incomingGoAway{})
|
||||
@ -847,18 +963,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
df := &dataFrame{
|
||||
streamID: s.id,
|
||||
endStream: opts.Last,
|
||||
h: hdr,
|
||||
d: data,
|
||||
}
|
||||
if hdr != nil || data != nil { // If it's not an empty data frame.
|
||||
// Add some data to grpc message header so that we can equally
|
||||
// distribute bytes across frames.
|
||||
emptyLen := http2MaxFrameLen - len(hdr)
|
||||
if emptyLen > len(data) {
|
||||
emptyLen = len(data)
|
||||
}
|
||||
hdr = append(hdr, data[:emptyLen]...)
|
||||
data = data[emptyLen:]
|
||||
df.h, df.d = hdr, data
|
||||
// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
|
||||
if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
|
||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -976,7 +1084,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
}
|
||||
// The server has closed the stream without sending trailers. Record that
|
||||
// the read direction is closed, and set the status appropriately.
|
||||
if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
|
||||
if f.StreamEnded() {
|
||||
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
|
||||
}
|
||||
}
|
||||
@ -992,7 +1100,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
}
|
||||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||||
if !ok {
|
||||
warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
}
|
||||
statusCode = codes.Unknown
|
||||
}
|
||||
if statusCode == codes.Canceled {
|
||||
@ -1074,12 +1184,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
||||
infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
}
|
||||
}
|
||||
id := f.LastStreamID
|
||||
if id > 0 && id%2 != 1 {
|
||||
if id > 0 && id%2 == 0 {
|
||||
t.mu.Unlock()
|
||||
t.Close()
|
||||
t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
|
||||
return
|
||||
}
|
||||
// A client can receive multiple GoAways from the server (see
|
||||
@ -1097,7 +1209,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
|
||||
if id > t.prevGoAwayID {
|
||||
t.mu.Unlock()
|
||||
t.Close()
|
||||
t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
|
||||
return
|
||||
}
|
||||
default:
|
||||
@ -1127,7 +1239,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close()
|
||||
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
||||
}
|
||||
}
|
||||
|
||||
@ -1143,12 +1255,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
|
||||
t.goAwayReason = GoAwayTooManyPings
|
||||
}
|
||||
}
|
||||
if len(f.DebugData()) == 0 {
|
||||
t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
|
||||
} else {
|
||||
t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Client) GetGoAwayReason() GoAwayReason {
|
||||
func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
return t.goAwayReason
|
||||
return t.goAwayReason, t.goAwayDebugMessage
|
||||
}
|
||||
|
||||
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
||||
@ -1175,34 +1292,128 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
return
|
||||
}
|
||||
|
||||
state := &decodeState{}
|
||||
// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
|
||||
state.data.isGRPC = !initialHeader
|
||||
if err := state.decodeHeader(frame); err != nil {
|
||||
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
|
||||
// frame.Truncated is set to true when framer detects that the current header
|
||||
// list size hits MaxHeaderListSize limit.
|
||||
if frame.Truncated {
|
||||
se := status.New(codes.Internal, "peer header list size exceeded limit")
|
||||
t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
// If a gRPC Response-Headers has already been received, then it means
|
||||
// that the peer is speaking gRPC and we are in gRPC mode.
|
||||
isGRPC = !initialHeader
|
||||
mdata = make(map[string][]string)
|
||||
contentTypeErr = "malformed header: missing HTTP content-type"
|
||||
grpcMessage string
|
||||
statusGen *status.Status
|
||||
recvCompress string
|
||||
httpStatusCode *int
|
||||
httpStatusErr string
|
||||
rawStatusCode = codes.Unknown
|
||||
// headerError is set if an error is encountered while parsing the headers
|
||||
headerError string
|
||||
)
|
||||
|
||||
if initialHeader {
|
||||
httpStatusErr = "malformed header: missing HTTP status"
|
||||
}
|
||||
|
||||
for _, hf := range frame.Fields {
|
||||
switch hf.Name {
|
||||
case "content-type":
|
||||
if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
|
||||
contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
|
||||
break
|
||||
}
|
||||
contentTypeErr = ""
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
isGRPC = true
|
||||
case "grpc-encoding":
|
||||
recvCompress = hf.Value
|
||||
case "grpc-status":
|
||||
code, err := strconv.ParseInt(hf.Value, 10, 32)
|
||||
if err != nil {
|
||||
se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
|
||||
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
|
||||
return
|
||||
}
|
||||
rawStatusCode = codes.Code(uint32(code))
|
||||
case "grpc-message":
|
||||
grpcMessage = decodeGrpcMessage(hf.Value)
|
||||
case "grpc-status-details-bin":
|
||||
var err error
|
||||
statusGen, err = decodeGRPCStatusDetails(hf.Value)
|
||||
if err != nil {
|
||||
headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
|
||||
}
|
||||
case ":status":
|
||||
if hf.Value == "200" {
|
||||
httpStatusErr = ""
|
||||
statusCode := 200
|
||||
httpStatusCode = &statusCode
|
||||
break
|
||||
}
|
||||
|
||||
c, err := strconv.ParseInt(hf.Value, 10, 32)
|
||||
if err != nil {
|
||||
se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
|
||||
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
|
||||
return
|
||||
}
|
||||
statusCode := int(c)
|
||||
httpStatusCode = &statusCode
|
||||
|
||||
httpStatusErr = fmt.Sprintf(
|
||||
"unexpected HTTP status code received from server: %d (%s)",
|
||||
statusCode,
|
||||
http.StatusText(statusCode),
|
||||
)
|
||||
default:
|
||||
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||||
if err != nil {
|
||||
headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
|
||||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], v)
|
||||
}
|
||||
}
|
||||
|
||||
if !isGRPC || httpStatusErr != "" {
|
||||
var code = codes.Internal // when header does not include HTTP status, return INTERNAL
|
||||
|
||||
if httpStatusCode != nil {
|
||||
var ok bool
|
||||
code, ok = HTTPStatusConvTab[*httpStatusCode]
|
||||
if !ok {
|
||||
code = codes.Unknown
|
||||
}
|
||||
}
|
||||
var errs []string
|
||||
if httpStatusErr != "" {
|
||||
errs = append(errs, httpStatusErr)
|
||||
}
|
||||
if contentTypeErr != "" {
|
||||
errs = append(errs, contentTypeErr)
|
||||
}
|
||||
// Verify the HTTP response is a 200.
|
||||
se := status.New(code, strings.Join(errs, "; "))
|
||||
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
|
||||
return
|
||||
}
|
||||
|
||||
if headerError != "" {
|
||||
se := status.New(codes.Internal, headerError)
|
||||
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
|
||||
return
|
||||
}
|
||||
|
||||
isHeader := false
|
||||
defer func() {
|
||||
if t.statsHandler != nil {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: s.header.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: s.trailer.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
@ -1213,9 +1424,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
// These values can be set without any synchronization because
|
||||
// stream goroutine will read it only after seeing a closed
|
||||
// headerChan which we'll close after setting this.
|
||||
s.recvCompress = state.data.encoding
|
||||
if len(state.data.mdata) > 0 {
|
||||
s.header = state.data.mdata
|
||||
s.recvCompress = recvCompress
|
||||
if len(mdata) > 0 {
|
||||
s.header = mdata
|
||||
}
|
||||
} else {
|
||||
// HEADERS frame block carries a Trailers-Only.
|
||||
@ -1224,13 +1435,36 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
close(s.headerChan)
|
||||
}
|
||||
|
||||
for _, sh := range t.statsHandlers {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: metadata.MD(mdata).Copy(),
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
|
||||
if !endStream {
|
||||
return
|
||||
}
|
||||
|
||||
if statusGen == nil {
|
||||
statusGen = status.New(rawStatusCode, grpcMessage)
|
||||
}
|
||||
|
||||
// if client received END_STREAM from server while stream was still active, send RST_STREAM
|
||||
rst := s.getState() == streamActive
|
||||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
|
||||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
||||
}
|
||||
|
||||
// reader runs as a separate goroutine in charge of reading data from network
|
||||
@ -1244,7 +1478,8 @@ func (t *http2Client) reader() {
|
||||
// Check the validity of server preface.
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if err != nil {
|
||||
t.Close() // this kicks off resetTransport, so must be last before return
|
||||
err = connectionErrorf(true, err, "error reading server preface: %v", err)
|
||||
t.Close(err) // this kicks off resetTransport, so must be last before return
|
||||
return
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
@ -1253,7 +1488,8 @@ func (t *http2Client) reader() {
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
t.Close() // this kicks off resetTransport, so must be last before return
|
||||
// this kicks off resetTransport, so must be last before return
|
||||
t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
|
||||
return
|
||||
}
|
||||
t.onPrefaceReceipt()
|
||||
@ -1277,13 +1513,19 @@ func (t *http2Client) reader() {
|
||||
if s != nil {
|
||||
// use error detail to provide better err message
|
||||
code := http2ErrConvTab[se.Code]
|
||||
msg := t.framer.fr.ErrorDetail().Error()
|
||||
errorDetail := t.framer.fr.ErrorDetail()
|
||||
var msg string
|
||||
if errorDetail != nil {
|
||||
msg = errorDetail.Error()
|
||||
} else {
|
||||
msg = "received invalid frame"
|
||||
}
|
||||
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
// Transport error.
|
||||
t.Close()
|
||||
t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -1303,7 +1545,9 @@ func (t *http2Client) reader() {
|
||||
case *http2.WindowUpdateFrame:
|
||||
t.handleWindowUpdate(frame)
|
||||
default:
|
||||
errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1315,7 +1559,7 @@ func minTime(a, b time.Duration) time.Duration {
|
||||
return b
|
||||
}
|
||||
|
||||
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
|
||||
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
|
||||
func (t *http2Client) keepalive() {
|
||||
p := &ping{data: [8]byte{}}
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
@ -1340,7 +1584,7 @@ func (t *http2Client) keepalive() {
|
||||
continue
|
||||
}
|
||||
if outstandingPing && timeoutLeft <= 0 {
|
||||
t.Close()
|
||||
t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
|
484
vendor/google.golang.org/grpc/internal/transport/http2_server.go
generated
vendored
@ -21,11 +21,11 @@ package transport
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@ -34,12 +34,11 @@ import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@ -53,13 +52,10 @@ import (
|
||||
var (
|
||||
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
||||
// the stream's state.
|
||||
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
|
||||
ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
|
||||
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
|
||||
// than the limit set by peer.
|
||||
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
|
||||
// statusRawProto is a function to get to the raw status proto wrapped in a
|
||||
// status.Status without a proto.Clone().
|
||||
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
|
||||
ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
|
||||
)
|
||||
|
||||
// serverConnectionCounter counts the number of connections a server has seen
|
||||
@ -77,7 +73,6 @@ type http2Server struct {
|
||||
writerDone chan struct{} // sync point to enable testing.
|
||||
remoteAddr net.Addr
|
||||
localAddr net.Addr
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
inTapHandle tap.ServerInHandle
|
||||
framer *framer
|
||||
@ -87,7 +82,7 @@ type http2Server struct {
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
// Keepalive enforcement policy.
|
||||
@ -106,11 +101,11 @@ type http2Server struct {
|
||||
|
||||
mu sync.Mutex // guard the following
|
||||
|
||||
// drainChan is initialized when drain(...) is called the first time.
|
||||
// drainChan is initialized when Drain() is called the first time.
|
||||
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
|
||||
// Then an independent goroutine will be launched to later send the second GoAway.
|
||||
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
|
||||
// Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
|
||||
// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
|
||||
// already underway.
|
||||
drainChan chan struct{}
|
||||
state transportState
|
||||
@ -122,16 +117,42 @@ type http2Server struct {
|
||||
idle time.Time
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
|
||||
// maxStreamMu guards the maximum stream ID
|
||||
// This lock may not be taken if mu is already held.
|
||||
maxStreamMu sync.Mutex
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
}
|
||||
|
||||
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
|
||||
// returned if something goes wrong.
|
||||
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
|
||||
// NewServerTransport creates a http2 transport with conn and configuration
|
||||
// options from config.
|
||||
//
|
||||
// It returns a non-nil transport and a nil error on success. On failure, it
|
||||
// returns a nil transport and a non-nil error. For a special case where the
|
||||
// underlying conn gets closed before the client preface could be read, it
|
||||
// returns a nil transport and a nil error.
|
||||
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
|
||||
var authInfo credentials.AuthInfo
|
||||
rawConn := conn
|
||||
if config.Credentials != nil {
|
||||
var err error
|
||||
conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
|
||||
if err != nil {
|
||||
// ErrConnDispatched means that the connection was dispatched away
|
||||
// from gRPC; those connections should be left open. io.EOF means
|
||||
// the connection was closed before handshaking completed, which can
|
||||
// happen naturally from probers. Return these errors directly.
|
||||
if err == credentials.ErrConnDispatched || err == io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
}
|
||||
}
|
||||
writeBufSize := config.WriteBufferSize
|
||||
readBufSize := config.ReadBufferSize
|
||||
maxHeaderListSize := defaultServerMaxHeaderListSize
|
||||
@ -210,18 +231,24 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if kp.Timeout == 0 {
|
||||
kp.Timeout = defaultServerKeepaliveTimeout
|
||||
}
|
||||
if kp.Time != infinity {
|
||||
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||
}
|
||||
}
|
||||
kep := config.KeepalivePolicy
|
||||
if kep.MinTime == 0 {
|
||||
kep.MinTime = defaultKeepalivePolicyMinTime
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
t := &http2Server{
|
||||
ctx: context.Background(),
|
||||
ctx: setConnection(context.Background(), rawConn),
|
||||
done: done,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
authInfo: config.AuthInfo,
|
||||
authInfo: authInfo,
|
||||
framer: framer,
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
@ -230,7 +257,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
state: reachable,
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
stats: config.StatsHandler,
|
||||
stats: config.StatsHandlers,
|
||||
kp: kp,
|
||||
idle: time.Now(),
|
||||
kep: kep,
|
||||
@ -245,20 +272,20 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.stats != nil {
|
||||
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.stats {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{}
|
||||
t.stats.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
@ -270,6 +297,14 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
// Check the validity of client preface.
|
||||
preface := make([]byte, len(clientPreface))
|
||||
if _, err := io.ReadFull(t.conn, preface); err != nil {
|
||||
// In deployments where a gRPC server runs behind a cloud load balancer
|
||||
// which performs regular TCP level health checks, the connection is
|
||||
// closed immediately by the latter. Returning io.EOF here allows the
|
||||
// grpc server implementation to recognize this scenario and suppress
|
||||
// logging to reduce spam.
|
||||
if err == io.EOF {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
|
||||
}
|
||||
if !bytes.Equal(preface, clientPreface) {
|
||||
@ -294,9 +329,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
if err := t.loopy.run(); err != nil {
|
||||
errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
go t.keepalive()
|
||||
@ -305,38 +343,145 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
|
||||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
|
||||
// Acquire max stream ID lock for entire duration
	t.maxStreamMu.Lock()
	defer t.maxStreamMu.Unlock()

	streamID := frame.Header().StreamID
	state := &decodeState{
		serverSide: true,
	}
	if err := state.decodeHeader(frame); err != nil {
		if se, ok := status.FromError(err); ok {
			t.controlBuf.put(&cleanupStream{
				streamID: streamID,
				rst:      true,
				rstCode:  statusCodeConvTab[se.Code()],
				onWrite:  func() {},
			})
		}

	// frame.Truncated is set to true when framer detects that the current header
	// list size hits MaxHeaderListSize limit.
	if frame.Truncated {
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeFrameSize,
			onWrite:  func() {},
		})
		return false
	}

	if streamID%2 != 1 || streamID <= t.maxStreamID {
		// illegal gRPC stream id.
		if logger.V(logLevel) {
			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
		}
		return true
	}
	t.maxStreamID = streamID

	buf := newRecvBuffer()
	s := &Stream{
		id:             streamID,
		st:             t,
		buf:            buf,
		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
		recvCompress:   state.data.encoding,
		method:         state.data.method,
		contentSubtype: state.data.contentSubtype,
		id:             streamID,
		st:             t,
		buf:            buf,
		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
	}
	var (
		// If a gRPC Response-Headers has already been received, then it means
		// that the peer is speaking gRPC and we are in gRPC mode.
		isGRPC     = false
		mdata      = make(map[string][]string)
		httpMethod string
		// headerError is set if an error is encountered while parsing the headers
		headerError bool

		timeoutSet bool
		timeout    time.Duration
	)

	for _, hf := range frame.Fields {
		switch hf.Name {
		case "content-type":
			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
			if !validContentType {
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
			s.contentSubtype = contentSubtype
			isGRPC = true
		case "grpc-encoding":
			s.recvCompress = hf.Value
		case ":method":
			httpMethod = hf.Value
		case ":path":
			s.method = hf.Value
		case "grpc-timeout":
			timeoutSet = true
			var err error
			if timeout, err = decodeTimeout(hf.Value); err != nil {
				headerError = true
			}
		// "Transports must consider requests containing the Connection header
		// as malformed." - A41
		case "connection":
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
			}
			headerError = true
		default:
			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
				break
			}
			v, err := decodeMetadataHeader(hf.Name, hf.Value)
			if err != nil {
				headerError = true
				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], v)
		}
	}

	// "If multiple Host headers or multiple :authority headers are present, the
	// request must be rejected with an HTTP status code 400 as required by Host
	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
	// error, this takes precedence over a client not speaking gRPC.
	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
		if logger.V(logLevel) {
			logger.Errorf("transport: %v", errMsg)
		}
		t.controlBuf.put(&earlyAbortStream{
			httpStatus:     400,
			streamID:       streamID,
			contentSubtype: s.contentSubtype,
			status:         status.New(codes.Internal, errMsg),
			rst:            !frame.StreamEnded(),
		})
		return false
	}

	if !isGRPC || headerError {
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeProtocol,
			onWrite:  func() {},
		})
		return false
	}

	// "If :authority is missing, Host must be renamed to :authority." - A41
	if len(mdata[":authority"]) == 0 {
		// No-op if host isn't present, no eventual :authority header is a valid
		// RPC.
		if host, ok := mdata["host"]; ok {
			mdata[":authority"] = host
			delete(mdata, "host")
		}
	} else {
		// "If :authority is present, Host must be discarded" - A41
		delete(mdata, "host")
	}

	if frame.StreamEnded() {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if state.data.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
	if timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(t.ctx)
	}
@ -349,31 +494,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
	}
	s.ctx = peer.NewContext(s.ctx, pr)
	// Attach the received metadata to the context.
	if len(state.data.mdata) > 0 {
		s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
	}
	if state.data.statsTags != nil {
		s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
	}
	if state.data.statsTrace != nil {
		s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
	}
	if t.inTapHandle != nil {
		var err error
		info := &tap.Info{
			FullMethodName: state.data.method,
	if len(mdata) > 0 {
		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
		}
		s.ctx, err = t.inTapHandle(s.ctx, info)
		if err != nil {
			warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
			t.controlBuf.put(&cleanupStream{
				streamID: s.id,
				rst:      true,
				rstCode:  http2.ErrCodeRefusedStream,
				onWrite:  func() {},
			})
			s.cancel()
			return false
		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
		}
	}
	t.mu.Lock()
@ -393,14 +520,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		s.cancel()
		return false
	}
	if streamID%2 != 1 || streamID <= t.maxStreamID {
	if httpMethod != http.MethodPost {
		t.mu.Unlock()
		// illegal gRPC stream id.
		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
		errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
		if logger.V(logLevel) {
			logger.Infof("transport: %v", errMsg)
		}
		t.controlBuf.put(&earlyAbortStream{
			httpStatus:     405,
			streamID:       streamID,
			contentSubtype: s.contentSubtype,
			status:         status.New(codes.Internal, errMsg),
			rst:            !frame.StreamEnded(),
		})
		s.cancel()
		return true
		return false
	}
	if t.inTapHandle != nil {
		var err error
		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
			t.mu.Unlock()
			if logger.V(logLevel) {
				logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
			}
			stat, ok := status.FromError(err)
			if !ok {
				stat = status.New(codes.PermissionDenied, err.Error())
			}
			t.controlBuf.put(&earlyAbortStream{
				httpStatus:     200,
				streamID:       s.id,
				contentSubtype: s.contentSubtype,
				status:         stat,
				rst:            !frame.StreamEnded(),
			})
			return false
		}
	}
	t.maxStreamID = streamID
	t.activeStreams[streamID] = s
	if len(t.activeStreams) == 1 {
		t.idle = time.Time{}
@ -414,17 +570,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		t.adjustWindow(s, uint32(n))
	}
	s.ctx = traceCtx(s.ctx, s.method)
	if t.stats != nil {
		s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
	for _, sh := range t.stats {
		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
		inHeader := &stats.InHeader{
			FullMethod:  s.method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: s.recvCompress,
			WireLength:  int(frame.Header().Length),
			Header:      metadata.MD(state.data.mdata).Copy(),
			Header:      metadata.MD(mdata).Copy(),
		}
		t.stats.HandleRPC(s.ctx, inHeader)
		sh.HandleRPC(s.ctx, inHeader)
	}
	s.ctxDone = s.ctx.Done()
	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
@ -459,7 +615,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
		if err != nil {
			if se, ok := err.(http2.StreamError); ok {
				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
				if logger.V(logLevel) {
					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
				}
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
@ -479,7 +637,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
				t.Close()
				return
			}
			warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
			if logger.V(logLevel) {
				logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
			}
			t.Close()
			return
		}
@ -502,7 +662,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
		case *http2.GoAwayFrame:
			// TODO: Handle GoAway from the client appropriately.
		default:
			errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
			}
		}
	}
}
@ -604,6 +766,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
	if !ok {
		return
	}
	if s.getState() == streamReadDone {
		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
		return
	}
	if size > 0 {
		if err := s.fc.onData(size); err != nil {
			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@ -624,7 +790,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
			s.write(recvMsg{buffer: buffer})
		}
	}
	if f.Header().Flags.Has(http2.FlagDataEndStream) {
	if f.StreamEnded() {
		// Received the end of stream from the client.
		s.compareAndSwapState(streamActive, streamReadDone)
		s.write(recvMsg{err: io.EOF})
@ -724,7 +890,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {

	if t.pingStrikes > maxPingStrikes {
		// Send goaway and close the connection.
		errorf("transport: Got too many pings from the client, closing the connection.")
		if logger.V(logLevel) {
			logger.Errorf("transport: Got too many pings from the client, closing the connection.")
		}
		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
	}
}
@ -757,18 +925,34 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
	var sz int64
	for _, f := range hdrFrame.hf {
		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
			if logger.V(logLevel) {
				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
			}
			return false
		}
	}
	return true
}

func (t *http2Server) streamContextErr(s *Stream) error {
	select {
	case <-t.done:
		return ErrConnClosing
	default:
	}
	return ContextErr(s.ctx.Err())
}

// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	if s.updateHeaderSent() || s.getState() == streamDone {
	if s.updateHeaderSent() {
		return ErrIllegalHeaderWrite
	}

	if s.getState() == streamDone {
		return t.streamContextErr(s)
	}

	s.hdrMu.Lock()
	if md.Len() > 0 {
		if s.header.Len() > 0 {
@ -779,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	}
	if err := t.writeHeaderLocked(s); err != nil {
		s.hdrMu.Unlock()
		return err
		return status.Convert(err).Err()
	}
	s.hdrMu.Unlock()
	return nil
@ -794,7 +978,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
	// first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
	if s.sendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
	}
@ -812,13 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
		t.closeStream(s, true, http2.ErrCodeInternal, false)
		return ErrHeaderListSizeLimitViolation
	}
	if t.stats != nil {
		// Note: WireLength is not set in outHeader.
		// TODO(mmukhi): Revisit this later, if needed.
	for _, sh := range t.stats {
		// Note: Headers are compressed with hpack after this call returns.
		// No WireLength field is set here.
		outHeader := &stats.OutHeader{
			Header:      s.header.Copy(),
			Header:      s.header.Copy(),
			Compression: s.sendCompress,
		}
		t.stats.HandleRPC(s.Context(), outHeader)
		sh.HandleRPC(s.Context(), outHeader)
	}
	return nil
}
@ -843,17 +1028,17 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
		}
	} else { // Send a trailer only response.
		headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
	}
	}
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})

	if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
	if p := st.Proto(); p != nil && len(p.Details) > 0 {
		stBytes, err := proto.Marshal(p)
		if err != nil {
			// TODO: return error instead, when callers are able to handle it.
			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
		} else {
			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
		}
@ -879,8 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
	// Send a RST_STREAM after the trailers if the client has not already half-closed.
	rst := s.getState() == streamActive
	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
	if t.stats != nil {
		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
	for _, sh := range t.stats {
		// Note: The trailer fields are compressed with hpack after this call returns.
		// No WireLength field is set here.
		sh.HandleRPC(s.Context(), &stats.OutTrailer{
			Trailer: s.trailer.Copy(),
		})
	}
@ -892,32 +1079,14 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if !s.isHeaderSent() { // Headers haven't been written yet.
		if err := t.WriteHeader(s, nil); err != nil {
			if _, ok := err.(ConnectionError); ok {
				return err
			}
			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
			return status.Errorf(codes.Internal, "transport: %v", err)
			return err
		}
	} else {
		// Writing headers checks for this condition.
		if s.getState() == streamDone {
			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
			s.cancel()
			select {
			case <-t.done:
				return ErrConnClosing
			default:
			}
			return ContextErr(s.ctx.Err())
			return t.streamContextErr(s)
		}
	}
	// Add some data to header frame so that we can equally distribute bytes across frames.
	emptyLen := http2MaxFrameLen - len(hdr)
	if emptyLen > len(data) {
		emptyLen = len(data)
	}
	hdr = append(hdr, data[:emptyLen]...)
	data = data[emptyLen:]
	df := &dataFrame{
		streamID: s.id,
		h:        hdr,
@ -925,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
		onEachWrite: t.setResetPingStrikes,
	}
	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
		select {
		case <-t.done:
			return ErrConnClosing
		default:
		}
		return ContextErr(s.ctx.Err())
		return t.streamContextErr(s)
	}
	return t.controlBuf.put(df)
}
@ -979,17 +1143,19 @@ func (t *http2Server) keepalive() {
			if val <= 0 {
				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
				// Gracefully close the connection.
				t.drain(http2.ErrCodeNo, []byte{})
				t.Drain()
				return
			}
			idleTimer.Reset(val)
		case <-ageTimer.C:
			t.drain(http2.ErrCodeNo, []byte{})
			t.Drain()
			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
			select {
			case <-ageTimer.C:
				// Close the connection after grace period.
				infof("transport: closing server transport due to maximum connection age.")
				if logger.V(logLevel) {
					logger.Infof("transport: closing server transport due to maximum connection age.")
				}
				t.Close()
			case <-t.done:
			}
@ -1006,7 +1172,9 @@ func (t *http2Server) keepalive() {
				continue
			}
			if outstandingPing && kpTimeoutLeft <= 0 {
				infof("transport: closing server transport due to idleness.")
				if logger.V(logLevel) {
					logger.Infof("transport: closing server transport due to idleness.")
				}
				t.Close()
				return
			}
@ -1034,11 +1202,11 @@ func (t *http2Server) keepalive() {
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() error {
func (t *http2Server) Close() {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return errors.New("transport: Close() was already called")
		return
	}
	t.state = closing
	streams := t.activeStreams
@ -1046,27 +1214,22 @@ func (t *http2Server) Close() error {
	t.mu.Unlock()
	t.controlBuf.finish()
	close(t.done)
	err := t.conn.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	if err := t.conn.Close(); err != nil && logger.V(logLevel) {
		logger.Infof("transport: error closing conn during Close: %v", err)
	}
	channelz.RemoveEntry(t.channelzID)
	// Cancel all active streams.
	for _, s := range streams {
		s.cancel()
	}
	if t.stats != nil {
	for _, sh := range t.stats {
		connEnd := &stats.ConnEnd{}
		t.stats.HandleConn(t.ctx, connEnd)
		sh.HandleConn(t.ctx, connEnd)
	}
	return err
}

// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()

	t.mu.Lock()
	if _, ok := t.activeStreams[s.id]; ok {
@ -1088,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {

// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()

	oldState := s.swapState(streamDone)
	if oldState == streamDone {
		// If the stream was already done, return.
@ -1107,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h

// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()

	s.swapState(streamDone)
	t.deleteStream(s, eosReceived)

@ -1123,17 +1296,13 @@ func (t *http2Server) RemoteAddr() net.Addr {
}

func (t *http2Server) Drain() {
	t.drain(http2.ErrCodeNo, []byte{})
}

func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.drainChan != nil {
		return
	}
	t.drainChan = make(chan struct{})
	t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
}

var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@ -1141,20 +1310,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
	t.maxStreamMu.Lock()
	t.mu.Lock()
	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
		t.mu.Unlock()
		t.maxStreamMu.Unlock()
		// The transport is closing.
		return false, ErrConnClosing
	}
	sid := t.maxStreamID
	if !g.headsUp {
		// Stop accepting more streams now.
		t.state = draining
		sid := t.maxStreamID
		if len(t.activeStreams) == 0 {
			g.closeConn = true
		}
		t.mu.Unlock()
		t.maxStreamMu.Unlock()
		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
			return false, err
		}
@ -1167,6 +1339,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
		return true, nil
	}
	t.mu.Unlock()
	t.maxStreamMu.Unlock()
	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
	// Follow that with a ping and wait for the ack to come back or a timer
	// to expire. During this time accept new streams since they might have
@ -1251,3 +1424,18 @@ func getJitter(v time.Duration) time.Duration {
	j := grpcrand.Int63n(2*r) - r
	return time.Duration(j)
}

type connectionKey struct{}

// GetConnection gets the connection from the context.
func GetConnection(ctx context.Context) net.Conn {
	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
	return conn
}

// SetConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
func setConnection(ctx context.Context, conn net.Conn) context.Context {
	return context.WithValue(ctx, connectionKey{}, conn)
}
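The two helpers above are how the server attaches the raw net.Conn to the stream context so interceptors can see it. A minimal sketch of that round trip, assuming it is compiled inside package transport (setConnection is unexported) with context, fmt, and net already imported, and with conn standing in for an already-accepted connection:

// Illustrative sketch only; describeConn is a hypothetical helper, not part of this commit.
func describeConn(conn net.Conn) string {
	ctx := setConnection(context.Background(), conn)
	if c := GetConnection(ctx); c != nil {
		// Any handler or interceptor given ctx can now inspect the connection.
		return fmt.Sprintf("peer: %v", c.RemoteAddr())
	}
	return "no connection attached"
}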
338
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
@ -27,6 +27,7 @@ import (
	"math"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
@ -37,6 +38,7 @@ import (
	"golang.org/x/net/http2/hpack"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
)

@ -50,7 +52,7 @@ const (
	// "proto" as a suffix after "+" or ";". See
	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
	// for more details.
	baseContentType = "application/grpc"

)

var (
@ -71,13 +73,6 @@ var (
		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
		http2.ErrCodeHTTP11Required:     codes.Internal,
	}
	statusCodeConvTab = map[codes.Code]http2.ErrCode{
		codes.Internal:          http2.ErrCodeInternal,
		codes.Canceled:          http2.ErrCodeCancel,
		codes.Unavailable:       http2.ErrCodeRefusedStream,
		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
	}
	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
	HTTPStatusConvTab = map[int]codes.Code{
		// 400 Bad Request - INTERNAL.
@ -97,54 +92,9 @@ var (
		// 504 Gateway timeout - UNAVAILABLE.
		http.StatusGatewayTimeout: codes.Unavailable,
	}
	logger = grpclog.Component("transport")
)

type parsedHeaderData struct {
	encoding string
	// statusGen caches the stream status received from the trailer the server
	// sent. Client side only. Do not access directly. After all trailers are
	// parsed, use the status method to retrieve the status.
	statusGen *status.Status
	// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
	// intended for direct access outside of parsing.
	rawStatusCode *int
	rawStatusMsg  string
	httpStatus    *int
	// Server side only fields.
	timeoutSet bool
	timeout    time.Duration
	method     string
	// key-value metadata map from the peer.
	mdata          map[string][]string
	statsTags      []byte
	statsTrace     []byte
	contentSubtype string

	// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
	//
	// We are in gRPC mode (peer speaking gRPC) if:
	// * We are client side and have already received a HEADER frame that indicates gRPC peer.
	// * The header contains valid a content-type, i.e. a string starts with "application/grpc"
	// And we should handle error specific to gRPC.
	//
	// Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
	// are in HTTP fallback mode, and should handle error specific to HTTP.
	isGRPC         bool
	grpcErr        error
	httpErr        error
	contentTypeErr string
}

// decodeState configures decoding criteria and records the decoded data.
type decodeState struct {
	// whether decoding on server side or not
	serverSide bool

	// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
	// frame once decodeHeader function has been invoked and returned.
	data parsedHeaderData
}

// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@ -182,54 +132,6 @@ func isWhitelistedHeader(hdr string) bool {
	}
}

// contentSubtype returns the content-subtype for the given content-type. The
// given content-type must be a valid content-type that starts with
// "application/grpc". A content-subtype will follow "application/grpc" after a
// "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If contentType is not a valid content-type for gRPC, the boolean
// will be false, otherwise true. If content-type == "application/grpc",
// "application/grpc+", or "application/grpc;", the boolean will be true,
// but no content-subtype will be returned.
//
// contentType is assumed to be lowercase already.
func contentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	// guaranteed since != baseContentType and has baseContentType prefix
	switch contentType[len(baseContentType)] {
	case '+', ';':
		// this will return true for "application/grpc+" or "application/grpc;"
		// which the previous validContentType function tested to be valid, so we
		// just say that no content-subtype is specified in this case
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

// contentSubtype is assumed to be lowercase
func contentType(contentSubtype string) string {
	if contentSubtype == "" {
		return baseContentType
	}
	return baseContentType + "+" + contentSubtype
}

func (d *decodeState) status() *status.Status {
	if d.data.statusGen == nil {
		// No status-details were provided; generate status using code/msg.
		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
	}
	return d.data.statusGen
}

const binHdrSuffix = "-bin"

func encodeBinHeader(v []byte) string {
@ -259,164 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) {
	return v, nil
}

func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
	// frame.Truncated is set to true when framer detects that the current header
	// list size hits MaxHeaderListSize limit.
	if frame.Truncated {
		return status.Error(codes.Internal, "peer header list size exceeded limit")
func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
	v, err := decodeBinHeader(rawDetails)
	if err != nil {
		return nil, err
	}

	for _, hf := range frame.Fields {
		d.processHeaderField(hf)
	}

	if d.data.isGRPC {
		if d.data.grpcErr != nil {
			return d.data.grpcErr
		}
		if d.serverSide {
			return nil
		}
		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
			// gRPC status doesn't exist.
			// Set rawStatusCode to be unknown and return nil error.
			// So that, if the stream has ended this Unknown status
			// will be propagated to the user.
			// Otherwise, it will be ignored. In which case, status from
			// a later trailer, that has StreamEnded flag set, is propagated.
			code := int(codes.Unknown)
			d.data.rawStatusCode = &code
		}
		return nil
	}

	// HTTP fallback mode
	if d.data.httpErr != nil {
		return d.data.httpErr
	}

	var (
		code = codes.Internal // when header does not include HTTP status, return INTERNAL
		ok   bool
	)

	if d.data.httpStatus != nil {
		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
		if !ok {
			code = codes.Unknown
		}
	}

	return status.Error(code, d.constructHTTPErrMsg())
}

// constructErrMsg constructs error message to be returned in HTTP fallback mode.
// Format: HTTP status code and its corresponding message + content-type error message.
func (d *decodeState) constructHTTPErrMsg() string {
	var errMsgs []string

	if d.data.httpStatus == nil {
		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
	} else {
		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
	}

	if d.data.contentTypeErr == "" {
		errMsgs = append(errMsgs, "transport: missing content-type field")
	} else {
		errMsgs = append(errMsgs, d.data.contentTypeErr)
	}

	return strings.Join(errMsgs, "; ")
}

func (d *decodeState) addMetadata(k, v string) {
	if d.data.mdata == nil {
		d.data.mdata = make(map[string][]string)
	}
	d.data.mdata[k] = append(d.data.mdata[k], v)
}

func (d *decodeState) processHeaderField(f hpack.HeaderField) {
	switch f.Name {
	case "content-type":
		contentSubtype, validContentType := contentSubtype(f.Value)
		if !validContentType {
			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
			return
		}
		d.data.contentSubtype = contentSubtype
		// TODO: do we want to propagate the whole content-type in the metadata,
		// or come up with a way to just propagate the content-subtype if it was set?
		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
		// in the metadata?
		d.addMetadata(f.Name, f.Value)
		d.data.isGRPC = true
	case "grpc-encoding":
		d.data.encoding = f.Value
	case "grpc-status":
		code, err := strconv.Atoi(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
			return
		}
		d.data.rawStatusCode = &code
	case "grpc-message":
		d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
	case "grpc-status-details-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
			return
		}
		s := &spb.Status{}
		if err := proto.Unmarshal(v, s); err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
			return
		}
		d.data.statusGen = status.FromProto(s)
	case "grpc-timeout":
		d.data.timeoutSet = true
		var err error
		if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
		}
	case ":path":
		d.data.method = f.Value
	case ":status":
		code, err := strconv.Atoi(f.Value)
		if err != nil {
			d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
			return
		}
		d.data.httpStatus = &code
	case "grpc-tags-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
			return
		}
		d.data.statsTags = v
		d.addMetadata(f.Name, string(v))
	case "grpc-trace-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
			return
		}
		d.data.statsTrace = v
		d.addMetadata(f.Name, string(v))
	default:
		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
			break
		}
		v, err := decodeMetadataHeader(f.Name, f.Value)
		if err != nil {
			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
			return
		}
		d.addMetadata(f.Name, v)
	st := &spb.Status{}
	if err = proto.Unmarshal(v, st); err != nil {
		return nil, err
	}
	return status.FromProto(st), nil
}

type timeoutUnit uint8
@ -449,41 +203,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
	return
}

const maxTimeoutValue int64 = 100000000 - 1

// div does integer division and round-up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
	if m := d % r; m > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
func encodeTimeout(t time.Duration) string {
	if t <= 0 {
		return "0n"
	}
	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "n"
	}
	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "u"
	}
	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "m"
	}
	if d := div(t, time.Second); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "S"
	}
	if d := div(t, time.Minute); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "M"
	}
	// Note that maxTimeoutValue * time.Hour > MaxInt64.
	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}

func decodeTimeout(s string) (time.Duration, error) {
	size := len(s)
	if size < 2 {
@ -603,8 +322,6 @@ type bufWriter struct {
	batchSize int
	conn      net.Conn
	err       error

	onFlush func()
}

func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
@ -641,9 +358,6 @@ func (w *bufWriter) Flush() error {
	if w.offset == 0 {
		return nil
	}
	if w.onFlush != nil {
		w.onFlush()
	}
	_, w.err = w.conn.Write(w.buf[:w.offset])
	w.offset = 0
	return w.err
@ -675,3 +389,31 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
	return f
}

// parseDialTarget returns the network and address to pass to dialer.
func parseDialTarget(target string) (string, string) {
	net := "tcp"
	m1 := strings.Index(target, ":")
	m2 := strings.Index(target, ":/")
	// handle unix:addr which will fail with url.Parse
	if m1 >= 0 && m2 < 0 {
		if n := target[0:m1]; n == "unix" {
			return n, target[m1+1:]
		}
	}
	if m2 >= 0 {
		t, err := url.Parse(target)
		if err != nil {
			return net, target
		}
		scheme := t.Scheme
		addr := t.Path
		if scheme == "unix" {
			if addr == "" {
				addr = t.Host
			}
			return scheme, addr
		}
	}
	return net, target
}
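parseDialTarget above is the piece that turns a dial target string into the (network, address) pair handed to the dialer. A sketch of that mapping, written as a hypothetical table test that would have to sit in a _test.go file of package transport (the function is unexported); the target strings are made-up examples and the expected pairs follow directly from the branches above:

// Hypothetical in-package test sketch; assumes the standard "testing" import.
func TestParseDialTargetSketch(t *testing.T) {
	cases := []struct{ in, wantNet, wantAddr string }{
		{"example.com:443", "tcp", "example.com:443"},     // plain host:port falls through to tcp
		{"unix:app.sock", "unix", "app.sock"},             // unix:addr shortcut branch (no ":/")
		{"unix:///run/app.sock", "unix", "/run/app.sock"}, // unix URL form handled via url.Parse
	}
	for _, c := range cases {
		gotNet, gotAddr := parseDialTarget(c.in)
		if gotNet != c.wantNet || gotAddr != c.wantAddr {
			t.Errorf("parseDialTarget(%q) = (%q, %q), want (%q, %q)",
				c.in, gotNet, gotAddr, c.wantNet, c.wantAddr)
		}
	}
}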
46
vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package networktype declares the network type to be used in the default
// dialer. Attribute of a resolver.Address.
package networktype

import (
	"google.golang.org/grpc/resolver"
)

// keyType is the key to use for storing State in Attributes.
type keyType string

const key = keyType("grpc.internal.transport.networktype")

// Set returns a copy of the provided address with attributes containing networkType.
func Set(address resolver.Address, networkType string) resolver.Address {
	address.Attributes = address.Attributes.WithValue(key, networkType)
	return address
}

// Get returns the network type in the resolver.Address and true, or "", false
// if not present.
func Get(address resolver.Address) (string, bool) {
	v := address.Attributes.Value(key)
	if v == nil {
		return "", false
	}
	return v.(string), true
}
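A short usage sketch for the Set/Get helpers above. It only compiles inside the google.golang.org/grpc module (the package is internal), and the socket path is a made-up example:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/transport/networktype"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "/run/grpc.sock"}
	addr = networktype.Set(addr, "unix") // tag the address for the default dialer
	if nt, ok := networktype.Get(addr); ok {
		fmt.Println("network type:", nt) // prints "unix"
	}
}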
142
vendor/google.golang.org/grpc/internal/transport/proxy.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"bufio"
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
)

const proxyAuthHeaderKey = "Proxy-Authorization"

var (
	// The following variable will be overwritten in the tests.
	httpProxyFromEnvironment = http.ProxyFromEnvironment
)

func mapAddress(address string) (*url.URL, error) {
	req := &http.Request{
		URL: &url.URL{
			Scheme: "https",
			Host:   address,
		},
	}
	url, err := httpProxyFromEnvironment(req)
	if err != nil {
		return nil, err
	}
	return url, nil
}

// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
// It's possible that this reader reads more than what's need for the response and stores
// those bytes in the buffer.
// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
// bytes in the buffer.
type bufConn struct {
	net.Conn
	r io.Reader
}

func (c *bufConn) Read(b []byte) (int, error) {
	return c.r.Read(b)
}

func basicAuth(username, password string) string {
	auth := username + ":" + password
	return base64.StdEncoding.EncodeToString([]byte(auth))
}

func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
	defer func() {
		if err != nil {
			conn.Close()
		}
	}()

	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Host: backendAddr},
		Header: map[string][]string{"User-Agent": {grpcUA}},
	}
	if t := proxyURL.User; t != nil {
		u := t.Username()
		p, _ := t.Password()
		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
	}

	if err := sendHTTPRequest(ctx, req, conn); err != nil {
		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
	}

	r := bufio.NewReader(conn)
	resp, err := http.ReadResponse(r, req)
	if err != nil {
		return nil, fmt.Errorf("reading server HTTP response: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
		}
		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
	}

	return &bufConn{Conn: conn, r: r}, nil
}

// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
// connection.
func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
	newAddr := addr
	proxyURL, err := mapAddress(addr)
	if err != nil {
		return nil, err
	}
	if proxyURL != nil {
		newAddr = proxyURL.Host
	}

	conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
	if err != nil {
		return
	}
	if proxyURL != nil {
		// proxy is disabled if proxyURL is nil.
		conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
	}
	return
}

func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
	req = req.WithContext(ctx)
	if err := req.Write(conn); err != nil {
		return fmt.Errorf("failed to write the HTTP request: %v", err)
	}
	return nil
}
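mapAddress above defers entirely to http.ProxyFromEnvironment, so the proxy decision proxyDial will make can be previewed with the public net/http API. The proxy URL and backend host below are made-up example values, not anything defined by this commit:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Assumed example values; HTTPS_PROXY is what the CONNECT path keys off.
	os.Setenv("HTTPS_PROXY", "http://proxy.internal:3128")
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: "backend.example.com:443"}}
	proxyURL, err := http.ProxyFromEnvironment(req)
	fmt.Println(proxyURL, err) // e.g. http://proxy.internal:3128 <nil>
}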
55
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
@ -30,16 +30,21 @@ import (
	"net"
	"sync"
	"sync/atomic"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

const logLevel = 2

type bufferPool struct {
	pool sync.Pool
}
@ -238,6 +243,7 @@ type Stream struct {
	ctx          context.Context    // the associated context of the stream
	cancel       context.CancelFunc // always nil for client side Stream
	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
	doneFunc     func()             // invoked at the end of stream on client side.
	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
	method       string             // the associated RPC method of the stream
	recvCompress string
@ -514,26 +520,21 @@ const (
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
	MaxStreams            uint32
	AuthInfo              credentials.AuthInfo
	ConnectionTimeout     time.Duration
	Credentials           credentials.TransportCredentials
	InTapHandle           tap.ServerInHandle
	StatsHandler          stats.Handler
	StatsHandlers         []stats.Handler
	KeepaliveParams       keepalive.ServerParameters
	KeepalivePolicy       keepalive.EnforcementPolicy
	InitialWindowSize     int32
	InitialConnWindowSize int32
	WriteBufferSize       int
	ReadBufferSize        int
	ChannelzParentID      int64
	ChannelzParentID      *channelz.Identifier
	MaxHeaderListSize     *uint32
	HeaderTableSize       *uint32
}

// NewServerTransport creates a ServerTransport with conn or non-nil error
// if it fails.
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
	return newHTTP2Server(conn, config)
}

// ConnectOptions covers all relevant options for communicating with the server.
type ConnectOptions struct {
	// UserAgent is the application user agent.
@ -552,8 +553,8 @@ type ConnectOptions struct {
	CredsBundle credentials.Bundle
	// KeepaliveParams stores the keepalive parameters.
	KeepaliveParams keepalive.ClientParameters
	// StatsHandler stores the handler for stats.
	StatsHandler stats.Handler
	// StatsHandlers stores the handler for stats.
	StatsHandlers []stats.Handler
	// InitialWindowSize sets the initial window size for a stream.
	InitialWindowSize int32
	// InitialConnWindowSize sets the initial window size for a connection.
@ -563,22 +564,17 @@ type ConnectOptions struct {
	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
	ReadBufferSize int
	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
	ChannelzParentID int64
	ChannelzParentID *channelz.Identifier
	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
	MaxHeaderListSize *uint32
}

// TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct {
	Addr      string
	Metadata  interface{}
	Authority string
	// UseProxy specifies if a proxy should be used.
	UseProxy bool
}

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
	return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
	return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
}

// Options provides additional hints and information for message
@ -613,6 +609,8 @@ type CallHdr struct {
	ContentSubtype string

	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set

	DoneFunc func() // called when the stream is finished
}

// ClientTransport is the common interface for all gRPC client-side transport
@ -621,7 +619,7 @@ type ClientTransport interface {
	// Close tears down this transport. Once it returns, the transport
	// should not be accessed any more. The caller must make sure this
	// is called only once.
	Close() error
	Close(err error)

	// GracefulClose starts to tear down the transport: the transport will stop
	// accepting new RPCs and NewStream will return error. Once all streams are
@ -655,8 +653,9 @@ type ClientTransport interface {
	// HTTP/2).
	GoAway() <-chan struct{}

	// GetGoAwayReason returns the reason why GoAway frame was received.
	GetGoAwayReason() GoAwayReason
	// GetGoAwayReason returns the reason why GoAway frame was received, along
	// with a human readable string with debug info.
	GetGoAwayReason() (GoAwayReason, string)

	// RemoteAddr returns the remote network address.
	RemoteAddr() net.Addr
@ -692,7 +691,7 @@ type ServerTransport interface {
	// Close tears down the transport. Once it is called, the transport
	// should not be accessed any more. All the pending streams and their
	// handlers will be terminated asynchronously.
	Close() error
	Close()

	// RemoteAddr returns the remote network address.
	RemoteAddr() net.Addr
@ -743,6 +742,12 @@ func (e ConnectionError) Origin() error {
	return e.err
}

// Unwrap returns the original error of this connection error or nil when the
// origin is nil.
func (e ConnectionError) Unwrap() error {
	return e.err
}

var (
	// ErrConnClosing indicates that the transport is closing.
	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
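The Unwrap method added above lets errors.Is and errors.As look through a ConnectionError to its wrapped cause. A hypothetical in-package sketch (connectionErrorf is unexported, and the deadline error is an arbitrary example; assumes the errors and os packages are imported alongside the existing ones):

// With Unwrap defined, errors.Is follows the chain into the wrapped error.
func isDeadlineConnError(err error) bool {
	return errors.Is(err, os.ErrDeadlineExceeded)
}

// isDeadlineConnError(connectionErrorf(true, os.ErrDeadlineExceeded, "write failed"))
// now reports true.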
40
vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
/*
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package internal

import (
	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

// handshakeClusterNameKey is the type used as the key to store cluster name in
// the Attributes field of resolver.Address.
type handshakeClusterNameKey struct{}

// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
// is updated with the cluster name.
func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
	return addr
}

// GetXDSHandshakeClusterName returns cluster name stored in attr.
func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
	v := attr.Value(handshakeClusterNameKey{})
	name, ok := v.(string)
	return name, ok
}