add vendor
407 vendor/google.golang.org/appengine/datastore/datastore.go (generated, vendored, new file)
@@ -0,0 +1,407 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"errors"
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/datastore"
)

var (
	// ErrInvalidEntityType is returned when functions like Get or Next are
	// passed a dst or src argument of invalid type.
	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
	// ErrInvalidKey is returned when an invalid key is presented.
	ErrInvalidKey = errors.New("datastore: invalid key")
	// ErrNoSuchEntity is returned when no entity was found for a given key.
	ErrNoSuchEntity = errors.New("datastore: no such entity")
)

// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
	StructType reflect.Type
	FieldName  string
	Reason     string
}

func (e *ErrFieldMismatch) Error() string {
	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
		e.FieldName, e.StructType, e.Reason)
}

// protoToKey converts a Reference proto to a *Key. If the key is invalid,
// protoToKey will return the invalid key along with ErrInvalidKey.
func protoToKey(r *pb.Reference) (k *Key, err error) {
	appID := r.GetApp()
	namespace := r.GetNameSpace()
	for _, e := range r.Path.Element {
		k = &Key{
			kind:      e.GetType(),
			stringID:  e.GetName(),
			intID:     e.GetId(),
			parent:    k,
			appID:     appID,
			namespace: namespace,
		}
		if !k.valid() {
			return k, ErrInvalidKey
		}
	}
	return
}

// keyToProto converts a *Key to a Reference proto.
func keyToProto(defaultAppID string, k *Key) *pb.Reference {
	appID := k.appID
	if appID == "" {
		appID = defaultAppID
	}
	n := 0
	for i := k; i != nil; i = i.parent {
		n++
	}
	e := make([]*pb.Path_Element, n)
	for i := k; i != nil; i = i.parent {
		n--
		e[n] = &pb.Path_Element{
			Type: &i.kind,
		}
		// At most one of {Name,Id} should be set.
		// Neither will be set for incomplete keys.
		if i.stringID != "" {
			e[n].Name = &i.stringID
		} else if i.intID != 0 {
			e[n].Id = &i.intID
		}
	}
	var namespace *string
	if k.namespace != "" {
		namespace = proto.String(k.namespace)
	}
	return &pb.Reference{
		App:       proto.String(appID),
		NameSpace: namespace,
		Path: &pb.Path{
			Element: e,
		},
	}
}

// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
	ret := make([]*pb.Reference, len(key))
	for i, k := range key {
		ret[i] = keyToProto(appID, k)
	}
	return ret
}

// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
	invalid := false
	for _, k := range key {
		if !k.valid() {
			invalid = true
			break
		}
	}
	if !invalid {
		return nil
	}
	err := make(appengine.MultiError, len(key))
	for i, k := range key {
		if !k.valid() {
			err[i] = ErrInvalidKey
		}
	}
	return err
}

// It's unfortunate that the two semantically equivalent concepts pb.Reference
// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
// two have different protobuf field numbers.

// referenceValueToKey is the same as protoToKey except the input is a
// PropertyValue_ReferenceValue instead of a Reference.
func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
	appID := r.GetApp()
	namespace := r.GetNameSpace()
	for _, e := range r.Pathelement {
		k = &Key{
			kind:      e.GetType(),
			stringID:  e.GetName(),
			intID:     e.GetId(),
			parent:    k,
			appID:     appID,
			namespace: namespace,
		}
		if !k.valid() {
			return nil, ErrInvalidKey
		}
	}
	return
}

// keyToReferenceValue is the same as keyToProto except the output is a
// PropertyValue_ReferenceValue instead of a Reference.
func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
	ref := keyToProto(defaultAppID, k)
	pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
	for i, e := range ref.Path.Element {
		pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
			Type: e.Type,
			Id:   e.Id,
			Name: e.Name,
		}
	}
	return &pb.PropertyValue_ReferenceValue{
		App:         ref.App,
		NameSpace:   ref.NameSpace,
		Pathelement: pe,
	}
}

type multiArgType int

const (
	multiArgTypeInvalid multiArgType = iota
	multiArgTypePropertyLoadSaver
	multiArgTypeStruct
	multiArgTypeStructPtr
	multiArgTypeInterface
)

// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
	if v.Kind() != reflect.Slice {
		return multiArgTypeInvalid, nil
	}
	if v.Type() == typeOfPropertyList {
		return multiArgTypeInvalid, nil
	}
	elemType = v.Type().Elem()
	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
		return multiArgTypePropertyLoadSaver, elemType
	}
	switch elemType.Kind() {
	case reflect.Struct:
		return multiArgTypeStruct, elemType
	case reflect.Interface:
		return multiArgTypeInterface, elemType
	case reflect.Ptr:
		elemType = elemType.Elem()
		if elemType.Kind() == reflect.Struct {
			return multiArgTypeStructPtr, elemType
		}
	}
	return multiArgTypeInvalid, nil
}

// Get loads the entity stored for k into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func Get(c context.Context, key *Key, dst interface{}) error {
	if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
		return ErrInvalidEntityType
	}
	err := GetMulti(c, []*Key{key}, []interface{}{dst})
	if me, ok := err.(appengine.MultiError); ok {
		return me[0]
	}
	return err
}

// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func GetMulti(c context.Context, key []*Key, dst interface{}) error {
	v := reflect.ValueOf(dst)
	multiArgType, _ := checkMultiArg(v)
	if multiArgType == multiArgTypeInvalid {
		return errors.New("datastore: dst has invalid type")
	}
	if len(key) != v.Len() {
		return errors.New("datastore: key and dst slices have different length")
	}
	if len(key) == 0 {
		return nil
	}
	if err := multiValid(key); err != nil {
		return err
	}
	req := &pb.GetRequest{
		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
	}
	res := &pb.GetResponse{}
	if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
		return err
	}
	if len(key) != len(res.Entity) {
		return errors.New("datastore: internal error: server returned the wrong number of entities")
	}
	multiErr, any := make(appengine.MultiError, len(key)), false
	for i, e := range res.Entity {
		if e.Entity == nil {
			multiErr[i] = ErrNoSuchEntity
		} else {
			elem := v.Index(i)
			if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
				elem = elem.Addr()
			}
			if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
				elem.Set(reflect.New(elem.Type().Elem()))
			}
			multiErr[i] = loadEntity(elem.Interface(), e.Entity)
		}
		if multiErr[i] != nil {
			any = true
		}
	}
	if any {
		return multiErr
	}
	return nil
}

// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
	k, err := PutMulti(c, []*Key{key}, []interface{}{src})
	if err != nil {
		if me, ok := err.(appengine.MultiError); ok {
			return nil, me[0]
		}
		return nil, err
	}
	return k[0], nil
}

// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
	v := reflect.ValueOf(src)
	multiArgType, _ := checkMultiArg(v)
	if multiArgType == multiArgTypeInvalid {
		return nil, errors.New("datastore: src has invalid type")
	}
	if len(key) != v.Len() {
		return nil, errors.New("datastore: key and src slices have different length")
	}
	if len(key) == 0 {
		return nil, nil
	}
	appID := internal.FullyQualifiedAppID(c)
	if err := multiValid(key); err != nil {
		return nil, err
	}
	req := &pb.PutRequest{}
	for i := range key {
		elem := v.Index(i)
		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
			elem = elem.Addr()
		}
		sProto, err := saveEntity(appID, key[i], elem.Interface())
		if err != nil {
			return nil, err
		}
		req.Entity = append(req.Entity, sProto)
	}
	res := &pb.PutResponse{}
	if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
		return nil, err
	}
	if len(key) != len(res.Key) {
		return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
	}
	ret := make([]*Key, len(key))
	for i := range ret {
		var err error
		ret[i], err = protoToKey(res.Key[i])
		if err != nil || ret[i].Incomplete() {
			return nil, errors.New("datastore: internal error: server returned an invalid key")
		}
	}
	return ret, nil
}

// Delete deletes the entity for the given key.
func Delete(c context.Context, key *Key) error {
	err := DeleteMulti(c, []*Key{key})
	if me, ok := err.(appengine.MultiError); ok {
		return me[0]
	}
	return err
}

// DeleteMulti is a batch version of Delete.
func DeleteMulti(c context.Context, key []*Key) error {
	if len(key) == 0 {
		return nil
	}
	if err := multiValid(key); err != nil {
		return err
	}
	req := &pb.DeleteRequest{
		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
	}
	res := &pb.DeleteResponse{}
	return internal.Call(c, "datastore_v3", "Delete", req, res)
}

func namespaceMod(m proto.Message, namespace string) {
	// pb.Query is the only type that has a name_space field.
	// All other namespace support in datastore is in the keys.
	switch m := m.(type) {
	case *pb.Query:
		if m.NameSpace == nil {
			m.NameSpace = &namespace
		}
	}
}

func init() {
	internal.NamespaceMods["datastore_v3"] = namespaceMod
	internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
	internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
}
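Get, Put, Delete and their *Multi batch variants above report partial failure through appengine.MultiError, with ErrNoSuchEntity marking the missing entries. A minimal handling sketch (the Entity type, ctx and keys slice are illustrative, not part of this package):

	entities := make([]Entity, len(keys))
	err := datastore.GetMulti(ctx, keys, entities)
	if me, ok := err.(appengine.MultiError); ok {
		for i, e := range me {
			if e == datastore.ErrNoSuchEntity {
				// keys[i] has no stored entity; entities[i] stays the zero value.
			}
		}
	} else if err != nil {
		// A request-level error that affected the whole batch.
	}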
361 vendor/google.golang.org/appengine/datastore/doc.go (generated, vendored, new file)
@@ -0,0 +1,361 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

/*
Package datastore provides a client for App Engine's datastore service.


Basic Operations

Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.

It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.

An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
  - signed integers (int, int8, int16, int32 and int64),
  - bool,
  - string,
  - float32 and float64,
  - []byte (up to 1 megabyte in length),
  - any type whose underlying type is one of the above predeclared types,
  - ByteString,
  - *Key,
  - time.Time (stored with microsecond precision),
  - appengine.BlobKey,
  - appengine.GeoPoint,
  - structs whose fields are all valid value types,
  - slices of any of the above.

Slices of structs are valid, as are structs that contain slices. However, if
one struct contains another, then at most one of those can be repeated. This
disqualifies recursively defined struct types: any struct T that (directly or
indirectly) contains a []T.

The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.

Example code:

	type Entity struct {
		Value string
	}

	func handle(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)

		k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
		e := new(Entity)
		if err := datastore.Get(ctx, k, e); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		old := e.Value
		e.Value = r.URL.Path

		if _, err := datastore.Put(ctx, k, e); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
	}

GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return an
appengine.MultiError when encountering partial failure.


Properties

An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.

The actual types passed do not have to match between Get and Put calls or even
across different calls to datastore. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.

By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter).

Fields may have a `datastore:"name,options"` tag. The tag name is the
property name, which must be one or more valid Go identifiers joined by ".",
but may start with a lower case letter. An empty tag name means to just use the
field name. A "-" tag name means that the datastore will ignore that field.

The only valid options are "omitempty" and "noindex".

If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
Struct field values will never be empty.

If options include "noindex" then the field will not be indexed. All fields are indexed
by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
fields used to store long strings and byte slices must be tagged with "noindex"
or they will cause Put operations to fail.

To use multiple options together, separate them by a comma.
The order does not matter.

If the options is "" then the comma may be omitted.

Example code:

	// A and B are renamed to a and b.
	// A, C and J are not indexed.
	// D's tag is equivalent to having no tag at all (E).
	// I is ignored entirely by the datastore.
	// J has tag information for both the datastore and json packages.
	type TaggedStruct struct {
		A int `datastore:"a,noindex"`
		B int `datastore:"b"`
		C int `datastore:",noindex"`
		D int `datastore:""`
		E int
		I int `datastore:"-"`
		J int `datastore:",noindex" json:"j"`
	}


Structured Properties

If the struct pointed to contains other structs, then the nested or embedded
structs are flattened. For example, given these definitions:

	type Inner1 struct {
		W int32
		X string
	}

	type Inner2 struct {
		Y float64
	}

	type Inner3 struct {
		Z bool
	}

	type Outer struct {
		A int16
		I []Inner1
		J Inner2
		Inner3
	}

then an Outer's properties would be equivalent to those of:

	type OuterEquivalent struct {
		A     int16
		IDotW []int32  `datastore:"I.W"`
		IDotX []string `datastore:"I.X"`
		JDotY float64  `datastore:"J.Y"`
		Z     bool
	}

If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.

If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".


The PropertyLoadSaver Interface

An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.

Example code:

	type CustomPropsExample struct {
		I, J int
		// Sum is not stored, but should always be equal to I + J.
		Sum int `datastore:"-"`
	}

	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
		// Load I and J as usual.
		if err := datastore.LoadStruct(x, ps); err != nil {
			return err
		}
		// Derive the Sum field.
		x.Sum = x.I + x.J
		return nil
	}

	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
		// Validate the Sum field.
		if x.Sum != x.I + x.J {
			return nil, errors.New("CustomPropsExample has inconsistent sum")
		}
		// Save I and J as usual. The code below is equivalent to calling
		// "return datastore.SaveStruct(x)", but is done manually for
		// demonstration purposes.
		return []datastore.Property{
			{
				Name:  "I",
				Value: int64(x.I),
			},
			{
				Name:  "J",
				Value: int64(x.J),
			},
		}, nil
	}

The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.


Queries

Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.

Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
  - Ancestor and Filter constrain the entities returned by running a query.
  - Order affects the order in which they are returned.
  - Project constrains the fields returned.
  - Distinct de-duplicates projected entities.
  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
  - Start, End, Offset and Limit define which sub-sequence of matching entities
    to return. Start and End take cursors, Offset and Limit take integers. Start
    and Offset affect the first result, End and Limit affect the last result.
    If both Start and Offset are set, then the offset is relative to Start.
    If both End and Limit are set, then the earliest constraint wins. Limit is
    relative to Start+Offset, not relative to End. As a special case, a
    negative limit means unlimited.

Example code:

	type Widget struct {
		Description string
		Price       int
	}

	func handle(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)
		q := datastore.NewQuery("Widget").
			Filter("Price <", 1000).
			Order("-Price")
		b := new(bytes.Buffer)
		for t := q.Run(ctx); ; {
			var x Widget
			key, err := t.Next(&x)
			if err == datastore.Done {
				break
			}
			if err != nil {
				serveError(ctx, w, err)
				return
			}
			fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
		}
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		io.Copy(w, b)
	}


Transactions

RunInTransaction runs a function in a transaction.

Example code:

	type Counter struct {
		Count int
	}

	func inc(ctx context.Context, key *datastore.Key) (int, error) {
		var x Counter
		if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
			return 0, err
		}
		x.Count++
		if _, err := datastore.Put(ctx, key, &x); err != nil {
			return 0, err
		}
		return x.Count, nil
	}

	func handle(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)
		var count int
		err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
			var err1 error
			count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
			return err1
		}, nil)
		if err != nil {
			serveError(ctx, w, err)
			return
		}
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprintf(w, "Count=%d", count)
	}


Metadata

The datastore package provides access to some of App Engine's datastore
metadata. This metadata includes information about the entity groups,
namespaces, entity kinds, and properties in the datastore, as well as the
property representations for each property.

Example code:

	func handle(w http.ResponseWriter, r *http.Request) {
		// Print all the kinds in the datastore, with all the indexed
		// properties (and their representations) for each.
		ctx := appengine.NewContext(r)

		kinds, err := datastore.Kinds(ctx)
		if err != nil {
			serveError(ctx, w, err)
			return
		}

		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		for _, kind := range kinds {
			fmt.Fprintf(w, "%s:\n", kind)
			props, err := datastore.KindProperties(ctx, kind)
			if err != nil {
				fmt.Fprintln(w, "\t(unable to retrieve properties)")
				continue
			}
			for p, rep := range props {
				fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
			}
		}
	}
*/
package datastore // import "google.golang.org/appengine/datastore"
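As the documentation above notes, *PropertyList implements PropertyLoadSaver and can hold an arbitrary entity's contents. A minimal sketch of inspecting an entity of unknown schema (ctx and k are assumed to be a valid context and key):

	var props datastore.PropertyList
	if err := datastore.Get(ctx, k, &props); err != nil {
		// handle the error
	}
	for _, p := range props {
		fmt.Printf("%s = %v\n", p.Name, p.Value)
	}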
120 vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go (generated, vendored, new file)
@@ -0,0 +1,120 @@
// Copyright 2019 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// Package cloudkey is a subset of types and functions, copied from cloud.google.com/go/datastore.
//
// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package.
package cloudkey

import (
	"encoding/base64"
	"errors"
	"strings"

	"github.com/golang/protobuf/proto"
	cloudpb "google.golang.org/appengine/datastore/internal/cloudpb"
)

/////////////////////////////////////////////////////////////////////
// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/datastore.go
/////////////////////////////////////////////////////////////////////

var (
	// ErrInvalidKey is returned when an invalid key is presented.
	ErrInvalidKey = errors.New("datastore: invalid key")
)

/////////////////////////////////////////////////////////////////////
// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/key.go
/////////////////////////////////////////////////////////////////////

// Key represents the datastore key for a stored entity.
type Key struct {
	// Kind cannot be empty.
	Kind string
	// Either ID or Name must be zero for the Key to be valid.
	// If both are zero, the Key is incomplete.
	ID   int64
	Name string
	// Parent must either be a complete Key or nil.
	Parent *Key

	// Namespace provides the ability to partition your data for multiple
	// tenants. In most cases, it is not necessary to specify a namespace.
	// See docs on datastore multitenancy for details:
	// https://cloud.google.com/datastore/docs/concepts/multitenancy
	Namespace string
}

// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
	// Re-add padding.
	if m := len(encoded) % 4; m != 0 {
		encoded += strings.Repeat("=", 4-m)
	}

	b, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	pKey := new(cloudpb.Key)
	if err := proto.Unmarshal(b, pKey); err != nil {
		return nil, err
	}
	return protoToKey(pKey)
}

// valid returns whether the key is valid.
func (k *Key) valid() bool {
	if k == nil {
		return false
	}
	for ; k != nil; k = k.Parent {
		if k.Kind == "" {
			return false
		}
		if k.Name != "" && k.ID != 0 {
			return false
		}
		if k.Parent != nil {
			if k.Parent.Incomplete() {
				return false
			}
			if k.Parent.Namespace != k.Namespace {
				return false
			}
		}
	}
	return true
}

// Incomplete reports whether the key does not refer to a stored entity.
func (k *Key) Incomplete() bool {
	return k.Name == "" && k.ID == 0
}

// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *cloudpb.Key) (*Key, error) {
	var key *Key
	var namespace string
	if partition := p.PartitionId; partition != nil {
		namespace = partition.NamespaceId
	}
	for _, el := range p.Path {
		key = &Key{
			Namespace: namespace,
			Kind:      el.Kind,
			ID:        el.GetId(),
			Name:      el.GetName(),
			Parent:    key,
		}
	}
	if !key.valid() { // Also detects key == nil.
		return key, ErrInvalidKey
	}
	return key, nil
}
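DecodeKey above re-adds the base64 padding that key encoders strip before writing keys out; the recovery step uses only encoding/base64 and strings from the standard library. A standalone sketch (raw is any byte slice):

	enc := strings.TrimRight(base64.URLEncoding.EncodeToString(raw), "=")
	if m := len(enc) % 4; m != 0 {
		enc += strings.Repeat("=", 4-m) // restore the stripped '=' padding
	}
	decoded, err := base64.URLEncoding.DecodeString(enc) // decoded equals raw when err is nil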
344 vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go (generated, vendored, new file)
@@ -0,0 +1,344 @@
// Copyright 2019 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// Package cloudpb is a subset of protobufs, copied from google.golang.org/genproto/googleapis/datastore/v1.
//
// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package.
package cloudpb

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// A partition ID identifies a grouping of entities. The grouping is always
// by project and namespace, however the namespace ID may be empty.
//
// A partition ID contains several dimensions:
// project ID and namespace ID.
//
// Partition dimensions:
//
// - May be `""`.
// - Must be valid UTF-8 bytes.
// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`
// If the value of any dimension matches regex `__.*__`, the partition is
// reserved/read-only.
// A reserved/read-only partition ID is forbidden in certain documented
// contexts.
//
// Foreign partition IDs (in which the project ID does
// not match the context project ID ) are discouraged.
// Reads and writes of foreign partition IDs may fail if the project is not in
// an active state.
type PartitionId struct {
	// The ID of the project to which the entities belong.
	ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// If not empty, the ID of the namespace to which the entities belong.
	NamespaceId          string   `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *PartitionId) Reset()         { *m = PartitionId{} }
func (m *PartitionId) String() string { return proto.CompactTextString(m) }
func (*PartitionId) ProtoMessage()    {}
func (*PartitionId) Descriptor() ([]byte, []int) {
	return fileDescriptor_entity_096a297364b049a5, []int{0}
}
func (m *PartitionId) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartitionId.Unmarshal(m, b)
}
func (m *PartitionId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartitionId.Marshal(b, m, deterministic)
}
func (dst *PartitionId) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartitionId.Merge(dst, src)
}
func (m *PartitionId) XXX_Size() int {
	return xxx_messageInfo_PartitionId.Size(m)
}
func (m *PartitionId) XXX_DiscardUnknown() {
	xxx_messageInfo_PartitionId.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionId proto.InternalMessageInfo

func (m *PartitionId) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}

func (m *PartitionId) GetNamespaceId() string {
	if m != nil {
		return m.NamespaceId
	}
	return ""
}

// A unique identifier for an entity.
// If a key's partition ID or any of its path kinds or names are
// reserved/read-only, the key is reserved/read-only.
// A reserved/read-only key is forbidden in certain documented contexts.
type Key struct {
	// Entities are partitioned into subsets, currently identified by a project
	// ID and namespace ID.
	// Queries are scoped to a single partition.
	PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"`
	// The entity path.
	// An entity path consists of one or more elements composed of a kind and a
	// string or numerical identifier, which identify entities. The first
	// element identifies a _root entity_, the second element identifies
	// a _child_ of the root entity, the third element identifies a child of the
	// second entity, and so forth. The entities identified by all prefixes of
	// the path are called the element's _ancestors_.
	//
	// An entity path is always fully complete: *all* of the entity's ancestors
	// are required to be in the path along with the entity identifier itself.
	// The only exception is that in some documented cases, the identifier in the
	// last path element (for the entity) itself may be omitted. For example,
	// the last path element of the key of `Mutation.insert` may have no
	// identifier.
	//
	// A path can never be empty, and a path can have at most 100 elements.
	Path                 []*Key_PathElement `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

func (m *Key) Reset()         { *m = Key{} }
func (m *Key) String() string { return proto.CompactTextString(m) }
func (*Key) ProtoMessage()    {}
func (*Key) Descriptor() ([]byte, []int) {
	return fileDescriptor_entity_096a297364b049a5, []int{1}
}
func (m *Key) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Key.Unmarshal(m, b)
}
func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Key.Marshal(b, m, deterministic)
}
func (dst *Key) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Key.Merge(dst, src)
}
func (m *Key) XXX_Size() int {
	return xxx_messageInfo_Key.Size(m)
}
func (m *Key) XXX_DiscardUnknown() {
	xxx_messageInfo_Key.DiscardUnknown(m)
}

// A (kind, ID/name) pair used to construct a key path.
//
// If either name or ID is set, the element is complete.
// If neither is set, the element is incomplete.
type Key_PathElement struct {
	// The kind of the entity.
	// A kind matching regex `__.*__` is reserved/read-only.
	// A kind must not contain more than 1500 bytes when UTF-8 encoded.
	// Cannot be `""`.
	Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
	// The type of ID.
	//
	// Types that are valid to be assigned to IdType:
	//	*Key_PathElement_Id
	//	*Key_PathElement_Name
	IdType               isKey_PathElement_IdType `protobuf_oneof:"id_type"`
	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
	XXX_unrecognized     []byte                   `json:"-"`
	XXX_sizecache        int32                    `json:"-"`
}

func (m *Key_PathElement) Reset()         { *m = Key_PathElement{} }
func (m *Key_PathElement) String() string { return proto.CompactTextString(m) }
func (*Key_PathElement) ProtoMessage()    {}
func (*Key_PathElement) Descriptor() ([]byte, []int) {
	return fileDescriptor_entity_096a297364b049a5, []int{1, 0}
}
func (m *Key_PathElement) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Key_PathElement.Unmarshal(m, b)
}
func (m *Key_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Key_PathElement.Marshal(b, m, deterministic)
}
func (dst *Key_PathElement) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Key_PathElement.Merge(dst, src)
}
func (m *Key_PathElement) XXX_Size() int {
	return xxx_messageInfo_Key_PathElement.Size(m)
}
func (m *Key_PathElement) XXX_DiscardUnknown() {
	xxx_messageInfo_Key_PathElement.DiscardUnknown(m)
}

var xxx_messageInfo_Key_PathElement proto.InternalMessageInfo

func (m *Key_PathElement) GetKind() string {
	if m != nil {
		return m.Kind
	}
	return ""
}

type isKey_PathElement_IdType interface {
	isKey_PathElement_IdType()
}

type Key_PathElement_Id struct {
	Id int64 `protobuf:"varint,2,opt,name=id,proto3,oneof"`
}

type Key_PathElement_Name struct {
	Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"`
}

func (*Key_PathElement_Id) isKey_PathElement_IdType() {}

func (*Key_PathElement_Name) isKey_PathElement_IdType() {}

func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType {
	if m != nil {
		return m.IdType
	}
	return nil
}

func (m *Key_PathElement) GetId() int64 {
	if x, ok := m.GetIdType().(*Key_PathElement_Id); ok {
		return x.Id
	}
	return 0
}

func (m *Key_PathElement) GetName() string {
	if x, ok := m.GetIdType().(*Key_PathElement_Name); ok {
		return x.Name
	}
	return ""
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{
		(*Key_PathElement_Id)(nil),
		(*Key_PathElement_Name)(nil),
	}
}

func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*Key_PathElement)
	// id_type
	switch x := m.IdType.(type) {
	case *Key_PathElement_Id:
		b.EncodeVarint(2<<3 | proto.WireVarint)
		b.EncodeVarint(uint64(x.Id))
	case *Key_PathElement_Name:
		b.EncodeVarint(3<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Name)
	case nil:
	default:
		return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x)
	}
	return nil
}

func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*Key_PathElement)
	switch tag {
	case 2: // id_type.id
		if wire != proto.WireVarint {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeVarint()
		m.IdType = &Key_PathElement_Id{int64(x)}
		return true, err
	case 3: // id_type.name
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.IdType = &Key_PathElement_Name{x}
		return true, err
	default:
		return false, nil
	}
}

func _Key_PathElement_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*Key_PathElement)
	// id_type
	switch x := m.IdType.(type) {
	case *Key_PathElement_Id:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(x.Id))
	case *Key_PathElement_Name:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Name)))
		n += len(x.Name)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}

var fileDescriptor_entity_096a297364b049a5 = []byte{
// 780 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xff, 0x6e, 0xdc, 0x44,
0x10, 0xc7, 0xed, 0xbb, 0x5c, 0x1a, 0x8f, 0xdd, 0xa4, 0x6c, 0x2a, 0x61, 0x02, 0x28, 0x26, 0x80,
0x74, 0x02, 0xc9, 0x6e, 0xc2, 0x1f, 0x54, 0x14, 0xa4, 0x72, 0x25, 0xe0, 0x28, 0x15, 0x9c, 0x56,
0x55, 0x24, 0x50, 0xa4, 0xd3, 0xde, 0x79, 0xeb, 0x2e, 0x67, 0xef, 0x5a, 0xf6, 0x3a, 0xaa, 0xdf,
0x05, 0xf1, 0x00, 0x3c, 0x0a, 0x8f, 0x80, 0x78, 0x18, 0xb4, 0x3f, 0xec, 0x0b, 0xed, 0x35, 0xff,
0x79, 0x67, 0x3e, 0xdf, 0xd9, 0xef, 0xec, 0xce, 0x1a, 0xa2, 0x5c, 0x88, 0xbc, 0xa0, 0x49, 0x46,
0x24, 0x69, 0xa4, 0xa8, 0x69, 0x72, 0x73, 0x9a, 0x50, 0x2e, 0x99, 0xec, 0xe2, 0xaa, 0x16, 0x52,
0xa0, 0x43, 0x43, 0xc4, 0x03, 0x11, 0xdf, 0x9c, 0x1e, 0x7d, 0x64, 0x65, 0xa4, 0x62, 0x09, 0xe1,
0x5c, 0x48, 0x22, 0x99, 0xe0, 0x8d, 0x91, 0x0c, 0x59, 0xbd, 0x5a, 0xb6, 0x2f, 0x93, 0x46, 0xd6,
0xed, 0x4a, 0xda, 0xec, 0xf1, 0x9b, 0x59, 0xc9, 0x4a, 0xda, 0x48, 0x52, 0x56, 0x16, 0x08, 0x2d,
0x20, 0xbb, 0x8a, 0x26, 0x05, 0x91, 0x05, 0xcf, 0x4d, 0xe6, 0xe4, 0x17, 0xf0, 0xe7, 0xa4, 0x96,
0x4c, 0x6d, 0x76, 0x91, 0xa1, 0x8f, 0x01, 0xaa, 0x5a, 0xfc, 0x4e, 0x57, 0x72, 0xc1, 0xb2, 0x70,
0x14, 0xb9, 0x53, 0x0f, 0x7b, 0x36, 0x72, 0x91, 0xa1, 0x4f, 0x20, 0xe0, 0xa4, 0xa4, 0x4d, 0x45,
0x56, 0x54, 0x01, 0x3b, 0x1a, 0xf0, 0x87, 0xd8, 0x45, 0x76, 0xf2, 0x8f, 0x0b, 0xe3, 0x4b, 0xda,
0xa1, 0x67, 0x10, 0x54, 0x7d, 0x61, 0x85, 0xba, 0x91, 0x3b, 0xf5, 0xcf, 0xa2, 0x78, 0x4b, 0xef,
0xf1, 0x2d, 0x07, 0xd8, 0xaf, 0x6e, 0xd9, 0x79, 0x0c, 0x3b, 0x15, 0x91, 0xaf, 0xc2, 0x51, 0x34,
0x9e, 0xfa, 0x67, 0x9f, 0x6d, 0x15, 0x5f, 0xd2, 0x2e, 0x9e, 0x13, 0xf9, 0xea, 0xbc, 0xa0, 0x25,
0xe5, 0x12, 0x6b, 0xc5, 0xd1, 0x0b, 0xd5, 0xd7, 0x10, 0x44, 0x08, 0x76, 0xd6, 0x8c, 0x1b, 0x17,
0x1e, 0xd6, 0xdf, 0xe8, 0x01, 0x8c, 0x6c, 0x8f, 0xe3, 0xd4, 0xc1, 0x23, 0x96, 0xa1, 0x87, 0xb0,
0xa3, 0x5a, 0x09, 0xc7, 0x8a, 0x4a, 0x1d, 0xac, 0x57, 0x33, 0x0f, 0xee, 0xb1, 0x6c, 0xa1, 0x8e,
0xee, 0xe4, 0x29, 0xc0, 0xf7, 0x75, 0x4d, 0xba, 0x2b, 0x52, 0xb4, 0x14, 0x9d, 0xc1, 0xee, 0x8d,
0xfa, 0x68, 0x42, 0x57, 0xfb, 0x3b, 0xda, 0xea, 0x4f, 0xb3, 0xd8, 0x92, 0x27, 0x7f, 0x4c, 0x60,
0x62, 0xd4, 0x4f, 0x00, 0x78, 0x5b, 0x14, 0x0b, 0x9d, 0x08, 0xfd, 0xc8, 0x9d, 0xee, 0x6f, 0x2a,
0xf4, 0x37, 0x19, 0xff, 0xdc, 0x16, 0x85, 0xe6, 0x53, 0x07, 0x7b, 0xbc, 0x5f, 0xa0, 0xcf, 0xe1,
0xfe, 0x52, 0x88, 0x82, 0x12, 0x6e, 0xf5, 0xaa, 0xb1, 0xbd, 0xd4, 0xc1, 0x81, 0x0d, 0x0f, 0x18,
0xe3, 0x92, 0xe6, 0xb4, 0xb6, 0x58, 0xdf, 0x6d, 0x60, 0xc3, 0x06, 0xfb, 0x14, 0x82, 0x4c, 0xb4,
0xcb, 0x82, 0x5a, 0x4a, 0xf5, 0xef, 0xa6, 0x0e, 0xf6, 0x4d, 0xd4, 0x40, 0xe7, 0x70, 0x30, 0x8c,
0x95, 0xe5, 0x40, 0xdf, 0xe9, 0xdb, 0xa6, 0x5f, 0xf4, 0x5c, 0xea, 0xe0, 0xfd, 0x41, 0x64, 0xca,
0x7c, 0x0d, 0xde, 0x9a, 0x76, 0xb6, 0xc0, 0x44, 0x17, 0x08, 0xdf, 0x75, 0xaf, 0xa9, 0x83, 0xf7,
0xd6, 0xb4, 0x1b, 0x4c, 0x36, 0xb2, 0x66, 0x3c, 0xb7, 0xda, 0xf7, 0xec, 0x25, 0xf9, 0x26, 0x6a,
0xa0, 0x63, 0x80, 0x65, 0x21, 0x96, 0x16, 0x41, 0x91, 0x3b, 0x0d, 0xd4, 0xc1, 0xa9, 0x98, 0x01,
0xbe, 0x83, 0x83, 0x9c, 0x8a, 0x45, 0x25, 0x18, 0x97, 0x96, 0xda, 0xd3, 0x26, 0x0e, 0x7b, 0x13,
0xea, 0xa2, 0xe3, 0xe7, 0x44, 0x3e, 0xe7, 0x79, 0xea, 0xe0, 0xfb, 0x39, 0x15, 0x73, 0x05, 0x1b,
0xf9, 0x53, 0x08, 0xcc, 0x53, 0xb6, 0xda, 0x5d, 0xad, 0xfd, 0x70, 0x6b, 0x03, 0xe7, 0x1a, 0x54,
0x0e, 0x8d, 0xc4, 0x54, 0x98, 0x81, 0x4f, 0xd4, 0x08, 0xd9, 0x02, 0x9e, 0x2e, 0x70, 0xbc, 0xb5,
0xc0, 0x66, 0xd4, 0x52, 0x07, 0x03, 0xd9, 0x0c, 0x5e, 0x08, 0xf7, 0x4a, 0x4a, 0x38, 0xe3, 0x79,
0xb8, 0x1f, 0xb9, 0xd3, 0x09, 0xee, 0x97, 0xe8, 0x11, 0x3c, 0xa4, 0xaf, 0x57, 0x45, 0x9b, 0xd1,
0xc5, 0xcb, 0x5a, 0x94, 0x0b, 0xc6, 0x33, 0xfa, 0x9a, 0x36, 0xe1, 0xa1, 0x1a, 0x0f, 0x8c, 0x6c,
0xee, 0xc7, 0x5a, 0x94, 0x17, 0x26, 0x33, 0x0b, 0x00, 0xb4, 0x13, 0x33, 0xe0, 0xff, 0xba, 0xb0,
0x6b, 0x7c, 0xa3, 0x2f, 0x60, 0xbc, 0xa6, 0x9d, 0x7d, 0xb7, 0xef, 0xbc, 0x22, 0xac, 0x20, 0x74,
0xa9, 0x7f, 0x1b, 0x15, 0xad, 0x25, 0xa3, 0x4d, 0x38, 0xd6, 0xaf, 0xe1, 0xcb, 0x3b, 0x0e, 0x25,
0x9e, 0x0f, 0xf4, 0x39, 0x97, 0x75, 0x87, 0x6f, 0xc9, 0x8f, 0x7e, 0x85, 0x83, 0x37, 0xd2, 0xe8,
0xc1, 0xc6, 0x8b, 0x67, 0x76, 0x7c, 0x04, 0x93, 0xcd, 0x44, 0xdf, 0xfd, 0xf4, 0x0c, 0xf8, 0xcd,
0xe8, 0xb1, 0x3b, 0xfb, 0xd3, 0x85, 0xf7, 0x57, 0xa2, 0xdc, 0x06, 0xcf, 0x7c, 0x63, 0x6d, 0xae,
0x86, 0x78, 0xee, 0xfe, 0xf6, 0xad, 0x65, 0x72, 0x51, 0x10, 0x9e, 0xc7, 0xa2, 0xce, 0x93, 0x9c,
0x72, 0x3d, 0xe2, 0x89, 0x49, 0x91, 0x8a, 0x35, 0xff, 0xfb, 0xcb, 0x3f, 0x19, 0x16, 0x7f, 0x8d,
0x3e, 0xf8, 0xc9, 0xc8, 0x9f, 0x15, 0xa2, 0xcd, 0xe2, 0x1f, 0x86, 0x8d, 0xae, 0x4e, 0xff, 0xee,
0x73, 0xd7, 0x3a, 0x77, 0x3d, 0xe4, 0xae, 0xaf, 0x4e, 0x97, 0xbb, 0x7a, 0x83, 0xaf, 0xfe, 0x0b,
0x00, 0x00, 0xff, 0xff, 0xf3, 0xdd, 0x11, 0x96, 0x45, 0x06, 0x00, 0x00,
}

var xxx_messageInfo_Key proto.InternalMessageInfo
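The Key_PathElement oneof above means a path element carries either a numeric Id or a string Name, and each generated accessor returns its zero value when the other arm is set. A small sketch, written from a package that can import cloudpb (the literal values are illustrative):

	el := &cloudpb.Key_PathElement{
		Kind:   "Entity",
		IdType: &cloudpb.Key_PathElement_Name{Name: "stringID"},
	}
	name := el.GetName() // "stringID"
	id := el.GetId()     // 0, because the Name arm is set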
400 vendor/google.golang.org/appengine/datastore/key.go (generated, vendored, new file)
@@ -0,0 +1,400 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"bytes"
	"encoding/base64"
	"encoding/gob"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/datastore"
)

type KeyRangeCollisionError struct {
	start int64
	end   int64
}

func (e *KeyRangeCollisionError) Error() string {
	return fmt.Sprintf("datastore: Collision when attempting to allocate range [%d, %d]",
		e.start, e.end)
}

type KeyRangeContentionError struct {
	start int64
	end   int64
}

func (e *KeyRangeContentionError) Error() string {
	return fmt.Sprintf("datastore: Contention when attempting to allocate range [%d, %d]",
		e.start, e.end)
}

// Key represents the datastore key for a stored entity, and is immutable.
type Key struct {
	kind      string
	stringID  string
	intID     int64
	parent    *Key
	appID     string
	namespace string
}

// Kind returns the key's kind (also known as entity type).
func (k *Key) Kind() string {
	return k.kind
}

// StringID returns the key's string ID (also known as an entity name or key
// name), which may be "".
func (k *Key) StringID() string {
	return k.stringID
}

// IntID returns the key's integer ID, which may be 0.
func (k *Key) IntID() int64 {
	return k.intID
}

// Parent returns the key's parent key, which may be nil.
func (k *Key) Parent() *Key {
	return k.parent
}

// AppID returns the key's application ID.
func (k *Key) AppID() string {
	return k.appID
}

// Namespace returns the key's namespace.
func (k *Key) Namespace() string {
	return k.namespace
}

// Incomplete returns whether the key does not refer to a stored entity.
// In particular, whether the key has a zero StringID and a zero IntID.
func (k *Key) Incomplete() bool {
	return k.stringID == "" && k.intID == 0
}

// valid returns whether the key is valid.
func (k *Key) valid() bool {
	if k == nil {
		return false
	}
	for ; k != nil; k = k.parent {
		if k.kind == "" || k.appID == "" {
			return false
		}
		if k.stringID != "" && k.intID != 0 {
			return false
		}
		if k.parent != nil {
			if k.parent.Incomplete() {
				return false
			}
			if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
				return false
			}
		}
	}
	return true
}

// Equal returns whether two keys are equal.
func (k *Key) Equal(o *Key) bool {
	for k != nil && o != nil {
		if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
			return false
		}
		k, o = k.parent, o.parent
	}
	return k == o
}

// root returns the furthest ancestor of a key, which may be itself.
func (k *Key) root() *Key {
	for k.parent != nil {
		k = k.parent
	}
	return k
}

// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
	if k.parent != nil {
		k.parent.marshal(b)
	}
	b.WriteByte('/')
	b.WriteString(k.kind)
	b.WriteByte(',')
	if k.stringID != "" {
		b.WriteString(k.stringID)
	} else {
		b.WriteString(strconv.FormatInt(k.intID, 10))
	}
}

// String returns a string representation of the key.
func (k *Key) String() string {
	if k == nil {
		return ""
	}
	b := bytes.NewBuffer(make([]byte, 0, 512))
	k.marshal(b)
	return b.String()
}

type gobKey struct {
	Kind      string
	StringID  string
	IntID     int64
	Parent    *gobKey
	AppID     string
	Namespace string
}

func keyToGobKey(k *Key) *gobKey {
	if k == nil {
		return nil
	}
	return &gobKey{
		Kind:      k.kind,
		StringID:  k.stringID,
		IntID:     k.intID,
		Parent:    keyToGobKey(k.parent),
		AppID:     k.appID,
		Namespace: k.namespace,
	}
}

func gobKeyToKey(gk *gobKey) *Key {
	if gk == nil {
		return nil
	}
	return &Key{
		kind:      gk.Kind,
		stringID:  gk.StringID,
		intID:     gk.IntID,
		parent:    gobKeyToKey(gk.Parent),
		appID:     gk.AppID,
		namespace: gk.Namespace,
	}
}

func (k *Key) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (k *Key) GobDecode(buf []byte) error {
	gk := new(gobKey)
	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
		return err
	}
	*k = *gobKeyToKey(gk)
	return nil
}

func (k *Key) MarshalJSON() ([]byte, error) {
	return []byte(`"` + k.Encode() + `"`), nil
}

func (k *Key) UnmarshalJSON(buf []byte) error {
	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
		return errors.New("datastore: bad JSON key")
	}
	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
	if err != nil {
		return err
	}
	*k = *k2
	return nil
}

// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
	ref := keyToProto("", k)

	b, err := proto.Marshal(ref)
	if err != nil {
		panic(err)
	}

	// Trailing padding is stripped.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
	// Re-add padding.
	if m := len(encoded) % 4; m != 0 {
		encoded += strings.Repeat("=", 4-m)
	}

	b, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	ref := new(pb.Reference)
	if err := proto.Unmarshal(b, ref); err != nil {
		// Couldn't decode it as an App Engine key, try decoding it as a key encoded by cloud.google.com/go/datastore.
		if k := decodeCloudKey(encoded); k != nil {
			return k, nil
		}
		return nil, err
	}

	return protoToKey(ref)
}
|
||||
|
||||
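// exampleEncodeDecodeKey is an illustrative sketch added by the editor, not
// part of the upstream vendored file. It shows the intended round trip
// between Encode and DecodeKey; the kind and string ID used here are
// arbitrary, and c is assumed to be an App Engine context obtained elsewhere.
func exampleEncodeDecodeKey(c context.Context) (*Key, error) {
	k := NewKey(c, "Article", "intro-to-go", 0, nil)
	s := k.Encode() // opaque, URL-safe; trailing '=' padding is stripped
	// DecodeKey reverses Encode; with EnableKeyConversion it also accepts
	// keys encoded by cloud.google.com/go/datastore.
	return DecodeKey(s)
}
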
// NewIncompleteKey creates a new incomplete key.
// kind cannot be empty.
func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
	return NewKey(c, kind, "", 0, parent)
}

// NewKey creates a new key.
// kind cannot be empty.
// Either one or both of stringID and intID must be zero. If both are zero,
// the key returned is incomplete.
// parent must either be a complete key or nil.
func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
	// If there's a parent key, use its namespace.
	// Otherwise, use any namespace attached to the context.
	var namespace string
	if parent != nil {
		namespace = parent.namespace
	} else {
		namespace = internal.NamespaceFromContext(c)
	}

	return &Key{
		kind:      kind,
		stringID:  stringID,
		intID:     intID,
		parent:    parent,
		appID:     internal.FullyQualifiedAppID(c),
		namespace: namespace,
	}
}

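// exampleKeyHierarchy is an illustrative sketch added by the editor, not part
// of the upstream vendored file. It shows how NewKey and NewIncompleteKey are
// typically combined: a complete parent key plus an incomplete child key whose
// integer ID the datastore assigns on save. The kind names are arbitrary.
func exampleKeyHierarchy(c context.Context) (parent, child *Key) {
	parent = NewKey(c, "Customer", "acme", 0, nil) // complete: has a string ID
	child = NewIncompleteKey(c, "Invoice", parent) // incomplete: ID assigned later
	return parent, child
}
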
// AllocateIDs returns a range of n integer IDs with the given kind and parent
// combination. kind cannot be empty; parent may be nil. The IDs in the range
// returned will not be used by the datastore's automatic ID sequence generator
// and may be used with NewKey without conflict.
//
// The range is inclusive at the low end and exclusive at the high end. In
// other words, valid intIDs x satisfy low <= x && x < high.
//
// If no error is returned, low + n == high.
func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
	if kind == "" {
		return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
	}
	if n < 0 {
		return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
	}
	if n == 0 {
		return 0, 0, nil
	}
	req := &pb.AllocateIdsRequest{
		ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
		Size:     proto.Int64(int64(n)),
	}
	res := &pb.AllocateIdsResponse{}
	if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
		return 0, 0, err
	}
	// The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
	// is inclusive at the low end and exclusive at the high end, so we add 1.
	low = res.GetStart()
	high = res.GetEnd() + 1
	if low+int64(n) != high {
		return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
	}
	return low, high, nil
}

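// exampleAllocateIDs is an illustrative sketch added by the editor, not part
// of the upstream vendored file. It reserves n IDs and builds complete keys
// from the half-open range [low, high), matching the contract documented on
// AllocateIDs above. The kind name is arbitrary.
func exampleAllocateIDs(c context.Context, n int) ([]*Key, error) {
	low, high, err := AllocateIDs(c, "Invoice", nil, n)
	if err != nil {
		return nil, err
	}
	keys := make([]*Key, 0, int(high-low))
	for id := low; id < high; id++ { // exclusive at the high end
		keys = append(keys, NewKey(c, "Invoice", "", id, nil))
	}
	return keys, nil
}
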
// AllocateIDRange allocates a range of IDs with specific endpoints.
// The range is inclusive at both the low and high end. Once these IDs have been
// allocated, you can manually assign them to newly created entities.
//
// The Datastore's automatic ID allocator never assigns a key that has already
// been allocated (either through automatic ID allocation or through an explicit
// AllocateIDs call). As a result, entities written to the given key range will
// never be overwritten. However, writing entities with manually assigned keys in
// this range may overwrite existing entities (or new entities written by a separate
// request), depending on the error returned.
//
// Use this only if you have an existing numeric ID range that you want to reserve
// (for example, bulk loading entities that already have IDs). If you don't care
// about which IDs you receive, use AllocateIDs instead.
//
// AllocateIDRange returns nil if the range is successfully allocated. If one or more
// entities with an ID in the given range already exist, it returns a KeyRangeCollisionError.
// If the Datastore has already cached IDs in this range (e.g. from a previous call to
// AllocateIDRange), it returns a KeyRangeContentionError. Errors of other types indicate
// problems with arguments or an error returned directly from the Datastore.
func AllocateIDRange(c context.Context, kind string, parent *Key, start, end int64) (err error) {
	if kind == "" {
		return errors.New("datastore: AllocateIDRange given an empty kind")
	}

	if start < 1 || end < 1 {
		return errors.New("datastore: AllocateIDRange start and end must both be greater than 0")
	}

	if start > end {
		return errors.New("datastore: AllocateIDRange start must be before end")
	}

	req := &pb.AllocateIdsRequest{
		ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
		Max:      proto.Int64(end),
	}
	res := &pb.AllocateIdsResponse{}
	if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
		return err
	}

	// Check for collisions, i.e. existing entities with IDs in this range.
	// We could do this before the allocation, but we'd still have to do it
	// afterward as well to catch the race condition where an entity is inserted
	// after that initial check but before the allocation. Skip the up-front check
	// and just do it once.
	q := NewQuery(kind).Filter("__key__ >=", NewKey(c, kind, "", start, parent)).
		Filter("__key__ <=", NewKey(c, kind, "", end, parent)).KeysOnly().Limit(1)

	keys, err := q.GetAll(c, nil)
	if err != nil {
		return err
	}
	if len(keys) != 0 {
		return &KeyRangeCollisionError{start: start, end: end}
	}

	// Check for a race condition, i.e. cases where the datastore may have
	// cached ID batches that contain IDs in this range.
	if start < res.GetStart() {
		return &KeyRangeContentionError{start: start, end: end}
	}

	return nil
}
89
vendor/google.golang.org/appengine/datastore/keycompat.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
// Copyright 2019 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"sync"

	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore/internal/cloudkey"
	"google.golang.org/appengine/internal"
)

var keyConversion struct {
	mu    sync.RWMutex
	appID string // read using getKeyConversionAppID
}

// EnableKeyConversion enables encoded key compatibility with the Cloud
// Datastore client library (cloud.google.com/go/datastore). Encoded keys
// generated by the Cloud Datastore client library will be decoded into App
// Engine datastore keys.
//
// The context provided must be an App Engine context if running in the App
// Engine first-generation runtime. This can be called in the /_ah/start
// handler. It is safe to call multiple times, and is cheap to call, so it can
// also be inserted as middleware.
//
// Enabling key compatibility does not affect the encoding format used by
// Key.Encode; it only expands the set of keys that can be decoded with
// DecodeKey.
func EnableKeyConversion(ctx context.Context) {
	// Only attempt to set appID if it's unset.
	// If already set, ignore.
	if getKeyConversionAppID() != "" {
		return
	}

	keyConversion.mu.Lock()
	// Check again to avoid a race where another goroutine set appID between the
	// call to getKeyConversionAppID above and taking the write lock.
	if keyConversion.appID == "" {
		keyConversion.appID = internal.FullyQualifiedAppID(ctx)
	}
	keyConversion.mu.Unlock()
}

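// The sketch below was added by the editor for illustration and is not part of
// the upstream vendored file. A typical place to call EnableKeyConversion is
// HTTP middleware, since the call is cheap and idempotent; the http and
// appengine packages used here are assumptions of the sketch, not imports of
// this file.
//
//	func keyCompat(h http.Handler) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			EnableKeyConversion(appengine.NewContext(r))
//			h.ServeHTTP(w, r)
//		})
//	}
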
func getKeyConversionAppID() string {
	keyConversion.mu.RLock()
	appID := keyConversion.appID
	keyConversion.mu.RUnlock()
	return appID
}

// decodeCloudKey attempts to decode the given encoded key generated by the
// Cloud Datastore client library (cloud.google.com/go/datastore), returning nil
// if the key couldn't be decoded.
func decodeCloudKey(encoded string) *Key {
	appID := getKeyConversionAppID()
	if appID == "" {
		return nil
	}

	k, err := cloudkey.DecodeKey(encoded)
	if err != nil {
		return nil
	}
	return convertCloudKey(k, appID)
}

// convertCloudKey converts a Cloud Datastore key to an App Engine Datastore
// key. Cloud Datastore keys don't include the project/app ID, so we must add
// it back in.
func convertCloudKey(key *cloudkey.Key, appID string) *Key {
	if key == nil {
		return nil
	}
	k := &Key{
		intID:     key.ID,
		kind:      key.Kind,
		namespace: key.Namespace,
		parent:    convertCloudKey(key.Parent, appID),
		stringID:  key.Name,
		appID:     appID,
	}
	return k
}
429
vendor/google.golang.org/appengine/datastore/load.go
generated
vendored
Normal file
@ -0,0 +1,429 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"fmt"
	"reflect"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine"
	pb "google.golang.org/appengine/internal/datastore"
)

var (
	typeOfBlobKey    = reflect.TypeOf(appengine.BlobKey(""))
	typeOfByteSlice  = reflect.TypeOf([]byte(nil))
	typeOfByteString = reflect.TypeOf(ByteString(nil))
	typeOfGeoPoint   = reflect.TypeOf(appengine.GeoPoint{})
	typeOfTime       = reflect.TypeOf(time.Time{})
	typeOfKeyPtr     = reflect.TypeOf(&Key{})
	typeOfEntityPtr  = reflect.TypeOf(&Entity{})
)

// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(pValue interface{}, v reflect.Value) string {
	entityType := "empty"
	switch pValue.(type) {
	case int64:
		entityType = "int"
	case bool:
		entityType = "bool"
	case string:
		entityType = "string"
	case float64:
		entityType = "float"
	case *Key:
		entityType = "*datastore.Key"
	case time.Time:
		entityType = "time.Time"
	case appengine.BlobKey:
		entityType = "appengine.BlobKey"
	case appengine.GeoPoint:
		entityType = "appengine.GeoPoint"
	case ByteString:
		entityType = "datastore.ByteString"
	case []byte:
		entityType = "[]byte"
	}
	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}

type propertyLoader struct {
	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
	// been seen so far. The map is constructed lazily.
	m map[string]int
}

func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
	var v reflect.Value
	var sliceIndex int

	name := p.Name

	// If name ends with a '.', the last field is anonymous.
	// In this case, strings.Split will give us "" as the
	// last element of our fields slice, which will match the ""
	// field name in the substruct codec.
	fields := strings.Split(name, ".")

	for len(fields) > 0 {
		var decoder fieldCodec
		var ok bool

		// Cut off the last field (delimited by ".") and find its parent
		// in the codec.
		// eg. for name "A.B.C.D", split off "A.B.C" and try to
		// find a field in the codec with this name.
		// Loop again with "A.B", etc.
		for i := len(fields); i > 0; i-- {
			parent := strings.Join(fields[:i], ".")
			decoder, ok = codec.fields[parent]
			if ok {
				fields = fields[i:]
				break
			}
		}

		// If we never found a matching field in the codec, return
		// error message.
		if !ok {
			return "no such struct field"
		}

		v = initField(structValue, decoder.path)
		if !v.IsValid() {
			return "no such struct field"
		}
		if !v.CanSet() {
			return "cannot set struct field"
		}

		if decoder.structCodec != nil {
			codec = decoder.structCodec
			structValue = v
		}

		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
			if l.m == nil {
				l.m = make(map[string]int)
			}
			sliceIndex = l.m[p.Name]
			l.m[p.Name] = sliceIndex + 1
			for v.Len() <= sliceIndex {
				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
			}
			structValue = v.Index(sliceIndex)
			requireSlice = false
		}
	}

	var slice reflect.Value
	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
		slice = v
		v = reflect.New(v.Type().Elem()).Elem()
	} else if requireSlice {
		return "multiple-valued property requires a slice field type"
	}

	// Convert indexValues to a Go value with a meaning derived from the
	// destination type.
	pValue := p.Value
	if iv, ok := pValue.(indexValue); ok {
		meaning := pb.Property_NO_MEANING
		switch v.Type() {
		case typeOfBlobKey:
			meaning = pb.Property_BLOBKEY
		case typeOfByteSlice:
			meaning = pb.Property_BLOB
		case typeOfByteString:
			meaning = pb.Property_BYTESTRING
		case typeOfGeoPoint:
			meaning = pb.Property_GEORSS_POINT
		case typeOfTime:
			meaning = pb.Property_GD_WHEN
		case typeOfEntityPtr:
			meaning = pb.Property_ENTITY_PROTO
		}
		var err error
		pValue, err = propValue(iv.value, meaning)
		if err != nil {
			return err.Error()
		}
	}

	if errReason := setVal(v, pValue); errReason != "" {
		// Set the slice back to its zero value.
		if slice.IsValid() {
			slice.Set(reflect.Zero(slice.Type()))
		}
		return errReason
	}

	if slice.IsValid() {
		slice.Index(sliceIndex).Set(v)
	}

	return ""
}

// setVal sets v to the value pValue.
func setVal(v reflect.Value, pValue interface{}) string {
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x, ok := pValue.(int64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		x, ok := pValue.(bool)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		v.SetBool(x)
	case reflect.String:
		switch x := pValue.(type) {
		case appengine.BlobKey:
			v.SetString(string(x))
		case ByteString:
			v.SetString(string(x))
		case string:
			v.SetString(x)
		default:
			if pValue != nil {
				return typeMismatchReason(pValue, v)
			}
		}
	case reflect.Float32, reflect.Float64:
		x, ok := pValue.(float64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		x, ok := pValue.(*Key)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if _, ok := v.Interface().(*Key); !ok {
			return typeMismatchReason(pValue, v)
		}
		v.Set(reflect.ValueOf(x))
	case reflect.Struct:
		switch v.Type() {
		case typeOfTime:
			x, ok := pValue.(time.Time)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		case typeOfGeoPoint:
			x, ok := pValue.(appengine.GeoPoint)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		default:
			ent, ok := pValue.(*Entity)
			if !ok {
				return typeMismatchReason(pValue, v)
			}

			// Recursively load nested struct
			pls, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return err.Error()
			}

			// if ent has a Key value and our struct has a Key field,
			// load the Entity's Key value into the Key field on the struct.
			if ent.Key != nil && pls.codec.keyField != -1 {
				pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
			}

			err = pls.Load(ent.Properties)
			if err != nil {
				return err.Error()
			}
		}
	case reflect.Slice:
		x, ok := pValue.([]byte)
		if !ok {
			if y, yok := pValue.(ByteString); yok {
				x, ok = []byte(y), true
			}
		}
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.Type().Elem().Kind() != reflect.Uint8 {
			return typeMismatchReason(pValue, v)
		}
		v.SetBytes(x)
	default:
		return typeMismatchReason(pValue, v)
	}
	return ""
}

// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
	for _, i := range index[:len(index)-1] {
		val = val.Field(i)
		if val.Kind() == reflect.Ptr {
			if val.IsNil() {
				val.Set(reflect.New(val.Type().Elem()))
			}
			val = val.Elem()
		}
	}
	return val.Field(index[len(index)-1])
}

// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
	ent, err := protoToEntity(src)
	if err != nil {
		return err
	}
	if e, ok := dst.(PropertyLoadSaver); ok {
		return e.Load(ent.Properties)
	}
	return LoadStruct(dst, ent.Properties)
}

func (s structPLS) Load(props []Property) error {
	var fieldName, reason string
	var l propertyLoader
	for _, p := range props {
		if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
			// We don't return early, as we try to load as many properties as possible.
			// It is valid to load an entity into a struct that cannot fully represent it.
			// That case returns an error, but the caller is free to ignore it.
			fieldName, reason = p.Name, errStr
		}
	}
	if reason != "" {
		return &ErrFieldMismatch{
			StructType: s.v.Type(),
			FieldName:  fieldName,
			Reason:     reason,
		}
	}
	return nil
}

func protoToEntity(src *pb.EntityProto) (*Entity, error) {
	props, rawProps := src.Property, src.RawProperty
	outProps := make([]Property, 0, len(props)+len(rawProps))
	for {
		var (
			x       *pb.Property
			noIndex bool
		)
		if len(props) > 0 {
			x, props = props[0], props[1:]
		} else if len(rawProps) > 0 {
			x, rawProps = rawProps[0], rawProps[1:]
			noIndex = true
		} else {
			break
		}

		var value interface{}
		if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
			value = indexValue{x.Value}
		} else {
			var err error
			value, err = propValue(x.Value, x.GetMeaning())
			if err != nil {
				return nil, err
			}
		}
		outProps = append(outProps, Property{
			Name:     x.GetName(),
			Value:    value,
			NoIndex:  noIndex,
			Multiple: x.GetMultiple(),
		})
	}

	var key *Key
	if src.Key != nil {
		// Ignore any error, since nested entity values
		// are allowed to have an invalid key.
		key, _ = protoToKey(src.Key)
	}
	return &Entity{key, outProps}, nil
}

// propValue returns a Go value that combines the raw PropertyValue with a
// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
	switch {
	case v.Int64Value != nil:
		if m == pb.Property_GD_WHEN {
			return fromUnixMicro(*v.Int64Value), nil
		} else {
			return *v.Int64Value, nil
		}
	case v.BooleanValue != nil:
		return *v.BooleanValue, nil
	case v.StringValue != nil:
		if m == pb.Property_BLOB {
			return []byte(*v.StringValue), nil
		} else if m == pb.Property_BLOBKEY {
			return appengine.BlobKey(*v.StringValue), nil
		} else if m == pb.Property_BYTESTRING {
			return ByteString(*v.StringValue), nil
		} else if m == pb.Property_ENTITY_PROTO {
			var ent pb.EntityProto
			err := proto.Unmarshal([]byte(*v.StringValue), &ent)
			if err != nil {
				return nil, err
			}
			return protoToEntity(&ent)
		} else {
			return *v.StringValue, nil
		}
	case v.DoubleValue != nil:
		return *v.DoubleValue, nil
	case v.Referencevalue != nil:
		key, err := referenceValueToKey(v.Referencevalue)
		if err != nil {
			return nil, err
		}
		return key, nil
	case v.Pointvalue != nil:
		// NOTE: Strangely, latitude maps to X, longitude to Y.
		return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
	}
	return nil, nil
}

// indexValue is a Property value that is created when entities are loaded from
// an index, such as from a projection query.
//
// Such Property values do not contain all of the metadata required to be
// faithfully represented as a Go value, and are instead represented as an
// opaque indexValue. Load the properties into a concrete struct type (e.g. by
// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
// of type int, string, time.Time, etc.
type indexValue struct {
	value *pb.PropertyValue
}
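
// exampleTolerantLoad is an illustrative sketch added by the editor, not part
// of the upstream vendored file. It shows the "best effort" behaviour of
// structPLS.Load above: a property with no matching struct field yields an
// *ErrFieldMismatch, but every property that does match is still loaded, so
// callers may choose to ignore that error. The note type is hypothetical.
func exampleTolerantLoad() (string, error) {
	type note struct {
		Title string
	}
	var n note
	err := LoadStruct(&n, []Property{
		{Name: "Title", Value: "hello"},
		{Name: "Obsolete", Value: int64(1)}, // no matching field on note
	})
	// err is an *ErrFieldMismatch for "Obsolete", but n.Title is still "hello".
	return n.Title, err
}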
78
vendor/google.golang.org/appengine/datastore/metadata.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import "golang.org/x/net/context"

// Datastore kinds for the metadata entities.
const (
	namespaceKind = "__namespace__"
	kindKind      = "__kind__"
	propertyKind  = "__property__"
)

// Namespaces returns all the datastore namespaces.
func Namespaces(ctx context.Context) ([]string, error) {
	// TODO(djd): Support range queries.
	q := NewQuery(namespaceKind).KeysOnly()
	keys, err := q.GetAll(ctx, nil)
	if err != nil {
		return nil, err
	}
	// The empty namespace key uses a numeric ID (==1), but luckily
	// the string ID defaults to "" for numeric IDs anyway.
	return keyNames(keys), nil
}

// Kinds returns the names of all the kinds in the current namespace.
func Kinds(ctx context.Context) ([]string, error) {
	// TODO(djd): Support range queries.
	q := NewQuery(kindKind).KeysOnly()
	keys, err := q.GetAll(ctx, nil)
	if err != nil {
		return nil, err
	}
	return keyNames(keys), nil
}

// keyNames returns a slice of the provided keys' names (string IDs).
func keyNames(keys []*Key) []string {
	n := make([]string, 0, len(keys))
	for _, k := range keys {
		n = append(n, k.StringID())
	}
	return n
}

// KindProperties returns all the indexed properties for the given kind.
// The properties are returned as a map of property names to a slice of the
// representation types. The representation types for the supported Go property
// types are:
//	"INT64":     signed integers and time.Time
//	"DOUBLE":    float32 and float64
//	"BOOLEAN":   bool
//	"STRING":    string, []byte and ByteString
//	"POINT":     appengine.GeoPoint
//	"REFERENCE": *Key
//	"USER":      (not used in the Go runtime)
func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
	// TODO(djd): Support range queries.
	kindKey := NewKey(ctx, kindKind, kind, 0, nil)
	q := NewQuery(propertyKind).Ancestor(kindKey)

	propMap := map[string][]string{}
	props := []struct {
		Repr []string `datastore:"property_representation"`
	}{}

	keys, err := q.GetAll(ctx, &props)
	if err != nil {
		return nil, err
	}
	for i, p := range props {
		propMap[keys[i].StringID()] = p.Repr
	}
	return propMap, nil
}
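
// exampleDescribeKinds is an illustrative sketch added by the editor, not part
// of the upstream vendored file. It combines Kinds and KindProperties to dump
// the indexed schema of the current namespace.
func exampleDescribeKinds(ctx context.Context) (map[string]map[string][]string, error) {
	kinds, err := Kinds(ctx)
	if err != nil {
		return nil, err
	}
	out := make(map[string]map[string][]string, len(kinds))
	for _, k := range kinds {
		props, err := KindProperties(ctx, k)
		if err != nil {
			return nil, err
		}
		out[k] = props
	}
	return out, nil
}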
330
vendor/google.golang.org/appengine/datastore/prop.go
generated
vendored
Normal file
@ -0,0 +1,330 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"fmt"
	"reflect"
	"strings"
	"sync"
	"unicode"
)

// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 20000

// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20

// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. An entity can
// have multiple Properties with the same name, provided that p.Multiple is
// true on all of that entity's Properties with that name.
type Property struct {
	// Name is the property name.
	Name string
	// Value is the property value. The valid types are:
	//	- int64
	//	- bool
	//	- string
	//	- float64
	//	- ByteString
	//	- *Key
	//	- time.Time
	//	- appengine.BlobKey
	//	- appengine.GeoPoint
	//	- []byte (up to 1 megabyte in length)
	//	- *Entity (representing a nested struct)
	// This set is smaller than the set of valid struct field types that the
	// datastore can load and save. A Property Value cannot be a slice (apart
	// from []byte); use multiple Properties instead. Also, a Value's type
	// must be explicitly on the list above; it is not sufficient for the
	// underlying type to be on that list. For example, a Value of "type
	// myInt64 int64" is invalid. Smaller-width integers and floats are also
	// invalid. Again, this is more restrictive than the set of valid struct
	// field types.
	//
	// A Value will have an opaque type when loading entities from an index,
	// such as via a projection query. Load entities into a struct instead
	// of a PropertyLoadSaver when using a projection query.
	//
	// A Value may also be the nil interface value; this is equivalent to
	// Python's None but not directly representable by a Go struct. Loading
	// a nil-valued property into a struct will set that field to the zero
	// value.
	Value interface{}
	// NoIndex is whether the datastore cannot index this property.
	NoIndex bool
	// Multiple is whether the entity can have multiple properties with
	// the same name. Even if a particular instance only has one property with
	// a certain name, Multiple should be true if a struct would best represent
	// it as a field of type []T instead of type T.
	Multiple bool
}

// An Entity is the value type for a nested struct.
// This type is only used for a Property's Value.
type Entity struct {
	Key        *Key
	Properties []Property
}

// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
type ByteString []byte

// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
	Load([]Property) error
	Save() ([]Property, error)
}

// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property

var (
	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
)

// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
	*l = append(*l, p...)
	return nil
}

// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
	return *l, nil
}

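// The type below is an illustrative sketch added by the editor, not part of
// the upstream vendored file. It shows a minimal custom PropertyLoadSaver that
// accepts arbitrary properties on load by delegating to a PropertyList and
// appends one derived, unindexed property on save.
type exampleAnnotated struct {
	Props PropertyList
}

func (e *exampleAnnotated) Load(p []Property) error {
	return e.Props.Load(p)
}

func (e *exampleAnnotated) Save() ([]Property, error) {
	// Save everything that was loaded, plus a derived property.
	return append(e.Props, Property{Name: "Note", Value: "saved by sketch", NoIndex: true}), nil
}
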
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
	if name == "" {
		return false
	}
	for _, s := range strings.Split(name, ".") {
		if s == "" {
			return false
		}
		first := true
		for _, c := range s {
			if first {
				first = false
				if c != '_' && !unicode.IsLetter(c) {
					return false
				}
			} else {
				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
					return false
				}
			}
		}
	}
	return true
}

// structCodec describes how to convert a struct to and from a sequence of
// properties.
type structCodec struct {
	// fields gives the field codec for the structTag with the given name.
	fields map[string]fieldCodec
	// hasSlice is whether a struct or any of its nested or embedded structs
	// has a slice-typed field (other than []byte).
	hasSlice bool
	// keyField is the index of a *Key field with structTag __key__.
	// This field is not relevant for the top level struct, only for
	// nested structs.
	keyField int
	// complete is whether the structCodec is complete. An incomplete
	// structCodec may be encountered when walking a recursive struct.
	complete bool
}

// fieldCodec is a struct field's index and, if that struct field's type is
// itself a struct, that substruct's structCodec.
type fieldCodec struct {
	// path is the index path to the field.
	path    []int
	noIndex bool
	// omitEmpty indicates that the field should be omitted on save
	// if empty.
	omitEmpty bool
	// structCodec is the codec for the struct field at index 'path',
	// or nil if the field is not a struct.
	structCodec *structCodec
}

// structCodecs collects the structCodecs that have already been calculated.
var (
	structCodecsMutex sync.Mutex
	structCodecs      = make(map[reflect.Type]*structCodec)
)

// getStructCodec returns the structCodec for the given struct type.
func getStructCodec(t reflect.Type) (*structCodec, error) {
	structCodecsMutex.Lock()
	defer structCodecsMutex.Unlock()
	return getStructCodecLocked(t)
}

// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
// be held when calling this function.
func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
	c, ok := structCodecs[t]
	if ok {
		return c, nil
	}
	c = &structCodec{
		fields: make(map[string]fieldCodec),
		// We initialize keyField to -1 so that the zero-value is not
		// misinterpreted as index 0.
		keyField: -1,
	}

	// Add c to the structCodecs map before we are sure it is good. If t is
	// a recursive type, it needs to find the incomplete entry for itself in
	// the map.
	structCodecs[t] = c
	defer func() {
		if retErr != nil {
			delete(structCodecs, t)
		}
	}()

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Skip unexported fields.
		// Note that if f is an anonymous, unexported struct field,
		// we will promote its fields.
		if f.PkgPath != "" && !f.Anonymous {
			continue
		}

		tags := strings.Split(f.Tag.Get("datastore"), ",")
		name := tags[0]
		opts := make(map[string]bool)
		for _, t := range tags[1:] {
			opts[t] = true
		}
		switch {
		case name == "":
			if !f.Anonymous {
				name = f.Name
			}
		case name == "-":
			continue
		case name == "__key__":
			if f.Type != typeOfKeyPtr {
				return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
			}
			c.keyField = i
		case !validPropertyName(name):
			return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
		}

		substructType, fIsSlice := reflect.Type(nil), false
		switch f.Type.Kind() {
		case reflect.Struct:
			substructType = f.Type
		case reflect.Slice:
			if f.Type.Elem().Kind() == reflect.Struct {
				substructType = f.Type.Elem()
			}
			fIsSlice = f.Type != typeOfByteSlice
			c.hasSlice = c.hasSlice || fIsSlice
		}

		var sub *structCodec
		if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
			var err error
			sub, err = getStructCodecLocked(substructType)
			if err != nil {
				return nil, err
			}
			if !sub.complete {
				return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
			}
			if fIsSlice && sub.hasSlice {
				return nil, fmt.Errorf(
					"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
			}
			c.hasSlice = c.hasSlice || sub.hasSlice
			// If f is an anonymous struct field, we promote the substruct's fields up to this level
			// in the linked list of struct codecs.
			if f.Anonymous {
				for subname, subfield := range sub.fields {
					if name != "" {
						subname = name + "." + subname
					}
					if _, ok := c.fields[subname]; ok {
						return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
					}
					c.fields[subname] = fieldCodec{
						path:        append([]int{i}, subfield.path...),
						noIndex:     subfield.noIndex || opts["noindex"],
						omitEmpty:   subfield.omitEmpty,
						structCodec: subfield.structCodec,
					}
				}
				continue
			}
		}

		if _, ok := c.fields[name]; ok {
			return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
		}
		c.fields[name] = fieldCodec{
			path:        []int{i},
			noIndex:     opts["noindex"],
			omitEmpty:   opts["omitempty"],
			structCodec: sub,
		}
	}
	c.complete = true
	return c, nil
}

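// The struct below is an illustrative sketch added by the editor, not part of
// the upstream vendored file. It exercises the tag handling implemented in
// getStructCodecLocked above: renamed fields, noindex, omitempty, skipped
// fields, a nested-entity __key__ field, and a slice field. The field names
// are arbitrary.
type exampleTaggedEntity struct {
	Title   string   `datastore:"title"`      // stored under a custom property name
	Body    string   `datastore:",noindex"`   // keeps the field name, skips indexing
	Draft   bool     `datastore:",omitempty"` // omitted on save when false
	Ignored string   `datastore:"-"`          // never loaded or saved
	K       *Key     `datastore:"__key__"`    // filled from a nested entity's key
	Tags    []string // each element becomes its own Property with Multiple set
}
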
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
	v     reflect.Value
	codec *structCodec
}

// newStructPLS returns a structPLS, which implements the
// PropertyLoadSaver interface, for the struct pointer p.
func newStructPLS(p interface{}) (*structPLS, error) {
	v := reflect.ValueOf(p)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return nil, ErrInvalidEntityType
	}
	v = v.Elem()
	codec, err := getStructCodec(v.Type())
	if err != nil {
		return nil, err
	}
	return &structPLS{v, codec}, nil
}

// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
func LoadStruct(dst interface{}, p []Property) error {
	x, err := newStructPLS(dst)
	if err != nil {
		return err
	}
	return x.Load(p)
}

// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
	x, err := newStructPLS(src)
	if err != nil {
		return nil, err
	}
	return x.Save()
}
774
vendor/google.golang.org/appengine/datastore/query.go
generated
vendored
Normal file
@ -0,0 +1,774 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"encoding/base64"
	"errors"
	"fmt"
	"math"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/datastore"
)

type operator int

const (
	lessThan operator = iota
	lessEq
	equal
	greaterEq
	greaterThan
)

var operatorToProto = map[operator]*pb.Query_Filter_Operator{
	lessThan:    pb.Query_Filter_LESS_THAN.Enum(),
	lessEq:      pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
	equal:       pb.Query_Filter_EQUAL.Enum(),
	greaterEq:   pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
	greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
}

// filter is a conditional filter on query results.
type filter struct {
	FieldName string
	Op        operator
	Value     interface{}
}

type sortDirection int

const (
	ascending sortDirection = iota
	descending
)

var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
	ascending:  pb.Query_Order_ASCENDING.Enum(),
	descending: pb.Query_Order_DESCENDING.Enum(),
}

// order is a sort order on query results.
type order struct {
	FieldName string
	Direction sortDirection
}

// NewQuery creates a new Query for a specific entity kind.
//
// An empty kind means to return all entities, including entities created and
// managed by other App Engine features, and is called a kindless query.
// Kindless queries cannot include filters or sort orders on property values.
func NewQuery(kind string) *Query {
	return &Query{
		kind:  kind,
		limit: -1,
	}
}

// Query represents a datastore query.
type Query struct {
	kind       string
	ancestor   *Key
	filter     []filter
	order      []order
	projection []string

	distinct   bool
	distinctOn []string
	keysOnly   bool
	eventual   bool
	limit      int32
	offset     int32
	count      int32
	start      *pb.CompiledCursor
	end        *pb.CompiledCursor

	err error
}

func (q *Query) clone() *Query {
	x := *q
	// Copy the contents of the slice-typed fields to a new backing store.
	if len(q.filter) > 0 {
		x.filter = make([]filter, len(q.filter))
		copy(x.filter, q.filter)
	}
	if len(q.order) > 0 {
		x.order = make([]order, len(q.order))
		copy(x.order, q.order)
	}
	return &x
}

// Ancestor returns a derivative query with an ancestor filter.
// The ancestor should not be nil.
func (q *Query) Ancestor(ancestor *Key) *Query {
	q = q.clone()
	if ancestor == nil {
		q.err = errors.New("datastore: nil query ancestor")
		return q
	}
	q.ancestor = ancestor
	return q
}

// EventualConsistency returns a derivative query that returns eventually
// consistent results.
// It only has an effect on ancestor queries.
func (q *Query) EventualConsistency() *Query {
	q = q.clone()
	q.eventual = true
	return q
}

// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
	q = q.clone()
	filterStr = strings.TrimSpace(filterStr)
	if len(filterStr) < 1 {
		q.err = errors.New("datastore: invalid filter: " + filterStr)
		return q
	}
	f := filter{
		FieldName: strings.TrimRight(filterStr, " ><=!"),
		Value:     value,
	}
	switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
	case "<=":
		f.Op = lessEq
	case ">=":
		f.Op = greaterEq
	case "<":
		f.Op = lessThan
	case ">":
		f.Op = greaterThan
	case "=":
		f.Op = equal
	default:
		q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
		return q
	}
	q.filter = append(q.filter, f)
	return q
}

// Order returns a derivative query with a field-based sort order. Orders are
// applied in the order they are added. The default order is ascending; to sort
// in descending order prefix the fieldName with a minus sign (-).
func (q *Query) Order(fieldName string) *Query {
	q = q.clone()
	fieldName = strings.TrimSpace(fieldName)
	o := order{
		Direction: ascending,
		FieldName: fieldName,
	}
	if strings.HasPrefix(fieldName, "-") {
		o.Direction = descending
		o.FieldName = strings.TrimSpace(fieldName[1:])
	} else if strings.HasPrefix(fieldName, "+") {
		q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
		return q
	}
	if len(o.FieldName) == 0 {
		q.err = errors.New("datastore: empty order")
		return q
	}
	q.order = append(q.order, o)
	return q
}

// Project returns a derivative query that yields only the given fields. It
// cannot be used with KeysOnly.
func (q *Query) Project(fieldNames ...string) *Query {
	q = q.clone()
	q.projection = append([]string(nil), fieldNames...)
	return q
}

// Distinct returns a derivative query that yields de-duplicated entities with
// respect to the set of projected fields. It is only used for projection
// queries. Distinct cannot be used with DistinctOn.
func (q *Query) Distinct() *Query {
	q = q.clone()
	q.distinct = true
	return q
}

// DistinctOn returns a derivative query that yields de-duplicated entities with
// respect to the set of the specified fields. It is only used for projection
// queries. The field list should be a subset of the projected field list.
// DistinctOn cannot be used with Distinct.
func (q *Query) DistinctOn(fieldNames ...string) *Query {
	q = q.clone()
	q.distinctOn = fieldNames
	return q
}

// KeysOnly returns a derivative query that yields only keys, not keys and
// entities. It cannot be used with projection queries.
func (q *Query) KeysOnly() *Query {
	q = q.clone()
	q.keysOnly = true
	return q
}

// Limit returns a derivative query that has a limit on the number of results
|
||||
// returned. A negative value means unlimited.
|
||||
func (q *Query) Limit(limit int) *Query {
|
||||
q = q.clone()
|
||||
if limit < math.MinInt32 || limit > math.MaxInt32 {
|
||||
q.err = errors.New("datastore: query limit overflow")
|
||||
return q
|
||||
}
|
||||
q.limit = int32(limit)
|
||||
return q
|
||||
}
|
||||
|
||||
// Offset returns a derivative query that has an offset of how many keys to
|
||||
// skip over before returning results. A negative value is invalid.
|
||||
func (q *Query) Offset(offset int) *Query {
|
||||
q = q.clone()
|
||||
if offset < 0 {
|
||||
q.err = errors.New("datastore: negative query offset")
|
||||
return q
|
||||
}
|
||||
if offset > math.MaxInt32 {
|
||||
q.err = errors.New("datastore: query offset overflow")
|
||||
return q
|
||||
}
|
||||
q.offset = int32(offset)
|
||||
return q
|
||||
}
|
||||
|
||||
// BatchSize returns a derivative query to fetch the supplied number of results
|
||||
// at once. This value should be greater than zero, and equal to or less than
|
||||
// the Limit.
|
||||
func (q *Query) BatchSize(size int) *Query {
|
||||
q = q.clone()
|
||||
if size <= 0 || size > math.MaxInt32 {
|
||||
q.err = errors.New("datastore: query batch size overflow")
|
||||
return q
|
||||
}
|
||||
q.count = int32(size)
|
||||
return q
|
||||
}
|
||||
|
||||
// Start returns a derivative query with the given start point.
|
||||
func (q *Query) Start(c Cursor) *Query {
|
||||
q = q.clone()
|
||||
if c.cc == nil {
|
||||
q.err = errors.New("datastore: invalid cursor")
|
||||
return q
|
||||
}
|
||||
q.start = c.cc
|
||||
return q
|
||||
}
|
||||
|
||||
// End returns a derivative query with the given end point.
|
||||
func (q *Query) End(c Cursor) *Query {
|
||||
q = q.clone()
|
||||
if c.cc == nil {
|
||||
q.err = errors.New("datastore: invalid cursor")
|
||||
return q
|
||||
}
|
||||
q.end = c.cc
|
||||
return q
|
||||
}
|
||||
|
||||
// toProto converts the query to a protocol buffer.
|
||||
func (q *Query) toProto(dst *pb.Query, appID string) error {
|
||||
if len(q.projection) != 0 && q.keysOnly {
|
||||
return errors.New("datastore: query cannot both project and be keys-only")
|
||||
}
|
||||
if len(q.distinctOn) != 0 && q.distinct {
|
||||
return errors.New("datastore: query cannot be both distinct and distinct-on")
|
||||
}
|
||||
dst.Reset()
|
||||
dst.App = proto.String(appID)
|
||||
if q.kind != "" {
|
||||
dst.Kind = proto.String(q.kind)
|
||||
}
|
||||
if q.ancestor != nil {
|
||||
dst.Ancestor = keyToProto(appID, q.ancestor)
|
||||
if q.eventual {
|
||||
dst.Strong = proto.Bool(false)
|
||||
}
|
||||
}
|
||||
if q.projection != nil {
|
||||
dst.PropertyName = q.projection
|
||||
if len(q.distinctOn) != 0 {
|
||||
dst.GroupByPropertyName = q.distinctOn
|
||||
}
|
||||
if q.distinct {
|
||||
dst.GroupByPropertyName = q.projection
|
||||
}
|
||||
}
|
||||
if q.keysOnly {
|
||||
dst.KeysOnly = proto.Bool(true)
|
||||
dst.RequirePerfectPlan = proto.Bool(true)
|
||||
}
|
||||
for _, qf := range q.filter {
|
||||
if qf.FieldName == "" {
|
||||
return errors.New("datastore: empty query filter field name")
|
||||
}
|
||||
p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
|
||||
if errStr != "" {
|
||||
return errors.New("datastore: bad query filter value type: " + errStr)
|
||||
}
|
||||
xf := &pb.Query_Filter{
|
||||
Op: operatorToProto[qf.Op],
|
||||
Property: []*pb.Property{p},
|
||||
}
|
||||
if xf.Op == nil {
|
||||
return errors.New("datastore: unknown query filter operator")
|
||||
}
|
||||
dst.Filter = append(dst.Filter, xf)
|
||||
}
|
||||
for _, qo := range q.order {
|
||||
if qo.FieldName == "" {
|
||||
return errors.New("datastore: empty query order field name")
|
||||
}
|
||||
xo := &pb.Query_Order{
|
||||
Property: proto.String(qo.FieldName),
|
||||
Direction: sortDirectionToProto[qo.Direction],
|
||||
}
|
||||
if xo.Direction == nil {
|
||||
return errors.New("datastore: unknown query order direction")
|
||||
}
|
||||
dst.Order = append(dst.Order, xo)
|
||||
}
|
||||
if q.limit >= 0 {
|
||||
dst.Limit = proto.Int32(q.limit)
|
||||
}
|
||||
if q.offset != 0 {
|
||||
dst.Offset = proto.Int32(q.offset)
|
||||
}
|
||||
if q.count != 0 {
|
||||
dst.Count = proto.Int32(q.count)
|
||||
}
|
||||
dst.CompiledCursor = q.start
|
||||
dst.EndCompiledCursor = q.end
|
||||
dst.Compile = proto.Bool(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Count returns the number of results for the query.
|
||||
//
|
||||
// The running time and number of API calls made by Count scale linearly with
|
||||
// the sum of the query's offset and limit. Unless the result count is
|
||||
// expected to be small, it is best to specify a limit; otherwise Count will
|
||||
// continue until it finishes counting or the provided context expires.
|
||||
func (q *Query) Count(c context.Context) (int, error) {
|
||||
// Check that the query is well-formed.
|
||||
if q.err != nil {
|
||||
return 0, q.err
|
||||
}
|
||||
|
||||
// Run a copy of the query, with keysOnly true (if we're not a projection,
|
||||
// since the two are incompatible), and an adjusted offset. We also set the
|
||||
// limit to zero, as we don't want any actual entity data, just the number
|
||||
// of skipped results.
|
||||
newQ := q.clone()
|
||||
newQ.keysOnly = len(newQ.projection) == 0
|
||||
newQ.limit = 0
|
||||
if q.limit < 0 {
|
||||
// If the original query was unlimited, set the new query's offset to maximum.
|
||||
newQ.offset = math.MaxInt32
|
||||
} else {
|
||||
newQ.offset = q.offset + q.limit
|
||||
if newQ.offset < 0 {
|
||||
// Do the best we can, in the presence of overflow.
|
||||
newQ.offset = math.MaxInt32
|
||||
}
|
||||
}
|
||||
req := &pb.Query{}
|
||||
if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
res := &pb.QueryResult{}
|
||||
if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// n is the count we will return. For example, suppose that our original
|
||||
// query had an offset of 4 and a limit of 2008: the count will be 2008,
|
||||
// provided that there are at least 2012 matching entities. However, the
|
||||
// RPCs will only skip 1000 results at a time. The RPC sequence is:
|
||||
// call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
|
||||
// response has (skippedResults, moreResults) = (1000, true)
|
||||
// n += 1000 // n == 1000
|
||||
// call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
|
||||
// response has (skippedResults, moreResults) = (1000, true)
|
||||
// n += 1000 // n == 2000
|
||||
// call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
|
||||
// response has (skippedResults, moreResults) = (12, false)
|
||||
// n += 12 // n == 2012
|
||||
// // exit the loop
|
||||
// n -= 4 // n == 2008
|
||||
var n int32
|
||||
for {
|
||||
// The QueryResult should have no actual entity data, just skipped results.
|
||||
if len(res.Result) != 0 {
|
||||
return 0, errors.New("datastore: internal error: Count request returned too much data")
|
||||
}
|
||||
n += res.GetSkippedResults()
|
||||
if !res.GetMoreResults() {
|
||||
break
|
||||
}
|
||||
if err := callNext(c, res, newQ.offset-n, q.count); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n -= q.offset
|
||||
if n < 0 {
|
||||
// If the offset was greater than the number of matching entities,
|
||||
// return 0 instead of negative.
|
||||
n = 0
|
||||
}
|
||||
return int(n), nil
|
||||
}
|
||||
|
||||
// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
|
||||
// returned by a query with more results.
|
||||
func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error {
|
||||
if res.Cursor == nil {
|
||||
return errors.New("datastore: internal error: server did not return a cursor")
|
||||
}
|
||||
req := &pb.NextRequest{
|
||||
Cursor: res.Cursor,
|
||||
}
|
||||
if count >= 0 {
|
||||
req.Count = proto.Int32(count)
|
||||
}
|
||||
if offset != 0 {
|
||||
req.Offset = proto.Int32(offset)
|
||||
}
|
||||
if res.CompiledCursor != nil {
|
||||
req.Compile = proto.Bool(true)
|
||||
}
|
||||
res.Reset()
|
||||
return internal.Call(c, "datastore_v3", "Next", req, res)
|
||||
}
|
||||
|
||||
// GetAll runs the query in the given context and returns all keys that match
// that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
	var (
		dv               reflect.Value
		mat              multiArgType
		elemType         reflect.Type
		errFieldMismatch error
	)
	if !q.keysOnly {
		dv = reflect.ValueOf(dst)
		if dv.Kind() != reflect.Ptr || dv.IsNil() {
			return nil, ErrInvalidEntityType
		}
		dv = dv.Elem()
		mat, elemType = checkMultiArg(dv)
		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
			return nil, ErrInvalidEntityType
		}
	}

	var keys []*Key
	for t := q.Run(c); ; {
		k, e, err := t.next()
		if err == Done {
			break
		}
		if err != nil {
			return keys, err
		}
		if !q.keysOnly {
			ev := reflect.New(elemType)
			if elemType.Kind() == reflect.Map {
				// This is a special case. The zero values of a map type are
				// not immediately useful; they have to be make'd.
				//
				// Funcs and channels are similar, in that a zero value is not useful,
				// but even a freshly make'd channel isn't useful: there's no fixed
				// channel buffer size that is always going to be large enough, and
				// there's no goroutine to drain the other end. Theoretically, these
				// types could be supported, for example by sniffing for a constructor
				// method or requiring prior registration, but for now it's not a
				// frequent enough concern to be worth it. Programmers can work around
				// it by explicitly using Iterator.Next instead of the Query.GetAll
				// convenience method.
				x := reflect.MakeMap(elemType)
				ev.Elem().Set(x)
			}
			if err = loadEntity(ev.Interface(), e); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return keys, err
				}
			}
			if mat != multiArgTypeStructPtr {
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
		}
		keys = append(keys, k)
	}
	return keys, errFieldMismatch
}

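
// getAllSketch is a minimal illustrative sketch (it is not called anywhere in
// this package) of the GetAll contract documented above: the returned keys
// correspond 1-1 with the entities appended to dst. The Widget type and the
// caller-supplied ctx are assumptions made purely for illustration.
func getAllSketch(ctx context.Context) ([]*Key, error) {
	type Widget struct {
		Name string
	}
	var widgets []Widget
	// A limit keeps the running time and API-call count bounded, as the doc
	// comment above recommends for queries that may match many entities.
	keys, err := NewQuery("Widget").Limit(20).GetAll(ctx, &widgets)
	if err != nil {
		// err may be a *ErrFieldMismatch, in which case keys and widgets
		// still hold the results that did load.
		return keys, err
	}
	return keys, nil
}
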
// Run runs the query in the given context.
func (q *Query) Run(c context.Context) *Iterator {
	if q.err != nil {
		return &Iterator{err: q.err}
	}
	t := &Iterator{
		c:      c,
		limit:  q.limit,
		count:  q.count,
		q:      q,
		prevCC: q.start,
	}
	var req pb.Query
	if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
		t.err = err
		return t
	}
	if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
		t.err = err
		return t
	}
	offset := q.offset - t.res.GetSkippedResults()
	var count int32
	if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
		count = t.count
	} else {
		count = t.limit
	}
	for offset > 0 && t.res.GetMoreResults() {
		t.prevCC = t.res.CompiledCursor
		if err := callNext(t.c, &t.res, offset, count); err != nil {
			t.err = err
			break
		}
		skip := t.res.GetSkippedResults()
		if skip < 0 {
			t.err = errors.New("datastore: internal error: negative number of skipped_results")
			break
		}
		offset -= skip
	}
	if offset < 0 {
		t.err = errors.New("datastore: internal error: query offset was overshot")
	}
	return t
}

// Iterator is the result of running a query.
type Iterator struct {
	c   context.Context
	err error
	// res is the result of the most recent RunQuery or Next API call.
	res pb.QueryResult
	// i is how many elements of res.Result we have iterated over.
	i int
	// limit is the limit on the number of results this iterator should return.
	// A negative value means unlimited.
	limit int32
	// count is the number of results this iterator should fetch at once. This
	// should be equal to or greater than zero.
	count int32
	// q is the original query which yielded this iterator.
	q *Query
	// prevCC is the compiled cursor that marks the end of the previous batch
	// of results.
	prevCC *pb.CompiledCursor
}

// Done is returned when a query iteration has completed.
var Done = errors.New("datastore: query has no more results")

// Next returns the key of the next result. When there are no more results,
// Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
	k, e, err := t.next()
	if err != nil {
		return nil, err
	}
	if dst != nil && !t.q.keysOnly {
		err = loadEntity(dst, e)
	}
	return k, err
}

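
// iterateSketch is a minimal illustrative sketch (not called anywhere in this
// package) of driving an Iterator by hand with Run and Next until Done, as the
// Next doc comment above describes. The Widget type and the caller-supplied
// ctx are assumptions made purely for illustration.
func iterateSketch(ctx context.Context) error {
	type Widget struct {
		Name string
	}
	for t := NewQuery("Widget").Run(ctx); ; {
		var w Widget
		_, err := t.Next(&w)
		if err == Done {
			// The iterator is exhausted; Done is not a failure.
			return nil
		}
		if err != nil {
			return err
		}
	}
}
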
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
	if t.err != nil {
		return nil, nil, t.err
	}

	// Issue datastore_v3/Next RPCs as necessary.
	for t.i == len(t.res.Result) {
		if !t.res.GetMoreResults() {
			t.err = Done
			return nil, nil, t.err
		}
		t.prevCC = t.res.CompiledCursor
		var count int32
		if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
			count = t.count
		} else {
			count = t.limit
		}
		if err := callNext(t.c, &t.res, 0, count); err != nil {
			t.err = err
			return nil, nil, t.err
		}
		if t.res.GetSkippedResults() != 0 {
			t.err = errors.New("datastore: internal error: iterator has skipped results")
			return nil, nil, t.err
		}
		t.i = 0
		if t.limit >= 0 {
			t.limit -= int32(len(t.res.Result))
			if t.limit < 0 {
				t.err = errors.New("datastore: internal error: query returned more results than the limit")
				return nil, nil, t.err
			}
		}
	}

	// Extract the key from the t.i'th element of t.res.Result.
	e := t.res.Result[t.i]
	t.i++
	if e.Key == nil {
		return nil, nil, errors.New("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
	}
	return k, e, nil
}

// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
	if t.err != nil && t.err != Done {
		return Cursor{}, t.err
	}
	// If we are at either end of the current batch of results,
	// return the compiled cursor at that end.
	skipped := t.res.GetSkippedResults()
	if t.i == 0 && skipped == 0 {
		if t.prevCC == nil {
			// A nil pointer (of type *pb.CompiledCursor) means no constraint:
			// passing it as the end cursor of a new query means unlimited results
			// (glossing over the integer limit parameter for now).
			// A non-nil pointer to an empty pb.CompiledCursor means the start:
			// passing it as the end cursor of a new query means 0 results.
			// If prevCC was nil, then the original query had no start cursor, but
			// Iterator.Cursor should return "the start" instead of unlimited.
			return Cursor{&zeroCC}, nil
		}
		return Cursor{t.prevCC}, nil
	}
	if t.i == len(t.res.Result) {
		return Cursor{t.res.CompiledCursor}, nil
	}
	// Otherwise, re-run the query offset to this iterator's position, starting from
	// the most recent compiled cursor. This is done on a best-effort basis, as it
	// is racy; if a concurrent process has added or removed entities, then the
	// cursor returned may be inconsistent.
	q := t.q.clone()
	q.start = t.prevCC
	q.offset = skipped + int32(t.i)
	q.limit = 0
	q.keysOnly = len(q.projection) == 0
	t1 := q.Run(t.c)
	_, _, err := t1.next()
	if err != Done {
		if err == nil {
			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
		}
		return Cursor{}, err
	}
	return Cursor{t1.res.CompiledCursor}, nil
}

var zeroCC pb.CompiledCursor

// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
type Cursor struct {
	cc *pb.CompiledCursor
}

// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
	if c.cc == nil {
		return ""
	}
	b, err := proto.Marshal(c.cc)
	if err != nil {
		// The only way to construct a Cursor with a non-nil cc field is to
		// unmarshal from the byte representation. We panic if the unmarshal
		// succeeds but the marshaling of the unchanged protobuf value fails.
		panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
	}
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
	if s == "" {
		return Cursor{&zeroCC}, nil
	}
	if n := len(s) % 4; n != 0 {
		s += strings.Repeat("=", 4-n)
	}
	b, err := base64.URLEncoding.DecodeString(s)
	if err != nil {
		return Cursor{}, err
	}
	cc := &pb.CompiledCursor{}
	if err := proto.Unmarshal(b, cc); err != nil {
		return Cursor{}, err
	}
	return Cursor{cc}, nil
}
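
// cursorSketch is a minimal illustrative sketch (not called anywhere in this
// package) of round-tripping an iterator position through Cursor.String and
// DecodeCursor, then resuming the query from it via Query.Start (defined
// elsewhere in this package). The Widget type, the caller-supplied ctx and the
// previously saved encoded string are assumptions made purely for illustration.
func cursorSketch(ctx context.Context, encoded string) (string, error) {
	type Widget struct {
		Name string
	}
	start, err := DecodeCursor(encoded) // "" decodes to the start of the results
	if err != nil {
		return "", err
	}
	t := NewQuery("Widget").Start(start).Run(ctx)
	var w Widget
	if _, err := t.Next(&w); err != nil && err != Done {
		return "", err
	}
	// Persist the new position as an opaque string, e.g. in a web page link,
	// and feed it back to DecodeCursor on the next request.
	next, err := t.Cursor()
	if err != nil {
		return "", err
	}
	return next.String(), nil
}
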

333 vendor/google.golang.org/appengine/datastore/save.go generated vendored Normal file
@@ -0,0 +1,333 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"errors"
	"fmt"
	"math"
	"reflect"
	"time"

	"github.com/golang/protobuf/proto"

	"google.golang.org/appengine"
	pb "google.golang.org/appengine/internal/datastore"
)

func toUnixMicro(t time.Time) int64 {
	// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
	// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
	// be represented in the numerator of a single int64 divide.
	return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}

func fromUnixMicro(t int64) time.Time {
	return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
}

var (
	minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
	maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)

// valueToProto converts a named value to a newly allocated Property.
// The returned error string is empty on success.
func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
	var (
		pv          pb.PropertyValue
		unsupported bool
	)
	switch v.Kind() {
	case reflect.Invalid:
		// No-op.
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		pv.Int64Value = proto.Int64(v.Int())
	case reflect.Bool:
		pv.BooleanValue = proto.Bool(v.Bool())
	case reflect.String:
		pv.StringValue = proto.String(v.String())
	case reflect.Float32, reflect.Float64:
		pv.DoubleValue = proto.Float64(v.Float())
	case reflect.Ptr:
		if k, ok := v.Interface().(*Key); ok {
			if k != nil {
				pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
			}
		} else {
			unsupported = true
		}
	case reflect.Struct:
		switch t := v.Interface().(type) {
		case time.Time:
			if t.Before(minTime) || t.After(maxTime) {
				return nil, "time value out of range"
			}
			pv.Int64Value = proto.Int64(toUnixMicro(t))
		case appengine.GeoPoint:
			if !t.Valid() {
				return nil, "invalid GeoPoint value"
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
		default:
			unsupported = true
		}
	case reflect.Slice:
		if b, ok := v.Interface().([]byte); ok {
			pv.StringValue = proto.String(string(b))
		} else {
			// nvToProto should already catch slice values.
			// If we get here, we have a slice of slice values.
			unsupported = true
		}
	default:
		unsupported = true
	}
	if unsupported {
		return nil, "unsupported datastore value type: " + v.Type().String()
	}
	p = &pb.Property{
		Name:     proto.String(name),
		Value:    &pv,
		Multiple: proto.Bool(multiple),
	}
	if v.IsValid() {
		switch v.Interface().(type) {
		case []byte:
			p.Meaning = pb.Property_BLOB.Enum()
		case ByteString:
			p.Meaning = pb.Property_BYTESTRING.Enum()
		case appengine.BlobKey:
			p.Meaning = pb.Property_BLOBKEY.Enum()
		case time.Time:
			p.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.GeoPoint:
			p.Meaning = pb.Property_GEORSS_POINT.Enum()
		}
	}
	return p, ""
}

type saveOpts struct {
	noIndex   bool
	multiple  bool
	omitEmpty bool
}

// saveEntity converts a PropertyLoadSaver or struct pointer src into an
// EntityProto for the given key.
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
	var err error
	var props []Property
	if e, ok := src.(PropertyLoadSaver); ok {
		props, err = e.Save()
	} else {
		props, err = SaveStruct(src)
	}
	if err != nil {
		return nil, err
	}
	return propertiesToProto(defaultAppID, key, props)
}

func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
	if opts.omitEmpty && isEmptyValue(v) {
		return nil
	}
	p := Property{
		Name:     name,
		NoIndex:  opts.noIndex,
		Multiple: opts.multiple,
	}
	switch x := v.Interface().(type) {
	case *Key:
		p.Value = x
	case time.Time:
		p.Value = x
	case appengine.BlobKey:
		p.Value = x
	case appengine.GeoPoint:
		p.Value = x
	case ByteString:
		p.Value = x
	default:
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			p.Value = v.Int()
		case reflect.Bool:
			p.Value = v.Bool()
		case reflect.String:
			p.Value = v.String()
		case reflect.Float32, reflect.Float64:
			p.Value = v.Float()
		case reflect.Slice:
			if v.Type().Elem().Kind() == reflect.Uint8 {
				p.NoIndex = true
				p.Value = v.Bytes()
			}
		case reflect.Struct:
			if !v.CanAddr() {
				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
			}
			sub, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return fmt.Errorf("datastore: unsupported struct field: %v", err)
			}
			return sub.save(props, name+".", opts)
		}
	}
	if p.Value == nil {
		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
	}
	*props = append(*props, p)
	return nil
}

func (s structPLS) Save() ([]Property, error) {
	var props []Property
	if err := s.save(&props, "", saveOpts{}); err != nil {
		return nil, err
	}
	return props, nil
}

func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error {
	for name, f := range s.codec.fields {
		name = prefix + name
		v := s.v.FieldByIndex(f.path)
		if !v.IsValid() || !v.CanSet() {
			continue
		}
		var opts1 saveOpts
		opts1.noIndex = opts.noIndex || f.noIndex
		opts1.multiple = opts.multiple
		opts1.omitEmpty = f.omitEmpty // don't propagate
		// For slice fields that aren't []byte, save each element.
		if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
			opts1.multiple = true
			for j := 0; j < v.Len(); j++ {
				if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil {
					return err
				}
			}
			continue
		}
		// Otherwise, save the field itself.
		if err := saveStructProperty(props, name, opts1, v); err != nil {
			return err
		}
	}
	return nil
}

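
// saveStructSketch is a minimal illustrative sketch (not called anywhere in
// this package) of how the noindex and omitempty tag options surface in the
// save path above: noindex sets saveOpts.noIndex and omitempty sets
// saveOpts.omitEmpty, so an empty tagged field is skipped entirely. The Widget
// type and its tag values are assumptions made purely for illustration.
func saveStructSketch() ([]Property, error) {
	type Widget struct {
		Name string `datastore:",noindex"`
		Note string `datastore:",omitempty"`
	}
	// Name is saved with NoIndex set; the empty Note is omitted by
	// saveStructProperty because of the omitempty option.
	return SaveStruct(&Widget{Name: "n"})
}
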
func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
	e := &pb.EntityProto{
		Key: keyToProto(defaultAppID, key),
	}
	if key.parent == nil {
		e.EntityGroup = &pb.Path{}
	} else {
		e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
	}
	prevMultiple := make(map[string]bool)

	for _, p := range props {
		if pm, ok := prevMultiple[p.Name]; ok {
			if !pm || !p.Multiple {
				return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
			}
		} else {
			prevMultiple[p.Name] = p.Multiple
		}

		x := &pb.Property{
			Name:     proto.String(p.Name),
			Value:    new(pb.PropertyValue),
			Multiple: proto.Bool(p.Multiple),
		}
		switch v := p.Value.(type) {
		case int64:
			x.Value.Int64Value = proto.Int64(v)
		case bool:
			x.Value.BooleanValue = proto.Bool(v)
		case string:
			x.Value.StringValue = proto.String(v)
			if p.NoIndex {
				x.Meaning = pb.Property_TEXT.Enum()
			}
		case float64:
			x.Value.DoubleValue = proto.Float64(v)
		case *Key:
			if v != nil {
				x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
			}
		case time.Time:
			if v.Before(minTime) || v.After(maxTime) {
				return nil, fmt.Errorf("datastore: time value out of range")
			}
			x.Value.Int64Value = proto.Int64(toUnixMicro(v))
			x.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.BlobKey:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOBKEY.Enum()
		case appengine.GeoPoint:
			if !v.Valid() {
				return nil, fmt.Errorf("datastore: invalid GeoPoint value")
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
			x.Meaning = pb.Property_GEORSS_POINT.Enum()
		case []byte:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOB.Enum()
			if !p.NoIndex {
				return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
			}
		case ByteString:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BYTESTRING.Enum()
		default:
			if p.Value != nil {
				return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
			}
		}

		if p.NoIndex {
			e.RawProperty = append(e.RawProperty, x)
		} else {
			e.Property = append(e.Property, x)
			if len(e.Property) > maxIndexedProperties {
				return nil, errors.New("datastore: too many indexed properties")
			}
		}
	}
	return e, nil
}

// isEmptyValue is taken from the encoding/json package in the standard library.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		// TODO(performance): only the reflect.String case is needed here; the
		// other kinds are not supported property types (copied from the json package).
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		// TODO(performance): the Uint* kinds are unsupported property types and
		// should be removed (copied from the json package).
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	case reflect.Struct:
		switch x := v.Interface().(type) {
		case time.Time:
			return x.IsZero()
		}
	}
	return false
}

96 vendor/google.golang.org/appengine/datastore/transaction.go generated vendored Normal file
@@ -0,0 +1,96 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package datastore

import (
	"errors"

	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/datastore"
)

func init() {
	internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
		x.Transaction = t
	})
}

// ErrConcurrentTransaction is returned when a transaction is rolled back due
// to a conflict with a concurrent transaction.
var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")

// RunInTransaction runs f in a transaction. It calls f with a transaction
// context tc that f should use for all App Engine operations.
//
// If f returns nil, RunInTransaction attempts to commit the transaction,
// returning nil if it succeeds. If the commit fails due to a conflicting
// transaction, RunInTransaction retries f, each time with a new transaction
// context. It gives up and returns ErrConcurrentTransaction after three
// failed attempts. The number of attempts can be configured by specifying
// TransactionOptions.Attempts.
//
// If f returns non-nil, then any datastore changes will not be applied and
// RunInTransaction returns that same error. The function f is not retried.
//
// Note that when f returns, the transaction is not yet committed. Calling code
// must be careful not to assume that any of f's changes have been committed
// until RunInTransaction returns nil.
//
// Since f may be called multiple times, f should usually be idempotent.
// datastore.Get is not idempotent when unmarshaling slice fields.
//
// Nested transactions are not supported; c may not be a transaction context.
func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
	xg := false
	if opts != nil {
		xg = opts.XG
	}
	readOnly := false
	if opts != nil {
		readOnly = opts.ReadOnly
	}
	attempts := 3
	if opts != nil && opts.Attempts > 0 {
		attempts = opts.Attempts
	}
	var t *pb.Transaction
	var err error
	for i := 0; i < attempts; i++ {
		if t, err = internal.RunTransactionOnce(c, f, xg, readOnly, t); err != internal.ErrConcurrentTransaction {
			return err
		}
	}
	return ErrConcurrentTransaction
}

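
// runInTransactionSketch is a minimal illustrative sketch (not called anywhere
// in this package) of the read-modify-write pattern that RunInTransaction is
// documented for above; note that f may run more than once on contention. The
// Counter type and the caller-supplied key are assumptions made purely for
// illustration.
func runInTransactionSketch(c context.Context, key *Key) error {
	type Counter struct {
		Count int
	}
	return RunInTransaction(c, func(tc context.Context) error {
		var x Counter
		// A missing entity is fine here: the counter simply starts at zero.
		if err := Get(tc, key, &x); err != nil && err != ErrNoSuchEntity {
			return err
		}
		x.Count++
		_, err := Put(tc, key, &x)
		return err
	}, nil)
}
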
// TransactionOptions are the options for running a transaction.
type TransactionOptions struct {
	// XG is whether the transaction can cross multiple entity groups. In
	// comparison, a single group transaction is one where all datastore keys
	// used have the same root key. Note that cross group transactions do not
	// behave exactly like single group transactions: in particular, global
	// queries are much more likely to observe partially applied transactions
	// across the different entity groups.
	// It is valid to set XG to true even if the transaction is within a
	// single entity group.
	XG bool
	// Attempts controls the number of retries to perform when commits fail
	// due to a conflicting transaction. If omitted, it defaults to 3.
	Attempts int
	// ReadOnly controls whether the transaction is a read-only transaction.
	// Read-only transactions are potentially more efficient.
	ReadOnly bool
}