support tagging based policy conditions (#15763)

Harshavardhana 2022-09-28 11:25:46 -07:00 committed by GitHub
parent 4f1ff9c4d9
commit 41b633f5ea
9 changed files with 189 additions and 117 deletions


@@ -27,6 +27,7 @@ import (
jsoniter "github.com/json-iterator/go"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
@@ -125,6 +126,20 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
cloneHeader := r.Header.Clone()
if userTags := cloneHeader.Get(xhttp.AmzObjectTagging); userTags != "" {
tag, _ := tags.ParseObjectTags(userTags)
if tag != nil {
tagMap := tag.ToMap()
keys := make([]string, 0, len(tagMap))
for k, v := range tagMap {
args[pathJoin("ExistingObjectTag", k)] = []string{v}
args[pathJoin("RequestObjectTag", k)] = []string{v}
keys = append(keys, k)
}
args["RequestObjectTagKeys"] = keys
}
}
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold,
@@ -137,6 +152,9 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
}
for key, values := range cloneHeader {
if strings.EqualFold(key, xhttp.AmzObjectTagging) {
continue
}
if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...)
} else {

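The new condition values surface the request and object tags to the policy engine under the standard S3 condition keys s3:RequestObjectTag/<key>, s3:ExistingObjectTag/<key>, and s3:RequestObjectTagKeys. As a rough illustration of what this enables (not part of the commit; bucket name and tag value are placeholders), a bucket policy can now gate object access on an existing tag:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Illustrative bucket policy (placeholder bucket name and tag value):
// GetObject is allowed only when the object already carries the tag
// classification=public, evaluated via the condition values built above.
const policy = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": ["*"]},
    "Action": ["s3:GetObject"],
    "Resource": ["arn:aws:s3:::mybucket/*"],
    "Condition": {
      "StringEquals": {"s3:ExistingObjectTag/classification": "public"}
    }
  }]
}`

func main() {
	if !json.Valid([]byte(policy)) {
		log.Fatal("policy document is not valid JSON")
	}
	fmt.Println(policy)
}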

@@ -418,6 +418,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
return checkPreconditions(ctx, w, r, oi, opts)
}
var proxy proxyResult
gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
if err != nil {
@@ -463,6 +464,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
}
QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -472,29 +474,29 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
objInfo := gr.ObjInfo
// Automatically remove the object/version is an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
if !proxy.Proxy { // apply lifecycle rules only for local requests
// Automatically remove the object/version is an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}
}
QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
}
// Queue failed/pending replication automatically
if !proxy.Proxy {
QueueReplicationHeal(ctx, bucket, objInfo)
}
// filter object lock metadata if permission does not permit
getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
@@ -632,6 +634,36 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
return
}
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
var proxy proxyResult
if err != nil {
// proxy HEAD to replication target if active-active replication configured on bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
if rangeHeader != "" {
rs, _ = parseRequestRangeSpec(rangeHeader)
}
var oi ObjectInfo
oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts)
if proxy.Proxy {
objInfo = oi
}
if proxy.Err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, proxy.Err), r.URL)
return
}
}
}
if err == nil && objInfo.UserTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, objInfo.UserTags)
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in
@@ -653,7 +685,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
}) {
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIError(ctx, err).Code == "NoSuchKey" {
s3Error = ErrNoSuchKey
}
@@ -662,67 +693,46 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
var proxy proxyResult
if err != nil {
var oi ObjectInfo
// proxy HEAD to replication target if active-active replication configured on bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
if rangeHeader != "" {
rs, _ = parseRequestRangeSpec(rangeHeader)
if err != nil && !proxy.Proxy {
if globalBucketVersioningSys.PrefixEnabled(bucket, object) {
switch {
case !objInfo.VersionPurgeStatus.Empty():
w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(objInfo.VersionPurgeStatus)}
case !objInfo.ReplicationStatus.Empty() && objInfo.DeleteMarker:
w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(objInfo.ReplicationStatus)}
}
oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts)
if proxy.Proxy {
objInfo = oi
}
}
if !proxy.Proxy {
if globalBucketVersioningSys.PrefixEnabled(bucket, object) {
switch {
case !objInfo.VersionPurgeStatus.Empty():
w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(objInfo.VersionPurgeStatus)}
case !objInfo.ReplicationStatus.Empty() && objInfo.DeleteMarker:
w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(objInfo.ReplicationStatus)}
}
// Versioning enabled quite possibly object is deleted might be delete-marker
// if present set the headers, no idea why AWS S3 sets these headers.
if objInfo.VersionID != "" && objInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
}
// Versioning enabled quite possibly object is deleted might be delete-marker
// if present set the headers, no idea why AWS S3 sets these headers.
if objInfo.VersionID != "" && objInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
}
QueueReplicationHeal(ctx, bucket, objInfo)
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
// Automatically remove the object/version is an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
if !proxy.Proxy { // apply lifecycle rules only locally not for proxied requests
// Automatically remove the object/version is an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}
}
// Queue failed/pending replication automatically
if !proxy.Proxy {
QueueReplicationHeal(ctx, bucket, objInfo)
}
@@ -3197,15 +3207,19 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h
return
}
// Allow putObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
// Tags XML will not be bigger than 1MiB in size, fail if its bigger.
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, 1<<20))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, tags.String())
// Allow putObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -3283,12 +3297,6 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
return
}
// Allow deleteObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -3300,6 +3308,18 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if userTags := oi.UserTags; userTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, oi.UserTags)
}
// Allow deleteObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
opts.UserDefined = make(map[string]string)

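Parsing the tag set and re-injecting it into the X-Amz-Tagging header before the authorization check means PutObjectTagging can be constrained on the tags being written (s3:RequestObjectTag/<key>) and DeleteObjectTagging on the tags the object already carries (s3:ExistingObjectTag/<key>). A rough client-side sketch with minio-go, not part of the commit; endpoint, credentials, and bucket/object names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// PutObjectTagging sends the tag set in the request body; the handler
	// now copies it into X-Amz-Tagging so s3:RequestObjectTag/<key>
	// conditions are evaluated before the action is authorized.
	t, err := tags.NewTags(map[string]string{"classification": "public"}, true)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.PutObjectTagging(ctx, "mybucket", "object.txt", t, minio.PutObjectTaggingOptions{}); err != nil {
		log.Fatal(err)
	}

	// RemoveObjectTagging is checked against the object's existing tags
	// (s3:ExistingObjectTag/<key>) before the tag set is deleted.
	if err := client.RemoveObjectTagging(ctx, "mybucket", "object.txt", minio.RemoveObjectTaggingOptions{}); err != nil {
		log.Fatal(err)
	}
}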

@@ -30,6 +30,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v7/pkg/tags"
sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
@@ -129,6 +130,20 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
return
}
if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if !objectAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
metadata[xhttp.AmzObjectTagging] = objTags
}
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

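The X-Amz-Tagging header is now also validated when a multipart upload is initiated, so tag-conditioned upload policies cover large objects as well. A minimal minio-go sketch (endpoint, credentials, and file name are placeholders, not from the commit); with an unknown size the client streams the object through the multipart path, which is where the check above runs:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("large.bin") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// UserTags is sent as the X-Amz-Tagging header. With size -1 minio-go
	// uploads via multipart, so the tag set is parsed and validated when
	// the upload is initiated on the server.
	_, err = client.PutObject(context.Background(), "mybucket", "large.bin", f, -1,
		minio.PutObjectOptions{UserTags: map[string]string{"classification": "public"}})
	if err != nil {
		log.Fatal(err)
	}
}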
go.mod

@@ -48,8 +48,8 @@ require (
github.com/minio/highwayhash v1.0.2
github.com/minio/kes v0.21.0
github.com/minio/madmin-go v1.4.29
github.com/minio/minio-go/v7 v7.0.37
github.com/minio/pkg v1.4.0
github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a
github.com/minio/pkg v1.4.2
github.com/minio/selfupdate v0.5.0
github.com/minio/sha256-simd v1.0.0
github.com/minio/simdjson-go v0.4.2

go.sum

@@ -657,11 +657,11 @@ github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77Z
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do=
github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8=
github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a h1:COFh7S3tOKmJNYtKKFAuHQFH7MAaXxg4aAluXC9KQgc=
github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
github.com/minio/pkg v1.1.20/go.mod h1:Xo7LQshlxGa9shKwJ7NzQbgW4s8T/Wc1cOStR/eUiMY=
github.com/minio/pkg v1.4.0 h1:bVdmz8vgv5bvLysHY2kuviuof0kteY6YPMRXCjDqJU4=
github.com/minio/pkg v1.4.0/go.mod h1:mxCLAG+fOGIQr6odQ5Ukqc6qv9Zj6v1d6TD3NP82B7Y=
github.com/minio/pkg v1.4.2 h1:QEToJld+cy+mMLDv084kIOgzjJQMbM+ioI/WotHeYQY=
github.com/minio/pkg v1.4.2/go.mod h1:mxCLAG+fOGIQr6odQ5Ukqc6qv9Zj6v1d6TD3NP82B7Y=
github.com/minio/selfupdate v0.5.0 h1:0UH1HlL49+2XByhovKl5FpYTjKfvrQ2sgL1zEXK6mfI=
github.com/minio/selfupdate v0.5.0/go.mod h1:mcDkzMgq8PRcpCRJo/NlPY7U45O5dfYl2Y0Rg7IustY=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=


@@ -20,7 +20,6 @@ package lifecycle
import (
"encoding/xml"
"io"
"log"
"github.com/minio/minio-go/v7/pkg/tags"
)
@@ -172,7 +171,6 @@ func (f Filter) TestTags(userTags string) bool {
parsedTags, err := tags.ParseObjectTags(userTags)
if err != nil {
log.Printf("Unexpected object tag found: `%s`\n", userTags)
return false
}
tagsMap := parsedTags.ToMap()


@@ -19,6 +19,8 @@ package replication
import (
"encoding/xml"
"github.com/minio/minio-go/v7/pkg/tags"
)
var errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified")
@@ -29,8 +31,9 @@ type Filter struct {
Prefix string
And And
Tag Tag
// Caching tags, only once
cachedTags map[string]struct{}
cachedTags map[string]string
}
// IsEmpty returns true if filter is not set
@@ -93,27 +96,43 @@ func (f Filter) Validate() error {
// TestTags tests if the object tags satisfy the Filter tags requirement,
// it returns true if there is no tags in the underlying Filter.
func (f *Filter) TestTags(ttags []string) bool {
func (f *Filter) TestTags(userTags string) bool {
if f.cachedTags == nil {
tags := make(map[string]struct{})
cached := make(map[string]string)
for _, t := range append(f.And.Tags, f.Tag) {
if !t.IsEmpty() {
tags[t.String()] = struct{}{}
cached[t.Key] = t.Value
}
}
f.cachedTags = tags
f.cachedTags = cached
}
for ct := range f.cachedTags {
foundTag := false
for _, t := range ttags {
if ct == t {
foundTag = true
break
}
}
if !foundTag {
return false
// This filter does not have any tags, always return true
if len(f.cachedTags) == 0 {
return true
}
parsedTags, err := tags.ParseObjectTags(userTags)
if err != nil {
return false
}
tagsMap := parsedTags.ToMap()
// This filter has tags configured but this object
// does not have any tag, skip this object
if len(tagsMap) == 0 {
return false
}
// Both filter and object have tags, find a match,
// skip this object otherwise
for k, cv := range f.cachedTags {
v, ok := tagsMap[k]
if ok && v == cv {
return true
}
}
return true
return false
}

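Filter.TestTags now takes the raw object tag string, parses it once, and matches on key/value pairs instead of comparing pre-split k=v fragments against a set. A self-contained sketch of the new matching semantics (matchTags and filterTags are stand-ins for the internal method and its cachedTags field, not exported API):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/tags"
)

// matchTags mirrors the rewritten Filter.TestTags: an empty filter matches
// everything, an untagged object never matches a tagged filter, and
// otherwise a single exact key/value match is enough.
func matchTags(filterTags map[string]string, userTags string) bool {
	if len(filterTags) == 0 {
		return true
	}
	parsed, err := tags.ParseObjectTags(userTags)
	if err != nil {
		return false
	}
	objTags := parsed.ToMap()
	if len(objTags) == 0 {
		return false
	}
	for k, v := range filterTags {
		if ov, ok := objTags[k]; ok && ov == v {
			return true
		}
	}
	return false
}

func main() {
	filter := map[string]string{"env": "prod"}
	fmt.Println(matchTags(filter, "env=prod&team=infra")) // true
	fmt.Println(matchTags(filter, "env=dev"))             // false
	fmt.Println(matchTags(nil, ""))                       // true: filter has no tags configured
}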

@@ -199,7 +199,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
if !strings.HasPrefix(obj.Name, rule.Prefix()) {
continue
}
if rule.Filter.TestTags(strings.Split(obj.UserTags, "&")) {
if rule.Filter.TestTags(obj.UserTags) {
rules = append(rules, rule)
}
}


@@ -295,12 +295,14 @@ func TestReplicate(t *testing.T) {
{ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. replica syncing disabled, this object is NOT a replica
}
for i, testCase := range testCases {
result := testCase.c.Replicate(testCase.opts)
if result != testCase.expectedResult {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.opts.Name, func(t *testing.T) {
result := testCase.c.Replicate(testCase.opts)
if result != testCase.expectedResult {
t.Errorf("expected: %v, got: %v", testCase.expectedResult, result)
}
})
}
}