feat: support multi files upload & update dependency #83

Merged
6 commits merged on Aug 30, 2023
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -1,12 +1,15 @@
# Changelog

## v0.1.0

FEATURES
* [#83](https://github.com/bnb-chain/greenfield-cmd/pull/83) support uploading multiple files or a folder with one command

## v0.1.0-alpha.2

FEATURES
* [#80](https://github.com/bnb-chain/greenfield-cmd/pull/80) update dependency and support new group APIs, including "group ls", "group ls-member", "group ls-belong" and "policy ls"


## v0.1.0-alpha.1

FEATURES
31 changes: 30 additions & 1 deletion README.md
@@ -21,7 +21,7 @@ To obtain the latest release, please visit the following URL: https://github.com
git clone https://github.com/bnb-chain/greenfield-cmd.git
cd greenfield-cmd
# Find the latest release here: https://github.com/bnb-chain/greenfield-cmd/releases
git checkout -b branch-name v0.0.9
git checkout -b branch-name v0.1.0
make build
cd build
./gnfd-cmd -h
@@ -180,6 +180,30 @@ gnfd-cmd object get gnfd://gnfd-bucket/gnfd-object file-path
The filepath can be a specific file path, a directory path, or not set at all.
If not set, the command will download the content to a file with the same name as the object name in the current directory. If it is set as a directory, the command will download the object file into the directory.
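
For illustration, here is a minimal sketch of the two download cases described above; the local directory name `./downloads/` is just a placeholder:
```
// download to a file named gnfd-object in the current directory
gnfd-cmd object get gnfd://gnfd-bucket/gnfd-object

// download the object into an existing local directory
gnfd-cmd object get gnfd://gnfd-bucket/gnfd-object ./downloads/
```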

(3) create empty folder

Please note that the object name corresponding to the folder must end with a "/" suffix
```
gnfd-cmd object put gnfd://gnfd-bucket/folder/
```

(4) upload local folder

To upload a local folder (including all the files in it), use the --recursive flag and specify the local folder path
```
gnfd-cmd object put --recursive local-folder-path gnfd://gnfd-bucket
```
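
Based on the uploadFolder implementation in this PR, the object name of each uploaded file appears to be its walked local path (including the folder name); a small sketch with hypothetical file names:
```
// assuming local-folder-path/a.txt and local-folder-path/sub/b.txt exist,
// they would be stored as objects named roughly local-folder-path/a.txt and local-folder-path/sub/b.txt
gnfd-cmd object put --recursive local-folder-path gnfd://gnfd-bucket
```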

(5) upload multiple files

To upload multiple files with one command, specify all the file paths to be uploaded one by one.
The files will be uploaded to the same bucket.

```
gnfd-cmd object put filepath1 filepath2 ... gnfd://gnfd-bucket
```
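
For example, assuming two local files photo1.jpg and photo2.jpg exist, the following uploads both to the same bucket, using each file name as its object name:
```
gnfd-cmd object put photo1.jpg photo2.jpg gnfd://gnfd-bucket
```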


#### Group Operations

The group commands are used to create groups, update group members, delete groups, and query group info.
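
As a quick way to explore these commands (the subcommand names below come from the changelog; this sketch assumes the built-in help is available via -h, which shows the exact arguments):
```
// show the group subcommands, e.g. "group ls", "group ls-member" and "group ls-belong"
gnfd-cmd group -h
```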
@@ -242,6 +266,11 @@ gnfd-cmd bucket ls
// list objects of the bucket
gnfd-cmd object ls gnfd://gnfd-bucket

// list objects of the bucket in a recursive way
gnfd-cmd object ls --recursive gnfd://gnfd-bucket

// list the objects by prefix
gnfd-cmd object ls --recursive gnfd://gnfd-bucket/prefixName
```
#### Delete Operations
```
189 changes: 144 additions & 45 deletions cmd/cmd_object.go
@@ -25,14 +25,17 @@ func cmdPutObj() *cli.Command {
Name: "put",
Action: putObject,
Usage: "create object on chain and upload payload of object to SP",
ArgsUsage: "[filePath] OBJECT-URL",
ArgsUsage: "[filePath]... OBJECT-URL",
Description: `
Send createObject txn to chain and upload the payload of object to the storage provider.
The command need to pass the file path inorder to compute hash roots on client
The command needs to pass the file path in order to compute hash roots on the client.
Note that uploading with the recursive flag only supports folders.

Examples:
# create object and upload file to storage provider, the corresponding object is gnfd-object
$ gnfd-cmd object put file.txt gnfd://gnfd-bucket/gnfd-object`,
$ gnfd-cmd object put file.txt gnfd://gnfd-bucket/gnfd-object
# upload the files inside the folder
$ gnfd-cmd object put --recursive folderName gnfd://bucket-name`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: secondarySPFlag,
@@ -65,6 +68,11 @@ $ gnfd-cmd object put file.txt gnfd://gnfd-bucket/gnfd-object`,
"a file in multiple parts, where each part is uploaded separately.This allows the upload to be resumed from " +
"where it left off in case of interruptions or failures, rather than starting the entire upload process from the beginning.",
},
&cli.BoolFlag{
Name: recursiveFlag,
Value: false,
Usage: "performed on all files or objects under the specified directory or prefix in a recursive way",
},
},
}
}
@@ -224,71 +232,153 @@ $ gnfd-cmd object mirror --destChainId 97 --bucketName yourBucketName --objectNa

// putObject uploads the payload of the file, finishing the third stage of putObject
func putObject(ctx *cli.Context) error {
if ctx.NArg() != 1 && ctx.NArg() != 2 {
if ctx.NArg() < 1 {
return toCmdErr(fmt.Errorf("args number error"))
}

var (
uploadFolder = false
isUploadSingleFolder bool
bucketName, objectName, filePath string
fileReader io.Reader
objectSize int64
err error
exists bool
urlInfo string
)

gnfdClient, err := NewClient(ctx, false)
if err != nil {
return err
}

supportRecursive := ctx.Bool(recursiveFlag)

if ctx.NArg() == 1 {
// upload an empty folder
urlInfo = ctx.Args().Get(0)
bucketName, objectName, err = getObjAndBucketNames(urlInfo)
if err != nil {
return toCmdErr(err)
}
if strings.HasSuffix(objectName, "/") {
uploadFolder = true
isUploadSingleFolder = true
} else {
return toCmdErr(errors.New("no file path to upload, if you need create a folder, the folder name should be end with /"))
}

if err = uploadFile(bucketName, objectName, filePath, urlInfo, ctx, gnfdClient, isUploadSingleFolder, true, 0); err != nil {
return toCmdErr(err)
}

} else {
// read the local file payload
filePath = ctx.Args().Get(0)
exists, objectSize, err = pathExists(filePath)
if err != nil {
return err
// upload files in folder in a recursive way
if supportRecursive {
urlInfo = ctx.Args().Get(1)
if err = uploadFolder(urlInfo, ctx, gnfdClient); err != nil {
return toCmdErr(err)
}
return nil
}
if !exists {
return fmt.Errorf("upload file not exists")
} else if objectSize > int64(maxFileSize) {
return fmt.Errorf("upload file larger than 10G ")

filePathList := make([]string, 0)
argNum := ctx.Args().Len()
for i := 0; i < argNum-1; i++ {
filePathList = append(filePathList, ctx.Args().Get(i))
}

// Open the referenced file.
file, err := os.Open(filePath)
if err != nil {
return err
var needUploadMultiFiles bool
if len(filePathList) > 1 {
needUploadMultiFiles = true
}
defer file.Close()
fileReader = file

urlInfo = ctx.Args().Get(1)
bucketName, objectName, err = getObjAndBucketNames(urlInfo)
if err != nil {
// upload multiple files
if needUploadMultiFiles {
urlInfo = ctx.Args().Get(argNum - 1)
bucketName = ParseBucket(urlInfo)
if bucketName == "" {
return toCmdErr(errors.New("fail to parse bucket name"))
}
// if the object name has not been set, set the file name as object name
objectName = filepath.Base(filePath)

for idx, fileName := range filePathList {
nameList := strings.Split(fileName, "/")
objectName = nameList[len(nameList)-1]
objectSize, err = parseFileByArg(ctx, idx)
if err != nil {
return toCmdErr(err)
}

if err = uploadFile(bucketName, objectName, fileName, urlInfo, ctx, gnfdClient, false, false, objectSize); err != nil {
fmt.Println("upload object:", objectName, "err", err)
}
}
} else {
// upload single file
objectSize, err = parseFileByArg(ctx, 0)
if err != nil {
return toCmdErr(err)
}
urlInfo = ctx.Args().Get(1)
bucketName, objectName, err = getObjAndBucketNames(urlInfo)
if err != nil {
bucketName = ParseBucket(urlInfo)
if bucketName == "" {
return toCmdErr(errors.New("fail to parse bucket name"))
}
// if the object name has not been set, set the file name as object name
objectName = filepath.Base(filePathList[0])
}
if err = uploadFile(bucketName, objectName, filePathList[0], urlInfo, ctx, gnfdClient, false, true, objectSize); err != nil {
return toCmdErr(err)
}
}
}

gnfdClient, err := NewClient(ctx, false)
return nil
}

// uploadFolder uploads the folder and the files inside it to the bucket in a recursive way
func uploadFolder(urlInfo string, ctx *cli.Context,
gnfdClient client.Client) error {
// upload folder with recursive flag
bucketName := ParseBucket(urlInfo)
if bucketName == "" {
return errors.New("fail to parse bucket name")
}

folderName := ctx.Args().Get(0)
fileInfo, err := os.Stat(folderName)
if err != nil {
return err
}

c, cancelCreateBucket := context.WithCancel(globalContext)
defer cancelCreateBucket()
if !fileInfo.IsDir() {
return errors.New("failed to parse folder path with recursive flag")
}
fileInfos := make([]os.FileInfo, 0)
filePaths := make([]string, 0)
listFolderErr := filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
fileInfos = append(fileInfos, info)
filePaths = append(filePaths, path)
}
return nil
})

if listFolderErr != nil {
return listFolderErr
}
// upload folder
for id, info := range fileInfos {
// pathList := strings.Split(info.Name(), "/")
objectName := filePaths[id]
if uploadErr := uploadFile(bucketName, objectName, filePaths[id], urlInfo, ctx, gnfdClient, false, false, info.Size()); uploadErr != nil {
fmt.Printf("failed to upload object: %s, error:%v \n", objectName, uploadErr)
}
}

return nil
}

func uploadFile(bucketName, objectName, filePath, urlInfo string, ctx *cli.Context,
gnfdClient client.Client, uploadSingleFolder, printTxnHash bool, objectSize int64) error {

contentType := ctx.String(contentTypeFlag)
secondarySPAccs := ctx.String(secondarySPFlag)
@@ -319,23 +409,34 @@ func putObject(ctx *cli.Context) error {
opts.SecondarySPAccs = addrList
}

_, err = gnfdClient.HeadObject(c, bucketName, objectName)
c, cancelPutObject := context.WithCancel(globalContext)
defer cancelPutObject()

_, err := gnfdClient.HeadObject(c, bucketName, objectName)
var txnHash string
// if err==nil, the object exists on chain, no need to createObject
if err != nil {
if uploadFolder {
if uploadSingleFolder {
txnHash, err = gnfdClient.CreateFolder(c, bucketName, objectName, opts)
if err != nil {
return toCmdErr(err)
}
} else {
txnHash, err = gnfdClient.CreateObject(c, bucketName, objectName, fileReader, opts)
// Open the referenced file.
file, err := os.Open(filePath)
if err != nil {
return err
}
defer file.Close()
txnHash, err = gnfdClient.CreateObject(c, bucketName, objectName, file, opts)
if err != nil {
return toCmdErr(err)
}
}
fmt.Printf("object %s created on chain \n", objectName)
fmt.Println("transaction hash: ", txnHash)
if printTxnHash {
fmt.Printf("object %s created on chain \n", objectName)
fmt.Println("transaction hash: ", txnHash)
}
} else {
fmt.Printf("object %s already exist \n", objectName)
}
@@ -361,8 +462,7 @@ func putObject(ctx *cli.Context) error {

if err = gnfdClient.PutObject(c, bucketName, objectName,
objectSize, reader, opt); err != nil {
fmt.Println("put object fail:", err.Error())
return nil
return toCmdErr(err)
}

// Check if object is sealed
@@ -374,9 +474,9 @@ func putObject(ctx *cli.Context) error {
case <-timeout:
return toCmdErr(errors.New("object not sealed after 15 seconds"))
case <-ticker.C:
headObjOutput, err := gnfdClient.HeadObject(c, bucketName, objectName)
if err != nil {
return err
headObjOutput, queryErr := gnfdClient.HeadObject(c, bucketName, objectName)
if queryErr != nil {
return queryErr
}

if headObjOutput.ObjectInfo.GetObjectStatus().String() == "OBJECT_STATUS_SEALED" {
@@ -386,7 +486,6 @@ func putObject(ctx *cli.Context) error {
}
}
}

}

// getObject download the object payload from sp
@@ -527,7 +626,6 @@ func listObjects(ctx *cli.Context) error {
}

return nil

}

func listObjectByPage(cli client.Client, c context.Context, bucketName, prefixName string, isRecursive bool) error {
@@ -553,8 +651,9 @@ func listObjectByPage(cli client.Client, c context.Context, bucketName, prefixNa
if err != nil {
return toCmdErr(err)
}

printListResult(listResult)
if listResult.IsTruncated {
if !listResult.IsTruncated {
break
}

@@ -665,7 +764,7 @@ func pathExists(path string) (bool, int64, error) {

if err == nil {
if stat.IsDir() {
return false, 0, fmt.Errorf("not support upload dir")
return false, 0, fmt.Errorf("not support upload dir without recursive flag")
}
return true, stat.Size(), nil
}