Add a batch mode that provides a simple log output (#85)
* [changelog] Updated
* [readme] Updated
* [domain] Config Add batch-mode flag
* [core] ConfigOption Add BatchMode option
* [core] ConfigQuery Add batchMode query. Also replaced verbose exists case clauses with a simple contains.
* [core] ConfigOptions added to replace Seq[ConfigOption]
* [core] Synchronise rename method to createPlan
* [cli] Program rename apply to run
* [storage-aws] S3StorageServiceBuilder stop using IO to create the object
* [storage-aws] S3StorageServiceBuilder make default service lazy
* [storage-aws] Rename S3ClientCopier => Copier
* [storage-aws] Rename S3ClientDeleter => Deleter
* [storage-aws] Rename S3ClientObjectLister => Lister
* [storage-aws] Only attach upload listener when not in batch mode. Only detects batch mode when selected as a command-line option.
* [core] Synchronise use leftMap rather than swap.map.swap
* [cli] ParseArgs add `-B` and `--batch` options to enable batch mode
* [core] ThorpArchive logs file uploaded when in batch mode
This commit is contained in:
parent
1440990d79
commit
1267b6e313
28 changed files with 143 additions and 91 deletions
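With this change a run can produce plain, line-oriented output suitable for writing to a log file. For example (the source path and bucket name are illustrative):

thorp --batch --source /path/to/files --bucket my-bucket > thorp.log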
@@ -10,6 +10,7 @@ The format is based on [[https://keepachangelog.com/en/1.0.0/][Keep a Changelog]
** Added

- Add a version command-line option (#99)
- Add a batch mode (#85)

* [0.6.0] - 2019-06-30
@@ -16,6 +16,8 @@ hash of the file contents.
thorp
Usage: thorp [options]

-V, --version            Display the version and quit
-B, --batch              Enable batch-mode
-s, --source <value>     Source directory to sync to S3
-b, --bucket <value>     S3 bucket name
-p, --prefix <value>     Prefix within the S3 Bucket

@@ -30,6 +32,11 @@ If you don't provide a ~source~ the current directory will be used.

The ~--include~ and ~--exclude~ parameters can be used more than once.

** Batch mode

Batch mode disables the ANSI console display and logs simple messages
that can be written to a file.

* Configuration

Configuration will be read from these files:
@@ -8,7 +8,7 @@ object Main extends IOApp {
override def run(args: List[String]): IO[ExitCode] = {
val exitCaseLogger = new PrintLogger(false)
ParseArgs(args)
.map(Program(_))
.map(Program.run)
.getOrElse(IO(ExitCode.Error))
.guaranteeCase {
case Canceled => exitCaseLogger.warn("Interrupted")
@@ -2,7 +2,7 @@ package net.kemitix.thorp.cli

import java.nio.file.Paths

import net.kemitix.thorp.core.ConfigOption
import net.kemitix.thorp.core.{ConfigOption, ConfigOptions}
import scopt.OParser

object ParseArgs {

@@ -14,8 +14,11 @@ object ParseArgs {
programName("thorp"),
head("thorp"),
opt[Unit]('V', "version")
.action((_, cos) => ConfigOption.Version :: cos)
.text("Show version"),
.action((_, cos) => ConfigOption.Version :: cos)
.text("Show version"),
opt[Unit]('B', "batch")
.action((_, cos) => ConfigOption.BatchMode :: cos)
.text("Enable batch-mode"),
opt[String]('s', "source")
.action((str, cos) => ConfigOption.Source(Paths.get(str)) :: cos)
.text("Source directory to sync to destination"),

@@ -45,7 +48,8 @@ object ParseArgs {
)
}

def apply(args: List[String]): Option[List[ConfigOption]] =
def apply(args: List[String]): Option[ConfigOptions] =
OParser.parse(configParser, args, List())
.map(ConfigOptions)

}
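A minimal sketch of what the parser now returns, using only the API shown in this diff (the argument values are placeholders, and the thorp.cli and thorp.core imports are assumed):

val parsed: Option[ConfigOptions] =
  ParseArgs(List("--batch", "--source", "/path/to/files", "--bucket", "my-bucket"))
val batchRequested: Boolean = parsed.exists(ConfigQuery.batchMode)  // true when --batch was given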
@@ -9,19 +9,19 @@ import net.kemitix.thorp.storage.aws.S3StorageServiceBuilder.defaultStorageServi

trait Program {

def apply(cliOptions: Seq[ConfigOption]): IO[ExitCode] = {
def run(cliOptions: ConfigOptions): IO[ExitCode] = {
implicit val logger: Logger = new PrintLogger()
if (ConfigQuery.showVersion(cliOptions)) IO {
println(s"Thorp v${thorp.BuildInfo.version}")
ExitCode.Success
} else
} else {
for {
storageService <- defaultStorageService
actions <- Synchronise(storageService, defaultHashService, cliOptions).valueOrF(handleErrors)
events <- handleActions(UnversionedMirrorArchive.default(storageService), actions)
_ <- storageService.shutdown
actions <- Synchronise.createPlan(defaultStorageService, defaultHashService, cliOptions).valueOrF(handleErrors)
events <- handleActions(UnversionedMirrorArchive.default(defaultStorageService, ConfigQuery.batchMode(cliOptions)), actions)
_ <- defaultStorageService.shutdown
_ <- SyncLogging.logRunFinished(events)
} yield ExitCode.Success
}
}

private def handleErrors(implicit logger: Logger): List[String] => IO[Stream[Action]] = {

@@ -34,7 +34,8 @@ trait Program {
}

private def handleActions(archive: ThorpArchive,
actions: Stream[Action]): IO[Stream[StorageQueueEvent]] =
actions: Stream[Action])
(implicit l: Logger): IO[Stream[StorageQueueEvent]] =
actions.foldLeft(Stream[IO[StorageQueueEvent]]()) {
(stream, action) => archive.update(action) ++ stream
}.sequence
@@ -1,7 +1,7 @@
package net.kemitix.thorp.cli

import net.kemitix.thorp.core.ConfigOption.Debug
import net.kemitix.thorp.core.{ConfigOption, Resource}
import net.kemitix.thorp.core.{ConfigOptions, Resource}
import org.scalatest.FunSpec

import scala.util.Try

@@ -26,11 +26,11 @@ class ParseArgsTest extends FunSpec {
}

describe("parse - debug") {
def invokeWithArgument(arg: String): List[ConfigOption] = {
def invokeWithArgument(arg: String): ConfigOptions = {
val strings = List("--source", pathTo("."), "--bucket", "bucket", arg)
.filter(_ != "")
val maybeOptions = ParseArgs(strings)
maybeOptions.getOrElse(List())
maybeOptions.getOrElse(ConfigOptions())
}

describe("when no debug flag") {
@@ -13,6 +13,9 @@ object ConfigOption {
case object Version extends ConfigOption {
override def update(config: Config): Config = config
}
case object BatchMode extends ConfigOption {
override def update(config: Config): Config = config.copy(batchMode = true)
}
case class Source(path: Path) extends ConfigOption {
override def update(config: Config): Config = config.copy(source = path.toFile)
}
@@ -0,0 +1,20 @@
package net.kemitix.thorp.core

import cats.Semigroup

case class ConfigOptions(options: List[ConfigOption] = List())
extends Semigroup[ConfigOptions] {

override def combine(x: ConfigOptions, y: ConfigOptions): ConfigOptions =
x ++ y

def ++(other: ConfigOptions): ConfigOptions =
ConfigOptions(options ++ other.options)

def ::(head: ConfigOption): ConfigOptions =
ConfigOptions(head :: options)

def contains[A1 >: ConfigOption](elem: A1): Boolean =
options contains elem

}
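A minimal usage sketch of the new ConfigOptions type, using only the operations defined above (the option values are illustrative):

val fromCli  = ConfigOptions(List(ConfigOption.BatchMode))
val fromFile = ConfigOptions(List(ConfigOption.Bucket("a-bucket")))
val combined = fromCli ++ fromFile               // CLI options stay at the head
val extended = ConfigOption.Version :: combined  // prepend a single option
val isBatch  = extended contains ConfigOption.BatchMode  // true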
@@ -2,23 +2,17 @@ package net.kemitix.thorp.core

trait ConfigQuery {

def showVersion(configOptions: Seq[ConfigOption]): Boolean =
configOptions.exists {
case ConfigOption.Version => true
case _ => false
}
def showVersion(configOptions: ConfigOptions): Boolean =
configOptions contains ConfigOption.Version

def ignoreUserOptions(configOptions: Seq[ConfigOption]): Boolean =
configOptions.exists {
case ConfigOption.IgnoreUserOptions => true
case _ => false
}
def batchMode(configOptions: ConfigOptions): Boolean =
configOptions contains ConfigOption.BatchMode

def ignoreGlobalOptions(configOptions: Seq[ConfigOption]): Boolean =
configOptions.exists {
case ConfigOption.IgnoreGlobalOptions => true
case _ => false
}
def ignoreUserOptions(configOptions: ConfigOptions): Boolean =
configOptions contains ConfigOption.IgnoreUserOptions

def ignoreGlobalOptions(configOptions: ConfigOptions): Boolean =
configOptions contains ConfigOption.IgnoreGlobalOptions

}
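A minimal sketch of the contains-based queries that replace the earlier exists/case clauses (the option values are illustrative):

val opts = ConfigOptions(List(ConfigOption.BatchMode, ConfigOption.Version))
ConfigQuery.batchMode(opts)          // true
ConfigQuery.showVersion(opts)        // true
ConfigQuery.ignoreUserOptions(opts)  // false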
@@ -19,7 +19,7 @@ trait ConfigurationBuilder {

private val defaultConfig: Config = Config(source = pwdFile)

def buildConfig(priorityOptions: Seq[ConfigOption]): IO[Either[NonEmptyChain[ConfigValidation], Config]] = {
def buildConfig(priorityOptions: ConfigOptions): IO[Either[NonEmptyChain[ConfigValidation], Config]] = {
val source = findSource(priorityOptions)
for {
sourceOptions <- sourceOptions(source)

@@ -30,30 +30,30 @@ trait ConfigurationBuilder {
} yield validateConfig(config).toEither
}

private def findSource(priorityOptions: Seq[ConfigOption]): File =
priorityOptions.foldRight(pwdFile)((co, f) => co match {
private def findSource(priorityOptions: ConfigOptions): File =
priorityOptions.options.foldRight(pwdFile)((co, f) => co match {
case ConfigOption.Source(source) => source.toFile
case _ => f
})

private def sourceOptions(source: File): IO[Seq[ConfigOption]] =
private def sourceOptions(source: File): IO[ConfigOptions] =
readFile(source, ".thorp.conf")

private def userOptions(higherPriorityOptions: Seq[ConfigOption]): IO[Seq[ConfigOption]] =
if (ConfigQuery.ignoreUserOptions(higherPriorityOptions)) IO(List())
private def userOptions(higherPriorityOptions: ConfigOptions): IO[ConfigOptions] =
if (ConfigQuery.ignoreUserOptions(higherPriorityOptions)) IO(ConfigOptions())
else readFile(userHome, ".config/thorp.conf")

private def globalOptions(higherPriorityOptions: Seq[ConfigOption]): IO[Seq[ConfigOption]] =
if (ConfigQuery.ignoreGlobalOptions(higherPriorityOptions)) IO(List())
private def globalOptions(higherPriorityOptions: ConfigOptions): IO[ConfigOptions] =
if (ConfigQuery.ignoreGlobalOptions(higherPriorityOptions)) IO(ConfigOptions())
else parseFile(Paths.get("/etc/thorp.conf"))

private def userHome = new File(System.getProperty("user.home"))

private def readFile(source: File, filename: String): IO[Seq[ConfigOption]] =
private def readFile(source: File, filename: String): IO[ConfigOptions] =
parseFile(source.toPath.resolve(filename))

private def collateOptions(configOptions: Seq[ConfigOption]): Config =
configOptions.foldRight(defaultConfig)((co, c) => co.update(c))
private def collateOptions(configOptions: ConfigOptions): Config =
configOptions.options.foldRight(defaultConfig)((co, c) => co.update(c))
}

object ConfigurationBuilder extends ConfigurationBuilder
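A small worked example of the precedence the foldRight in collateOptions gives, assuming (as the builder above suggests) that higher-priority options sit at the head of the combined list; the values are illustrative:

val merged = ConfigOptions(List(
  ConfigOption.Bucket("from-cli"),    // head: highest priority
  ConfigOption.Bucket("from-file")))  // tail: lower priority
// foldRight applies the tail's update first and the head's update last,
// so the resulting Config ends up with bucket "from-cli"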
@@ -8,7 +8,7 @@ import scala.collection.JavaConverters._

trait ParseConfigFile {

def parseFile(filename: Path): IO[Seq[ConfigOption]] =
def parseFile(filename: Path): IO[ConfigOptions] =
readFile(filename).map(ParseConfigLines.parseLines)

private def readFile(filename: Path) = {
@@ -7,8 +7,8 @@ import net.kemitix.thorp.core.ConfigOption._

trait ParseConfigLines {

def parseLines(lines: List[String]): List[ConfigOption] =
lines.flatMap(parseLine)
def parseLines(lines: List[String]): ConfigOptions =
ConfigOptions(lines.flatMap(parseLine))

private val pattern = "^\\s*(?<key>\\S*)\\s*=\\s*(?<value>\\S*)\\s*$"
private val format = Pattern.compile(pattern)
@@ -9,12 +9,12 @@ import net.kemitix.thorp.storage.api.{HashService, StorageService}

trait Synchronise {

def apply(storageService: StorageService,
def createPlan(storageService: StorageService,
hashService: HashService,
configOptions: Seq[ConfigOption])
configOptions: ConfigOptions)
(implicit l: Logger): EitherT[IO, List[String], Stream[Action]] =
EitherT(ConfigurationBuilder.buildConfig(configOptions))
.swap.map(errorMessages).swap
.leftMap(errorMessages)
.flatMap(config => useValidConfig(storageService, hashService)(config, l))

def errorMessages(errors: NonEmptyChain[ConfigValidation]): List[String] =
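A minimal sketch of the leftMap refactoring mentioned in the commit message, using only cats' EitherT (the values are illustrative): mapping the error channel directly is equivalent to swapping, mapping, and swapping back.

import cats.data.EitherT
import cats.effect.IO

val failed: EitherT[IO, String, Int] = EitherT.leftT[IO, Int]("config error")
val viaSwap    = failed.swap.map(msg => List(msg)).swap  // EitherT[IO, List[String], Int]
val viaLeftMap = failed.leftMap(msg => List(msg))        // same result, one call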
@@ -1,10 +1,15 @@
package net.kemitix.thorp.core

import cats.effect.IO
import net.kemitix.thorp.domain.StorageQueueEvent
import net.kemitix.thorp.domain.{LocalFile, Logger, StorageQueueEvent}

trait ThorpArchive {

def update(action: Action): Stream[IO[StorageQueueEvent]]
def update(action: Action)(implicit l: Logger): Stream[IO[StorageQueueEvent]]

def fileUploaded(localFile: LocalFile,
batchMode: Boolean)
(implicit l: Logger): IO[Unit] =
if (batchMode) l.info(s"Uploaded: ${localFile.remoteKey.key}") else IO.unit

}
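With batchMode set, each completed upload therefore emits one plain line through the Logger, along the lines of the following (the remote key is illustrative and any level prefix depends on the Logger implementation):

Uploaded: prefix/path/to/file.txt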
@@ -3,16 +3,19 @@ package net.kemitix.thorp.core
import cats.effect.IO
import net.kemitix.thorp.core.Action.{DoNothing, ToCopy, ToDelete, ToUpload}
import net.kemitix.thorp.domain.StorageQueueEvent.DoNothingQueueEvent
import net.kemitix.thorp.domain.{StorageQueueEvent, UploadEventListener}
import net.kemitix.thorp.domain.{LocalFile, Logger, StorageQueueEvent, UploadEventListener}
import net.kemitix.thorp.storage.api.StorageService

case class UnversionedMirrorArchive(storageService: StorageService) extends ThorpArchive {
override def update(action: Action): Stream[IO[StorageQueueEvent]] =
case class UnversionedMirrorArchive(storageService: StorageService,
batchMode: Boolean) extends ThorpArchive {
override def update(action: Action)
(implicit l: Logger): Stream[IO[StorageQueueEvent]] =
Stream(
action match {
case ToUpload(bucket, localFile) =>
for {
event <- storageService.upload(localFile, bucket, new UploadEventListener(localFile), 1)
event <- storageService.upload(localFile, bucket, batchMode, new UploadEventListener(localFile), 1)
_ <- fileUploaded(localFile, batchMode)
} yield event
case ToCopy(bucket, sourceKey, hash, targetKey) =>
for {

@@ -29,6 +32,7 @@ case class UnversionedMirrorArchive(storageService: StorageService) extends Thor
}

object UnversionedMirrorArchive {
def default(storageService: StorageService): ThorpArchive =
new UnversionedMirrorArchive(storageService)
def default(storageService: StorageService,
batchMode: Boolean): ThorpArchive =
new UnversionedMirrorArchive(storageService, batchMode)
}
@@ -7,7 +7,7 @@ import org.scalatest.FunSpec
class ParseConfigFileTest extends FunSpec {

private def invoke(filename: Path) = ParseConfigFile.parseFile(filename).unsafeRunSync
private val empty = List()
private val empty = ConfigOptions()

describe("parse a missing file") {
val filename = Paths.get("/path/to/missing/file")

@@ -29,7 +29,9 @@ class ParseConfigFileTest extends FunSpec {
}
describe("parse a file with properties") {
val filename = Resource(this, "simple-config").toPath
val expected = List(ConfigOption.Source(Paths.get("/path/to/source")), ConfigOption.Bucket("bucket-name"))
val expected = ConfigOptions(List(
ConfigOption.Source(Paths.get("/path/to/source")),
ConfigOption.Bucket("bucket-name")))
it("should return some options") {
assertResult(expected)(invoke(filename))
}
@@ -9,63 +9,63 @@ class ParseConfigLinesTest extends FunSpec {
describe("parse single lines") {
describe("source") {
it("should parse") {
val expected = List(ConfigOption.Source(Paths.get("/path/to/source")))
val expected = ConfigOptions(List(ConfigOption.Source(Paths.get("/path/to/source"))))
val result = ParseConfigLines.parseLines(List("source = /path/to/source"))
assertResult(expected)(result)
}
}
describe("bucket") {
it("should parse") {
val expected = List(ConfigOption.Bucket("bucket-name"))
val expected = ConfigOptions(List(ConfigOption.Bucket("bucket-name")))
val result = ParseConfigLines.parseLines(List("bucket = bucket-name"))
assertResult(expected)(result)
}
}
describe("prefix") {
it("should parse") {
val expected = List(ConfigOption.Prefix("prefix/to/files"))
val expected = ConfigOptions(List(ConfigOption.Prefix("prefix/to/files")))
val result = ParseConfigLines.parseLines(List("prefix = prefix/to/files"))
assertResult(expected)(result)
}
}
describe("include") {
it("should parse") {
val expected = List(ConfigOption.Include("path/to/include"))
val expected = ConfigOptions(List(ConfigOption.Include("path/to/include")))
val result = ParseConfigLines.parseLines(List("include = path/to/include"))
assertResult(expected)(result)
}
}
describe("exclude") {
it("should parse") {
val expected = List(ConfigOption.Exclude("path/to/exclude"))
val expected = ConfigOptions(List(ConfigOption.Exclude("path/to/exclude")))
val result = ParseConfigLines.parseLines(List("exclude = path/to/exclude"))
assertResult(expected)(result)
}
}
describe("debug - true") {
it("should parse") {
val expected = List(ConfigOption.Debug())
val expected = ConfigOptions(List(ConfigOption.Debug()))
val result = ParseConfigLines.parseLines(List("debug = true"))
assertResult(expected)(result)
}
}
describe("debug - false") {
it("should parse") {
val expected = List()
val expected = ConfigOptions()
val result = ParseConfigLines.parseLines(List("debug = false"))
assertResult(expected)(result)
}
}
describe("comment line") {
it("should be ignored") {
val expected = List()
val expected = ConfigOptions()
val result = ParseConfigLines.parseLines(List("# ignore me"))
assertResult(expected)(result)
}
}
describe("unrecognised option") {
it("should be ignored") {
val expected = List()
val expected = ConfigOptions()
val result = ParseConfigLines.parseLines(List("unsupported = option"))
assertResult(expected)(result)
}
@@ -18,13 +18,13 @@ class SyncSuite

private val source = Resource(this, "upload")
private val prefix = RemoteKey("prefix")
private val configOptions = List(
private val configOptions = ConfigOptions(List(
ConfigOption.Source(source.toPath),
ConfigOption.Bucket("bucket"),
ConfigOption.Prefix("prefix"),
ConfigOption.IgnoreGlobalOptions,
ConfigOption.IgnoreUserOptions
)
))
implicit private val logger: Logger = new DummyLogger
private val lastModified = LastModified(Instant.now)

@@ -50,8 +50,8 @@ class SyncSuite

def invokeSubject(storageService: StorageService,
hashService: HashService,
configOptions: List[ConfigOption]): Either[List[String], Stream[Action]] = {
Synchronise(storageService, hashService, configOptions).value.unsafeRunSync
configOptions: ConfigOptions): Either[List[String], Stream[Action]] = {
Synchronise.createPlan(storageService, hashService, configOptions).value.unsafeRunSync
}

describe("when all files should be uploaded") {

@@ -162,6 +162,7 @@ class SyncSuite

override def upload(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
uploadEventListener: UploadEventListener,
tryCount: Int): IO[UploadQueueEvent] =
IO.pure(UploadQueueEvent(localFile.remoteKey, localFile.hashes("md5")))
@@ -6,4 +6,5 @@ final case class Config(bucket: Bucket = Bucket(""),
prefix: RemoteKey = RemoteKey(""),
filters: List[Filter] = List(),
debug: Boolean = false,
batchMode: Boolean = false,
source: File)
@@ -13,6 +13,7 @@ trait StorageService {

def upload(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
uploadEventListener: UploadEventListener,
tryCount: Int): IO[StorageQueueEvent]
@@ -6,7 +6,7 @@ import com.amazonaws.services.s3.model.CopyObjectRequest
import net.kemitix.thorp.domain.StorageQueueEvent.CopyQueueEvent
import net.kemitix.thorp.domain._

class S3ClientCopier(amazonS3: AmazonS3) {
class Copier(amazonS3: AmazonS3) {

def copy(bucket: Bucket,
sourceKey: RemoteKey,

@@ -6,7 +6,7 @@ import com.amazonaws.services.s3.model.DeleteObjectRequest
import net.kemitix.thorp.domain.StorageQueueEvent.DeleteQueueEvent
import net.kemitix.thorp.domain.{Bucket, RemoteKey}

class S3ClientDeleter(amazonS3: AmazonS3) {
class Deleter(amazonS3: AmazonS3) {

def delete(bucket: Bucket,
remoteKey: RemoteKey): IO[DeleteQueueEvent] =

@@ -12,7 +12,7 @@ import net.kemitix.thorp.storage.aws.S3ObjectsByKey.byKey
import scala.collection.JavaConverters._
import scala.util.Try

class S3ClientObjectLister(amazonS3: AmazonS3) {
class Lister(amazonS3: AmazonS3) {

def listObjects(bucket: Bucket,
prefix: RemoteKey): EitherT[IO, String, S3ObjectsData] = {
@@ -12,10 +12,10 @@ class S3StorageService(amazonS3Client: => AmazonS3,
amazonS3TransferManager: => TransferManager)
extends StorageService {

lazy val objectLister = new S3ClientObjectLister(amazonS3Client)
lazy val copier = new S3ClientCopier(amazonS3Client)
lazy val objectLister = new Lister(amazonS3Client)
lazy val copier = new Copier(amazonS3Client)
lazy val uploader = new Uploader(amazonS3TransferManager)
lazy val deleter = new S3ClientDeleter(amazonS3Client)
lazy val deleter = new Deleter(amazonS3Client)

override def listObjects(bucket: Bucket,
prefix: RemoteKey): EitherT[IO, String, S3ObjectsData] =

@@ -29,9 +29,10 @@ class S3StorageService(amazonS3Client: => AmazonS3,

override def upload(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
uploadEventListener: UploadEventListener,
tryCount: Int): IO[StorageQueueEvent] =
uploader.upload(localFile, bucket, uploadEventListener, 1)
uploader.upload(localFile, bucket, batchMode, uploadEventListener, 1)

override def delete(bucket: Bucket,
remoteKey: RemoteKey): IO[StorageQueueEvent] =
@@ -1,6 +1,5 @@
package net.kemitix.thorp.storage.aws

import cats.effect.IO
import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import net.kemitix.thorp.storage.api.StorageService

@@ -11,11 +10,9 @@ object S3StorageServiceBuilder {
amazonS3TransferManager: TransferManager): StorageService =
new S3StorageService(amazonS3Client, amazonS3TransferManager)

def defaultStorageService: IO[StorageService] =
IO {
createService(
AmazonS3ClientBuilder.defaultClient,
TransferManagerBuilder.defaultTransferManager)
}
lazy val defaultStorageService: StorageService =
createService(
AmazonS3ClientBuilder.defaultClient,
TransferManagerBuilder.defaultTransferManager)

}
@@ -15,10 +15,11 @@ class Uploader(transferManager: => AmazonTransferManager) {

def upload(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
uploadEventListener: UploadEventListener,
tryCount: Int): IO[StorageQueueEvent] =
for {
upload <- transfer(localFile, bucket, uploadEventListener)
upload <- transfer(localFile, bucket, batchMode, uploadEventListener)
action = upload match {
case Right(r) => UploadQueueEvent(RemoteKey(r.getKey), MD5Hash(r.getETag))
case Left(e) => ErrorQueueEvent(localFile.remoteKey, e)

@@ -27,10 +28,11 @@ class Uploader(transferManager: => AmazonTransferManager) {

private def transfer(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
uploadEventListener: UploadEventListener,
): IO[Either[Throwable, UploadResult]] = {
val listener: ProgressListener = progressListener(uploadEventListener)
val putObjectRequest = request(localFile, bucket, listener)
val putObjectRequest = request(localFile, bucket, batchMode, listener)
IO {
Try(transferManager.upload(putObjectRequest))
.map(_.waitForUploadResult)

@@ -38,12 +40,16 @@ class Uploader(transferManager: => AmazonTransferManager) {
}
}

private def request(localFile: LocalFile, bucket: Bucket, listener: ProgressListener): PutObjectRequest = {
private def request(localFile: LocalFile,
bucket: Bucket,
batchMode: Boolean,
listener: ProgressListener): PutObjectRequest = {
val metadata = new ObjectMetadata()
localFile.md5base64.foreach(metadata.setContentMD5)
new PutObjectRequest(bucket.name, localFile.remoteKey.key, localFile.file)
val request = new PutObjectRequest(bucket.name, localFile.remoteKey.key, localFile.file)
.withMetadata(metadata)
.withGeneralProgressListener(listener)
if (batchMode) request
else request.withGeneralProgressListener(listener)
}

private def progressListener(uploadEventListener: UploadEventListener) =
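A minimal restatement of the listener decision made in request(...) above, as a standalone helper (the AWS SDK v1 types are those already used by this module; the helper itself is illustrative):

import com.amazonaws.event.ProgressListener
import com.amazonaws.services.s3.model.PutObjectRequest

// In batch mode no progress listener is attached, so no per-part progress
// events reach the console and the output stays as simple log lines.
def withListener(request: PutObjectRequest,
                 batchMode: Boolean,
                 listener: ProgressListener): PutObjectRequest =
  if (batchMode) request
  else request.withGeneralProgressListener(listener)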
@@ -102,6 +102,8 @@ class StorageServiceSuite
Map("md5" -> hash)
}

val batchMode: Boolean = true

describe("upload") {

describe("when uploading a file") {

@@ -127,7 +129,7 @@ class StorageServiceSuite
pending
//FIXME: works okay on its own, but fails when run with others
val expected = UploadQueueEvent(remoteKey, Root.hash)
val result = storageService.upload(localFile, bucket, uploadEventListener, 1)
val result = storageService.upload(localFile, bucket, batchMode, uploadEventListener, 1)
assertResult(expected)(result)
}
}
@@ -27,6 +27,8 @@ class UploaderSuite
"md5" -> hash
)

val batchMode: Boolean = true

describe("S3ClientMultiPartTransferManagerSuite") {
describe("upload") {
pending

@@ -43,7 +45,7 @@ class UploaderSuite
val uploader = new Uploader(amazonS3TransferManager)
it("should upload") {
val expected = UploadQueueEvent(returnedKey, returnedHash)
val result = uploader.upload(bigFile, config.bucket, uploadEventListener, 1)
val result = uploader.upload(bigFile, config.bucket, batchMode, uploadEventListener, 1)
assertResult(expected)(result)
}
}