code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.google.protobuf
import java.io.IOException
/** Signals that a protocol buffer message being parsed is invalid in some way,
  * e.g. it contains a malformed varint or a negative byte length.
  *
  * @param description human-readable explanation of the parse failure
  */
@SerialVersionUID(-1616151763072450476L)
class InvalidProtocolBufferException(description: String)
  extends IOException(description)

/** Factory methods producing the standard parse-failure exceptions. */
object InvalidProtocolBufferException {

  /** Input ended unexpectedly in the middle of a field. */
  def truncatedMessage(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "While parsing a protocol message, the input ended unexpectedly " +
        "in the middle of a field. This could mean either that the " +
        "input has been truncated or that an embedded message " +
        "misreported its own length.")

  /** An embedded string or message claimed a negative size. */
  def negativeSize(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "CodedInputStream encountered an embedded string or message " +
        "which claimed to have negative size.")

  /** A varint could not be decoded. */
  def malformedVarint(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "CodedInputStream encountered a malformed varint.")

  /** A field tag of zero was read. */
  def invalidTag(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "Protocol message contained an invalid tag (zero).")

  /** An end-group tag did not match its start-group tag. */
  def invalidEndTag(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "Protocol message end-group tag did not match expected tag.")

  /** A tag carried an unknown wire type. */
  def invalidWireType(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "Protocol message tag had invalid wire type.")

  /** Nesting depth exceeded the configured recursion limit. */
  def recursionLimitExceeded(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "Protocol message had too many levels of nesting. May be malicious. " +
        "Use CodedInputStream.setRecursionLimit() to increase the depth limit.")

  /** Message size exceeded the configured size limit. */
  def sizeLimitExceeded(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException(
      "Protocol message was too large. May be malicious. " +
        "Use CodedInputStream.setSizeLimit() to increase the size limit.")

  /** Generic parse failure. */
  def parseFailure(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException("Failed to parse the message.")

  /** A string field contained bytes that are not valid UTF-8. */
  def invalidUtf8(): InvalidProtocolBufferException =
    new InvalidProtocolBufferException("Protocol message had invalid UTF-8.")
}
| trueaccord/protobuf-scala-runtime | shared/src/main/scala/com/google/protobuf/InvalidProtocolBufferException.scala | Scala | apache-2.0 | 2,294 |
/*
* Part of NDLA image-api
* Copyright (C) 2017 NDLA
*
* See LICENSE
*/
package no.ndla.imageapi.model.api
import java.util.Date
import org.scalatra.swagger.annotations.ApiModel
import org.scalatra.swagger.runtime.annotations.ApiModelProperty
import scala.annotation.meta.field
// format: off
/** V2 API representation of an image's metadata. Swagger documentation for
  * each JSON property is carried by the `ApiModelProperty` annotation on the
  * corresponding field.
  */
@ApiModel(description = "Meta information for the image")
case class ImageMetaInformationV2(
    @(ApiModelProperty @field)(description = "The unique id of the image") id: String,
    @(ApiModelProperty @field)(description = "The url to where this information can be found") metaUrl: String,
    @(ApiModelProperty @field)(description = "The title for the image") title: ImageTitle,
    @(ApiModelProperty @field)(description = "Alternative text for the image") alttext: ImageAltText,
    @(ApiModelProperty @field)(description = "The full url to where the image can be downloaded") imageUrl: String,
    @(ApiModelProperty @field)(description = "The size of the image in bytes") size: Long,
    @(ApiModelProperty @field)(description = "The mimetype of the image") contentType: String,
    @(ApiModelProperty @field)(description = "Describes the copyright information for the image") copyright: Copyright,
    @(ApiModelProperty @field)(description = "Searchable tags for the image") tags: ImageTag,
    @(ApiModelProperty @field)(description = "Searchable caption for the image") caption: ImageCaption,
    @(ApiModelProperty @field)(description = "Supported languages for the image title, alt-text, tags and caption.") supportedLanguages: Seq[String],
    @(ApiModelProperty @field)(description = "Describes when the image was created") created: Date,
    @(ApiModelProperty @field)(description = "Describes who created the image") createdBy: String,
    // Restricted to the enumerated values below; "not-set" is the unspecified state.
    @(ApiModelProperty @field)(description = "Describes if the model has released use of the image", allowableValues = "not-set,yes,no,not-applicable") modelRelease: String,
    // Editor-only audit trail; absent (None) for non-editor consumers.
    @(ApiModelProperty @field)(description = "Describes the changes made to the image, only visible to editors") editorNotes: Option[Seq[EditorNote]]
)
| NDLANO/image-api | src/main/scala/no/ndla/imageapi/model/api/ImageMetaInformationV2.scala | Scala | gpl-3.0 | 2,084 |
package uber.nosurge.actors
import akka.actor.{Actor, ActorRef, Props}
import uber.domain.auth.ServerToken
import uber.entities.PriceEstimate
import uber.nosurge.Settings
import uber.nosurge.actors.Message.{CheckPrice, Terminate}
import uber.nosurge.actors.Models.InitialData
import uber.nosurge.actors.PriceChecker.priceDropPercentage
import uber.nosurge.services.RideEstimatesService
import uber.nosurge.util.Util.toSingleDecimal
import scala.concurrent.ExecutionContext.Implicits.global
/** Actor that re-fetches Uber price estimates for a saved trip and notifies
  * the controller when surge pricing ends or drops by a sufficient margin.
  *
  * @param controller           actor that receives notification/terminate messages
  * @param state                initial trip and price snapshot used as the comparison baseline
  * @param rideEstimatesService service used to fetch fresh price estimates
  */
class PriceChecker(controller: ActorRef, state: InitialData, rideEstimatesService: RideEstimatesService)
                  (implicit val settings: Settings) extends Actor {

  // Mutable baselines: rebased each time a qualifying price drop is reported.
  var initialSurgeMultiplier = toSingleDecimal(state.initialSurgeMultiplier)
  var initialPriceEstimate = state.initialPrice.estimate

  implicit val serverToken: ServerToken = settings.serverToken

  override def receive: Receive = {
    case CheckPrice =>
      val (startLng, startLat) = (state.destination.startLocation.longitude, state.destination.startLocation.latitude)
      val (endLng, endLat) = (state.destination.endLocation.longitude, state.destination.endLocation.latitude)
      // NOTE(review): the Future's result is discarded here, so a failed fetch is
      // silently ignored until the next CheckPrice message arrives — confirm intended.
      for {
        priceEstimates <- rideEstimatesService.fetchPriceEstimates(startLat, startLng, endLat, endLng, None)
      } yield {
        // Only the estimate matching the trip's product type (case-insensitive) is considered.
        priceEstimates.foreach {
          _.prices
            .find(_.display_name.toLowerCase == state.uberType.toLowerCase)
            .foreach(notify)
        }
      }
  }

  // Overloads (does not override) AnyRef.notify(): decides which message, if any,
  // to send to the controller for the matched product's estimate.
  private def notify(priceEstimate: PriceEstimate): Unit = {
    if (priceEstimate.surge_multiplier.isEmpty || priceEstimate.surge_multiplier.exists(_ <= 1.0)) {
      // Surge is over: report it and shut the polling actor down.
      controller ! Message.surgeNotActiveAnymore(priceEstimate)
      controller ! Terminate
    } else if (priceEstimate.surge_multiplier.exists(s => toSingleDecimal(initialSurgeMultiplier * priceDropPercentage.toFloat) >= toSingleDecimal(s))) {
      // Surge fell to at most priceDropPercentage of the baseline:
      // rebase the baseline on the new surge/price and notify the controller.
      val currentSurge = toSingleDecimal(priceEstimate.surge_multiplier.get)
      initialSurgeMultiplier = currentSurge
      initialPriceEstimate = priceEstimate.estimate
      controller ! Message.priceDropped(priceEstimate, currentSurge)
    }
  }
}
object PriceChecker {

  /** Surge must fall to at most 85% of the current baseline (a 15% drop)
    * before a price-drop notification is sent.
    */
  val priceDropPercentage: Float = 0.85f // float literal instead of 0.85.toFloat (same value)

  /** Props factory for [[PriceChecker]]. */
  def props(controller: ActorRef, initialData: InitialData, ridersService: RideEstimatesService)(implicit settings: Settings) =
    Props(new PriceChecker(controller, initialData, ridersService))
}
| allantl/uber-nosurge-notifications | backend/src/main/scala/uber/nosurge/actors/PriceChecker.scala | Scala | apache-2.0 | 2,406 |
package definiti.common.ast
/** A named, typed parameter declaration in the Definiti AST.
  *
  * @param name          parameter identifier as written in the source
  * @param typeReference reference to the parameter's declared type
  * @param location      source position of the declaration
  */
case class ParameterDefinition(
  name: String,
  typeReference: AbstractTypeReference,
  location: Location
)
| definiti/definiti-core | src/main/scala/definiti/common/ast/ParameterDefinition.scala | Scala | mit | 140 |
// Scala provides extra special conveniences
// for converting methods ("functions") to Functions
object Foo {
  /** True exactly when x is even. */
  def isGood(x: Int): Boolean = x % 2 == 0
}

// Eta-expand the method into a first-class function value.
val predicate: Int => Boolean = Foo.isGood _
| agconti/scala-school | 04-functions-as-values/slides/slide058.scala | Scala | mit | 217 |
package es.weso.shex.validation
import es.weso.shex.ManifestRunner
import org.scalatest._
import es.weso.manifest._
import com.typesafe.config._
import util._
import es.weso.utils.FileUtils._
/** Integration spec: reads the ShEx validation manifest (manifest.ttl) from
  * the configured folder and executes every test entry it declares.
  * Folder locations come from the application configuration.
  */
class AllFromManifest extends RDF2Manifest
  with FunSpecLike
  with Matchers
  with TryValues
  with ManifestRunner {

  val conf: Config = ConfigFactory.load()

  val validationFolder = conf.getString("validationFolder")
  val manifestFile = validationFolder + "manifest.ttl"
  val base = filePath2URI(validationFolder)

  describe("Running tests folder") {
    describe(s"Can read and execute tests in $manifestFile with base $base") {
      val maybeManifest = RDF2Manifest.read(manifestFile,base)
      maybeManifest match {
        case Success(manifest) => {
          runTests(manifest,base)
          // runTestByName(manifest,base,"1literalFractiondigits_pass")
        }
        // A manifest that cannot be parsed fails the whole suite immediately.
        case Failure(e) => Assertions.fail("Exception reading manifest file (" + manifestFile + "): " + e.getMessage)
      }
    }
  }
}
| labra/ShExcala | src/compat/scala/es/weso/shex/validation/AllFromManifest.scala | Scala | mit | 1,017 |
package com.github.diegopacheco.scala3.playground.features
// Demonstrates Scala 3 match types: Elem[X] reduces at compile time to the
// element type of the container X.
@main def MatchTypesMain(): Unit =
  type Elem[X] = X match
    case String      => Char
    case Array[t]    => t
    case Iterable[t] => t

  // Elem[String] reduces to Char.
  val firstChar: Elem[String] = 'a'
  println(firstChar)

  // Elem[Array[Int]] reduces to Int.
  val firstElem: Elem[Array[Int]] = 1
  println(firstElem)
| diegopacheco/scala-playground | scala-3-playground/scala-3-playground/src/main/scala/com/github/diegopacheco/scala3/playground/features/MatchTypesMain.scala | Scala | unlicense | 290 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk

/**
 * Silk configuration.
 */
package object config
package org.bitcoins.testkitcore.gen
import org.bitcoins.core.number._
import org.bitcoins.core.protocol.{BigSizeUInt, CompactSizeUInt}
import org.bitcoins.core.script.constant.ScriptNumber
import org.bitcoins.core.util.NumberUtil
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Gen
import scodec.bits.{BitVector, ByteVector}
/** ScalaCheck generators for bitcoin-s numeric types (signed/unsigned
  * fixed-width integers, script numbers, compact/big size var-ints) and for
  * raw bytes and bit vectors.
  *
  * Created by chris on 6/16/16.
  */
trait NumberGenerator {

  def positiveShort: Gen[Short] = {
    Gen.chooseNum[Short](0, Short.MaxValue)
  }

  /** Creates a generator that generates positive long numbers */
  def positiveLongs: Gen[Long] = Gen.choose(0, Long.MaxValue)

  /** Integers between 0 and Int.MaxValue */
  val positiveInts: Gen[Int] = Gen.choose(0, Int.MaxValue)

  /** Integers between Int.MinValue and -1 */
  val negativeInts: Gen[Int] = Gen.choose(Int.MinValue, -1)

  /** Random integers */
  val ints: Gen[Int] = Gen.choose(Int.MinValue, Int.MaxValue)

  /** Creates a generator for positive longs without the number zero */
  def positiveLongsNoZero: Gen[Long] = Gen.choose(1, Long.MaxValue)

  /** Creates a number generator that generates negative long numbers */
  def negativeLongs: Gen[Long] = Gen.choose(Long.MinValue, -1)

  // 5-bit unsigned values: 0 to 31 inclusive.
  def uInt5: Gen[UInt5] = Gen.choose(0, 31).map(n => UInt5(n))

  def uInt5s: Gen[Seq[UInt5]] = Gen.listOf(uInt5)

  // 8-bit unsigned values: 0 to 255 inclusive.
  def uInt8: Gen[UInt8] = Gen.choose(0, 255).map(n => UInt8(n.toShort))

  def uInt8s: Gen[Seq[UInt8]] = Gen.listOf(uInt8)

  // 16-bit unsigned values: 0 to 65535 inclusive.
  def uInt16: Gen[UInt16] = Gen.choose(0, 65535).map(UInt16(_))

  /** Generates a number in the range 0 <= x <= 2 ^^32 - 1
    * then wraps it in a UInt32
    */
  def uInt32s: Gen[UInt32] =
    Gen.choose(0L, (NumberUtil.pow2(32) - 1).toLong).map(UInt32(_))

  /** Chooses a BigInt in the ranges of 0 <= bigInt < 2^^64 */
  def bigInts: Gen[BigInt] =
    Gen
      .chooseNum(Long.MinValue, Long.MaxValue)
      .map(x => BigInt(x) + BigInt(2).pow(63))

  def positiveBigInts: Gen[BigInt] = bigInts.filter(_ >= 0)

  def bigIntsUInt64Range: Gen[BigInt] =
    positiveBigInts.filter(_ < (BigInt(1) << 64))

  /** Generates a number in the range 0 <= x < 2^^64
    * then wraps it in a UInt64
    */
  def uInt64s: Gen[UInt64] = uInt64

  def uInt64: Gen[UInt64] =
    for {
      bigInt <- bigIntsUInt64Range
    } yield UInt64(bigInt)

  def int32s: Gen[Int32] =
    Gen.choose(Int32.min.toLong, Int32.max.toLong).map(Int32(_))

  def int64s: Gen[Int64] =
    Gen.choose(Int64.min.toLong, Int64.max.toLong).map(Int64(_))

  def scriptNumbers: Gen[ScriptNumber] =
    Gen.choose(Int64.min.toLong, Int64.max.toLong).map(ScriptNumber(_))

  /** The policy bounds for nTimeLock fields (see TxBuilder) */
  def timeLockScriptNumbers: Gen[ScriptNumber] =
    Gen.choose(1L, UInt32.max.toLong - 1L).map(ScriptNumber(_))

  def positiveScriptNumbers: Gen[ScriptNumber] =
    Gen.choose(0L, Int64.max.toLong).map(ScriptNumber(_))

  def compactSizeUInts: Gen[CompactSizeUInt] = uInt64s.map(CompactSizeUInt(_))

  def bigSizeUInt: Gen[BigSizeUInt] = uInt64.map(BigSizeUInt.apply)

  /** Generates an arbitrary [[scala.Byte Byte]] in Scala */
  def byte: Gen[Byte] = arbitrary[Byte]

  /** Generates an arbitrary [[scodec.bits.ByteVector ByteVector]] */
  def bytevector: Gen[ByteVector] = Gen.listOf(byte).map(ByteVector(_))

  def bytevector(length: Int): Gen[ByteVector] =
    Gen.listOfN(length, byte).map(ByteVector(_))

  /** Generates a byte list whose length is between 0 and 100 */
  def bytes: Gen[List[Byte]] =
    for {
      num <- Gen.choose(0, 100)
      b <- bytes(num)
    } yield b

  /** Generates the number of bytes specified by num
    * @param num number of bytes to generate
    * @return generator producing a list of exactly `num` bytes
    */
  def bytes(num: Int): Gen[List[Byte]] = Gen.listOfN(num, byte)

  /** Generates a random boolean */
  def bool: Gen[Boolean] =
    for {
      num <- Gen.choose(0, 1)
    } yield num == 1

  /** Generates a bit vector of 0 to 100 random bits */
  def bitVector: Gen[BitVector] =
    for {
      n <- Gen.choose(0, 100)
      vector <- Gen.listOfN(n, bool)
    } yield BitVector.bits(vector)

  /** Generates a random GCS P parameter.
    *
    * Bit parameter for GCS, cannot be more than 32 as we will have a number too large for a UInt64.
    * @see [[https://github.com/Roasbeef/btcutil/blob/b5d74480bb5b02a15a9266cbeae37ecf9dd6ffca/gcs/gcs.go#L67]]
    */
  def genP: Gen[UInt8] = {
    Gen.choose(0, 32).map(UInt8(_))
  }
}

object NumberGenerator extends NumberGenerator
| bitcoin-s/bitcoin-s | testkit-core/src/main/scala/org/bitcoins/testkitcore/gen/NumberGenerator.scala | Scala | mit | 4,362 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.support.naming.{DisplayName, FriendlyDisplay}
import io.truthencode.ddo.support.requisite.GrantsToClass
/** Fire-flavoured Energy Resistance feat, granted automatically to
  * Favored Souls at class levels 5, 10 and 15.
  *
  * Created by adarr on 3/19/2017.
  */
trait EnergyResistanceFire extends EnergyResistance with FriendlyDisplay with GrantsToClass {
  self: DisplayName =>

  override def grantToClass: Seq[(HeroicCharacterClass, Int)] = fvsMap

  // (FavoredSoul, 5), (FavoredSoul, 10), (FavoredSoul, 15)
  private def fvsMap = (5 to 15 by 5).map((HeroicCharacterClass.FavoredSoul, _))

  /**
   * @inheritdoc
   */
  override protected def nameSource: String = "Fire"
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/EnergyResistanceFire.scala | Scala | apache-2.0 | 1,310 |
package breeze.optimize.proximal
import breeze.linalg.{DenseVector, DenseMatrix}
import breeze.optimize.DiffFunction
import breeze.stats.distributions.Rand
/** Generates random dense logistic-regression problems together with their
  * differentiable negative log-likelihood cost, for exercising optimizers.
  *
  * @author debasish83
  */
object LogisticGenerator {

  /** Logistic-loss cost function over a fixed design matrix and 0/1 labels.
    *
    * @param data   ndim x ndim design matrix, one example per row
    * @param labels 0.0/1.0 label per row
    */
  case class Cost(data: DenseMatrix[Double],
                  labels: DenseVector[Double]) extends DiffFunction[DenseVector[Double]] {
    // Returns (total loss, gradient) accumulated over all rows.
    def calculate(x: DenseVector[Double]) = {
      val cumGradient = DenseVector.zeros[Double](x.length)
      var cumLoss = 0.0
      var i = 0
      while (i < data.rows) {
        val brzData = data(i, ::).t
        val margin: Double = -1.0 * x.dot(brzData)
        // d(loss)/d(margin) term: sigmoid(-margin) - y
        val gradientMultiplier = (1.0 / (1.0 + math.exp(margin))) - labels(i)
        val gradient = brzData * gradientMultiplier
        val loss =
          if (labels(i) > 0) {
            math.log1p(math.exp(margin)) // log1p is log(1+p) but more accurate for small p
          } else {
            math.log1p(math.exp(margin)) - margin
          }
        cumGradient += gradient
        cumLoss += loss
        i = i + 1
      }
      (cumLoss, cumGradient)
    }
  }

  /** Builds an `ndim x ndim` standard-Gaussian design matrix with random
    * 0/1 labels (threshold 0.5) and wraps them in a [[Cost]].
    */
  def apply(ndim: Int): DiffFunction[DenseVector[Double]] = {
    val rand = Rand.gaussian(0, 1)
    val data = DenseMatrix.rand[Double](ndim, ndim, rand)
    val labels = DenseVector.rand[Double](ndim, rand).map { x => if (x > 0.5) 1.0 else 0.0 }
    Cost(data, labels)
  }
}
package com.twitter.diffy
import com.google.inject.Stage
import com.twitter.finatra.http.test.EmbeddedHttpServer
import com.twitter.inject.Test
/** Smoke test: boots MainService in-process (production stage) with dummy
  * downstream endpoints and asserts the embedded server reports healthy.
  */
class StartupFeatureTest extends Test {

  val server = new EmbeddedHttpServer(
    stage = Stage.PRODUCTION,
    twitterServer = new MainService {
    },
    // Minimal flags required for startup; port 0 lets the proxy pick a free port.
    extraArgs = Seq(
      "-proxy.port=:0",
      "-candidate=localhost:80",
      "-master.primary=localhost:80",
      "-master.secondary=localhost:80",
      "-service.protocol=http"))

  "verify startup" in {
    server.assertHealthy()
  }
}
| ljbx/diffy | src/test/scala/com/twitter/diffy/StartupFeatureTest.scala | Scala | apache-2.0 | 548 |
package sisdn.service
import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.model.headers.HttpCredentials
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.server._
import akka.stream.ActorMaterializer
import akka.stream.javadsl.Sink
import com.typesafe.config.ConfigFactory
import akka.persistence.query.{EventEnvelope, PersistenceQuery}
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import sisdn.admin._
import slick.driver.MySQLDriver.api._
import sisdn.admin.AdminQueryRoute
/** Top-level HTTP routing for the service: CORS handling, bearer-token
  * extraction/authorization, the admin API under /api, and static assets
  * served from the bundled `dist` directory.
  */
trait ServiceRoute extends Directives with Authentication {
  implicit val system = ActorSystem()
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  // Single origin allowed for CORS; concrete value supplied by the implementor.
  val allowedOrigins: String
  lazy val allowedOrigin = HttpOrigin(allowedOrigins)

  val router = system.actorOf(Props(classOf[AdminRouter]))
  val admin = new AdminRoutes(router)
  val innerRoutes = admin.route

  // Maps authorization failures to 403 with an Arabic "not permitted" message.
  implicit def sisdnRejectionHandler =
    RejectionHandler.newBuilder()
      .handle { case AuthorizationFailedRejection =>
        complete((Forbidden, "ΨΊΩΨ± Ω
Ψ³Ω
ΩΨ Ψ¨Ψ§Ψ¬Ψ±Ψ§Ψ‘ Ψ§ΩΨΉΩ
ΩΩΨ© Ψ§ΩΩ
Ψ·ΩΩΨ¨Ψ©"))
      }.result

  // Adds the CORS response headers to every response.
  private def addAccessControlHeaders = mapResponseHeaders { headers =>
    `Access-Control-Allow-Origin`(allowedOrigin) +:
      `Access-Control-Allow-Headers`("Authorization", "Content-Type",
        "pragma", "cache-control", "X-Requested-With") +: headers
  }

  // Answers CORS preflight OPTIONS requests with the allowed methods.
  private def preflightRequestHandler: Route = options {
    complete(HttpResponse(200).withHeaders(
      `Access-Control-Allow-Methods`(OPTIONS, POST, PUT, GET, DELETE)
    )
    )
  }

  def corsHandler(r: Route) = addAccessControlHeaders {
    preflightRequestHandler ~ r
  }

  // Full route: /api requires a valid bearer token; everything else serves
  // the single-page app and its static resources.
  val serviceRoute = corsHandler {
    handleRejections(sisdnRejectionHandler) {
      extractCredentials { bt: Option[HttpCredentials] =>
        provide(userExtractor(bt.map(_.token()))) { user =>
          pathPrefix("api") {
            authorize(user.isDefined) {
              innerRoutes(user.get)
            }
          } ~ path("") {
            getFromResource("dist/index.html")
          } ~
            getFromResourceDirectory("dist")
        }
      }
    }
  }
}
/** Application entry point: resumes the read-side event projection and binds
  * the HTTP server.
  */
object ServiceEndpoint extends ServiceRoute with AdminQuery {
  val config = ConfigFactory.load()
  val secret = config.getString("sisdn.key")

  // Bind address/port: OpenShift env vars when deployed, localhost:8888 otherwise.
  val host = scala.util.Properties.envOrElse("OPENSHIFT_SCALA_IP", "localhost")
  val port = scala.util.Properties.envOrElse("OPENSHIFT_SCALA_PORT", "8888").toInt
  override val allowedOrigins = config.getString("sisdn.cors.allowedOrigins")
  val appEnv = config.getString("sisdn.appEnv")

  def main(args: Array[String]) {
    val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](
      LeveldbReadJournal.Identifier)

    // For each stored (persistenceId, offset) pair, replay journal events from
    // that offset and feed them to the query-side writer.
    // NOTE(review): the resulting Futures are discarded; projection failures
    // would go unnoticed — confirm intended.
    db.run(streamOffsets.result).map{ result => result.map{ os =>
      queries.eventsByPersistenceId (os._1, os._2, Long.MaxValue)
        .mapAsync (1) { writeToDB }
        .runWith (Sink.ignore)
    }
    }

    Http().bindAndHandle(serviceRoute, host, port)
  }
}
| mhashimm/backend | src/main/scala/sisdn/service/ServiceRoute.scala | Scala | agpl-3.0 | 3,232 |
/*******************************************************************************
Copyright (c) 2013-2014, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.analysis.typing.models.jquery
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.{AccessHelper => AH, _}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.cfg.{CFGExpr, CFG, InternalError}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
/** Abstract-semantics model of the jQuery data/queue API for the static
  * analyzer: declares the modeled properties of jQuery / jQuery.prototype and
  * the transfer functions for the modeled built-ins.
  */
object JQueryData extends ModelData {

  // NOTE(review): unused in this object — presumably the jQuery expando key
  // name; confirm before removing.
  private val jquery_expando = "jQuery00000000000000000000"

  // Abstract locations for jQuery.cache and its (weakly-updated) data record.
  val CacheLoc = newSystemLoc("jQueryCache", Recent)
  val CacheDataLoc = newSystemLoc("jQueryCacheData", Old)

  // Properties installed on the jQuery constructor object.
  private val prop_const: List[(String, AbsProperty)] = List(
    ("data", AbsBuiltinFunc("jQuery.data", 4)),
    ("dequeue", AbsBuiltinFunc("jQuery.dequeue", 2)),
    ("hasData", AbsBuiltinFunc("jQuery.hasData", 1)),
    ("queue", AbsBuiltinFunc("jQuery.queue", 3)),
    ("removeData", AbsBuiltinFunc("jQuery.removeData", 3)),
    // property
    ("cache", AbsConstValue(PropValue(ObjectValue(CacheLoc, T, T, T)))),
    ("guid", AbsConstValue(PropValue(ObjectValue(UInt, T, T, T))))
  )

  // Properties installed on jQuery.prototype.
  private val prop_proto: List[(String, AbsProperty)] = List(
    ("cleareQueue", AbsBuiltinFunc("jQuery.prototype.cleareQueue", 1)),
    ("data", AbsBuiltinFunc("jQuery.prototype.data", 2)),
    ("dequeue", AbsBuiltinFunc("jQuery.prototype.dequeue", 1)),
    ("queue", AbsBuiltinFunc("jQuery.prototype.queue", 2)),
    ("removeData", AbsBuiltinFunc("jQuery.prototype.removeData", 1))
  )

  // Abstract object for jQuery.cache: every numeric key maps to CacheDataLoc.
  private val prop_cache: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(T))),
    // weak update
    (Str_default_number, AbsConstValue(PropValue(ObjectValue(CacheDataLoc, T, T, T))))
  )

  // Abstract object for the shared cache data record (initially empty).
  private val prop_cache_data: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(T)))
  )

  def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
    (JQuery.ConstLoc, prop_const), (JQuery.ProtoLoc, prop_proto),
    (CacheLoc, prop_cache), (CacheDataLoc, prop_cache_data)
  )

  def getSemanticMap(): Map[String, SemanticFun] = {
    Map(
      // Models $.fn.data(): getter/setter over the shared cache data record.
      // Four behaviors are joined, selected by the abstract argument shapes.
      "jQuery.prototype.data" -> (
        (sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
          /* new addr */
          val lset_env = h(SinglePureLocalLoc)("@env")._2._2
          val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
          if (set_addr.size > 1) throw new InternalError("API heap allocation: Size of env address is " + set_addr.size)
          val addr_env = (cp._1._1, set_addr.head)
          val addr1 = cfg.getAPIAddress(addr_env, 0)
          /* new loc */
          val l_ret = addrToLoc(addr1, Recent)
          /* jQuery object */
          val lset_this = h(SinglePureLocalLoc)("@this")._2._2
          /* arguments */
          var v_key = getArgValue(h, ctx, args, "0")
          var v_value = getArgValue(h, ctx, args, "1")
          // case 1: no arguments — return a copy of the data record
          // (or null when the jQuery set may be empty).
          val (h_ret1, ctx_ret1, v_ret1) =
            if (UndefTop <= v_key._1._1) {
              val v_len = lset_this.foldLeft(ValueBot)((v, l) => v + Helper.Proto(h, l, AbsString.alpha("length")))
              val (h_1, ctx_1, v_1) =
                if (BoolTrue </ Helper.toBoolean(v_len)) {
                  val (h_1, ctx_1) = Helper.Oldify(h, ctx, addr1)
                  val h_2 = h_1.update(l_ret, h_1(CacheDataLoc))
                  (h_2, ctx_1, Value(l_ret))
                }
                else
                  (HeapBot, ContextBot, ValueBot)
              val v_2 =
                if (BoolFalse </ Helper.toBoolean(v_len))
                  Value(NullTop)
                else
                  ValueBot
              (h_1, ctx_1, v_1 + v_2)
            }
            else
              (HeapBot, ContextBot, ValueBot)
          // case 2: 1st argument is an object — bulk-store its properties
          // (including defaults) into the cache data record.
          val (h_ret2, v_ret2) =
            if (!v_key._2.isEmpty) {
              val _h = v_key._2.foldLeft(h)((_h, l) => {
                val _h1 = _h(l).getProps.foldLeft(_h)((hh, prop) =>
                  Helper.PropStore(hh, CacheDataLoc, AbsString.alpha(prop), hh(l)(prop)._1._1))
                val o_data = _h1(CacheDataLoc)
                val v_def_num = _h1(l)(Str_default_number)
                val v_def_oth = _h1(l)(Str_default_other)
                val o_data1 = o_data.update(NumStr, v_def_num + o_data(NumStr))
                  .update(OtherStr, v_def_oth + o_data(OtherStr))
                _h1.update(CacheDataLoc, o_data1)
              })
              (_h, Value(lset_this))
            }
            else
              (HeapBot, ValueBot)
          // case 3: single string argument — read the keyed value.
          val (h_ret3, v_ret3) =
            if (v_key._1._5 </ StrBot && UndefTop <= v_value._1._1)
              (h, Helper.Proto(h, CacheDataLoc, v_key._1._5))
            else
              (HeapBot, ValueBot)
          // case 4: key and value — store and return the jQuery object.
          val (h_ret4, v_ret4) =
            if (v_key._1._5 </ StrBot && v_value </ ValueBot)
              (Helper.PropStore(h, CacheDataLoc, v_key._1._5, v_value), Value(lset_this))
            else
              (HeapBot, ValueBot)
          // Join all applicable cases; propagate only if some case applied.
          val h_ret = h_ret1 + h_ret2 + h_ret3 + h_ret4
          val v_ret = v_ret1 + v_ret2 + v_ret3 + v_ret4
          val ctx_ret = ctx_ret1 + ctx
          if (v_ret </ ValueBot)
            ((Helper.ReturnStore(h_ret, v_ret), ctx_ret), (he, ctxe))
          else
            ((HeapBot, ContextBot), (he, ctxe))
        }),
      // Models $.fn.removeData(): state is left unchanged, returns `this`.
      ("jQuery.prototype.removeData" -> (
        (sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
          /* jQuery object */
          val lset_this = h(SinglePureLocalLoc)("@this")._2._2
          // do nothing
          ((Helper.ReturnStore(h, Value(lset_this)), ctx), (he, ctxe))
        }))
    )
  }

  def getPreSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }

  def getDefMap(): Map[String, AccessFun] = {
    Map()
  }

  def getUseMap(): Map[String, AccessFun] = {
    Map()
  }
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/jquery/JQueryData.scala | Scala | bsd-3-clause | 7,292 |
package rl
package expand
import org.jboss.netty.channel.{ChannelFutureListener, ChannelHandlerContext}
import org.jboss.netty.handler.codec.http._
import org.jboss.netty.handler.codec.http.HttpHeaders.Names
import org.jboss.netty.buffer.ChannelBuffers
import akka.dispatch.Await
import akka.util.duration._
import org.specs2.time.NoTimeConversions
/** Specs for UrlExpander: follows 301/302 redirect chains against an
  * in-process Netty server (plus two live-network cases) and checks the
  * resolved URL and redirect-limit behavior.
  */
class UrlExpanderspec extends org.specs2.mutable.Specification with NoTimeConversions {

  sequential

  "A UrlExpander" should {
    "expand urls that redirect with a 302 status" in {
      // Server issues three 302 hops, then answers 200.
      var count = 0
      val server = new NettyHttpServerContext {
        def handleRequest(ctx: ChannelHandlerContext, req: HttpRequest) {
          if (count < 3) {
            count += 1
            val resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.FOUND)
            resp.setHeader(Names.CONTENT_TYPE, "text/plain")
            resp.setHeader(HttpHeaders.Names.LOCATION, "http://127.0.0.1:"+port+"/"+count)
            resp.setContent(ChannelBuffers.wrappedBuffer("".getBytes("UTF-8")))
            val future = ctx.getChannel.write(resp)
            future addListener ChannelFutureListener.CLOSE
          } else {
            writeResponse(ctx, "done")
          }
        }
      }
      server.start
      val expand = UrlExpander()
      try {
        Await.result(expand(Uri("http://127.0.0.1:"+server.port+"/")), 5 seconds) must_== "http://127.0.0.1:"+server.port+"/3"
        count must be_==(3)
      } finally {
        server.stop
        expand.stop()
      }
    }

    "expand urls that redirect with a 301 status" in {
      // Same chain as above but with permanent (301) redirects.
      var count = 0
      val server = new NettyHttpServerContext {
        def handleRequest(ctx: ChannelHandlerContext, req: HttpRequest) {
          if (count < 3) {
            count += 1
            val resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.MOVED_PERMANENTLY)
            resp.setHeader(Names.CONTENT_TYPE, "text/plain")
            resp.setHeader(HttpHeaders.Names.LOCATION, "http://127.0.0.1:"+port+"/"+count)
            resp.setContent(ChannelBuffers.wrappedBuffer("".getBytes("UTF-8")))
            val future = ctx.getChannel.write(resp)
            future addListener ChannelFutureListener.CLOSE
          } else {
            writeResponse(ctx, "done")
          }
        }
      }
      server.start
      val expand = UrlExpander()
      try {
        Await.result(expand(Uri("http://127.0.0.1:"+server.port+"/")), 5 seconds) must_== "http://127.0.0.1:"+server.port+"/3"
        count must be_==(3)
      } finally {
        server.stop
        expand.stop()
      }
    }

    "throw an error when the max redirects are done" in {
      // With maximumResolveSteps = 1, the second redirect must fail the future.
      var count = 0
      val server = new NettyHttpServerContext {
        def handleRequest(ctx: ChannelHandlerContext, req: HttpRequest) {
          if (count < 3) {
            count += 1
            val resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.MOVED_PERMANENTLY)
            resp.setHeader(Names.CONTENT_TYPE, "text/plain")
            resp.setHeader(HttpHeaders.Names.LOCATION, "http://127.0.0.1:"+port+"/"+count)
            resp.setContent(ChannelBuffers.wrappedBuffer("".getBytes("UTF-8")))
            val future = ctx.getChannel.write(resp)
            future addListener ChannelFutureListener.CLOSE
          } else {
            writeResponse(ctx, "done")
          }
        }
      }
      server.start
      val expand = UrlExpander(ExpanderConfig(maximumResolveSteps = 1))
      try {
        Await.result(expand(Uri("http://127.0.0.1:"+server.port+"/")), 5 seconds) must throwA[UrlExpander.RedirectsExhausted]
        count must_== 2
      } finally {
        server.stop
        expand.stop()
      }
    }

    "not expand urls that return a 200" in {
      val server = new NettyHttpServerContext {
        def handleRequest(ctx: ChannelHandlerContext, req: HttpRequest) {
          writeResponse(ctx, "done")
        }
      }
      server.start
      val expand = UrlExpander()
      try {
        Await.result(expand(Uri("http://127.0.0.1:"+server.port+"/")), 5 seconds) must_== "http://127.0.0.1:"+server.port
      } finally {
        server.stop
        expand.stop()
      }
    }

    // NOTE: the two tests below hit the live network (bit.ly / dressaday.com)
    // and are therefore environment-dependent.
    "expand urls that have invalid chars in them" in {
      val expand = UrlExpander()
      try {
        Await.result(expand(Uri("http://bit.ly/ZvTH4o")), 5 seconds) must_== "http://theweek.com/article/index/242212%20/why-the-associated-press-is-dropping-il%20legal-immigrant-from-its-lexicon"
      } finally {
        expand.stop()
      }
    }

    "not expand dressaday.com urls that return a 200" in {
      val expand = UrlExpander()
      try {
        Await.result(expand(Uri("http://www.dressaday.com/2012/11/01/autumn-9929/")), 5 seconds) must_== "http://dressaday.com/2012/11/01/autumn-9929/"
      } finally {
        expand.stop()
      }
    }
  }
}
/* *\\
** _____ __ _____ __ ____ FieldKit **
** / ___/ / / /____/ / / / \\ (c) 2009, field **
** / ___/ /_/ /____/ / /__ / / / http://www.field.io **
** /_/ /____/ /____/ /_____/ **
\\* */
/* created April 22, 2009 */
package field.kit.p5
import field.kit._
/**
* Companion object to class <code>Recorder</code>
*/
object Recorder {
object FileFormat extends Enumeration {
val TGA = Value("tga")
val PNG = Value("png")
val JPG = Value("jpg")
}
object State extends Enumeration {
val SCREENSHOT = Value
val SEQUENCE = Value
val OFF = Value
}
}
/**
* provides screenshot, sequence and tile recording for a renderer
*
* @see <a href="https://dev.field.io/hg/opensource/libAGL/raw-file/9d7bd472280f/src/field/lib/agl/util/recorder/Recorder.scala">libAGL Recorder</a>
* @author Marcus Wendt
*/
class Recorder(val sketch:Sketch) extends Logger {
  import java.io.File
  import java.nio.ByteBuffer
  import java.awt.image.BufferedImage
  import javax.media.opengl.GL
  import javax.media.opengl.GLException
  import com.sun.opengl.util.TGAWriter
  import field.kit.gl.util.Compressor
  import field.kit.math.Dim2

  // -- Configuration --------------------------------------------------------
  // Base name for generated files (defaults to the sketch's log name).
  // NOTE: mutated in save() when the first frame of a sequence is written.
  var name = sketch.logName
  var baseDir = "./recordings"
  // whether to capture an alpha channel alongside the colour data
  var alpha = false
  var fileFormat = Recorder.FileFormat.JPG

  /** the target image dimensions */
  protected var image = new Dim2[Int]

  // internal capture state
  private var awtImage:BufferedImage = null
  private var buffer:ByteBuffer = null
  private var state = Recorder.State.OFF
  private var sequenceBasedir = "./"
  private var sequenceFrame = 0

  // -- tile renderer --------------------------------------------------------
  // tiling is used when the target image is larger than the sketch window
  private var useTiler = false
  var tiler:Tiler = null
  private var tga:TGAWriter = null
  private var tmpFile:File = null

  // Init
  size(sketch.width, sketch.height)

  /**
   * Sets the target image dimensions
   * (typically called via PApplet registered event callback)
   */
  def size(width:Int, height:Int) {
    image := (width, height)
    // check if we need to render the image as tiles
    useTiler = width > sketch.width || height > sketch.height
  }

  /**
   * Should be called before anything is drawn to the screen
   * (typically called via PApplet registered event callback)
   */
  def pre {
    if(!isRecording) return
    if(useTiler)
      tiler.pre
  }

  /**
   * Saves the current frame if the recording is finished.
   * (typically called via PApplet registered event callback)
   */
  def post {
    if(!isRecording) return
    // with tiling, a frame is only complete once all tiles were rendered
    val isFrameFinished =
      if(useTiler) tiler.post else true
    if(isFrameFinished)
      save
  }

  /**
   * Writes the finished frame to a file using the <code>Compressor</code> util.
   *
   * Only ever invoked while recording, so `state` is SCREENSHOT or SEQUENCE
   * here (the match below deliberately has no OFF case).
   */
  protected def save {
    import java.io.IOException
    import com.sun.opengl.util.Screenshot
    // NOTE(review): width/height are assigned but never read below
    // (image.width/image.height are used instead) — candidates for removal.
    var width = sketch.width
    var height = sketch.height
    val suffix = "."+ fileFormat
    // prepare file & folders
    val file = state match {
      case Recorder.State.SCREENSHOT =>
        // single shot: <baseDir>/<name>_<timestamp>.<fmt>
        val f = new File(baseDir +"/"+ name + "_" + Timer() + suffix)
        info("file "+ f)
        f.getParentFile.mkdirs
        f
      case Recorder.State.SEQUENCE =>
        // on the first frame, create a timestamped parent folder for the
        // sequence and strip any path components from the configured name
        if(sequenceFrame == 0) {
          val tmp = new File(name)
          sequenceBasedir = baseDir + "/" + Timer()
          new File(sequenceBasedir).mkdirs
          name = tmp.getName
        }
        // frames are numbered: <sequenceBasedir>/<name>.<frame>.<fmt>
        val f = new File(sequenceBasedir + "/" + name +"."+ sequenceFrame + suffix)
        sequenceFrame += 1
        f
    }
    // save the file
    try {
      fileFormat match {
        case Recorder.FileFormat.TGA =>
          // TGA path delegates the framebuffer readback to JOGL's Screenshot
          Screenshot.writeToTargaFile(file, image.width, image.height, alpha)
        case _ =>
          // the tiler should already have filled the buffer
          if(!useTiler) {
            // capture image into buffer straight from the current GL context
            val readbackType = if(alpha) GL.GL_ABGR_EXT else GL.GL_BGR
            import javax.media.opengl.GLContext
            val gl = GLContext.getCurrent.getGL
            gl.glReadPixels(0, 0, awtImage.getWidth, awtImage.getHeight, readbackType, GL.GL_UNSIGNED_BYTE, buffer)
          }
          // compress buffer into the requested format and write it out
          Compressor(awtImage, fileFormat.toString, file)
      }
    } catch {
      // both GL readback and file IO failures are logged, not rethrown
      case e:GLException => warn(e)
      case e:IOException => warn(e)
    }
    // check if we're done and how to proceed
    state match {
      case Recorder.State.SCREENSHOT => stop
      case Recorder.State.SEQUENCE => initBuffers
      case _ => info("state", state)
    }
  }

  /**
   * Initializes the <code>BufferedImage</code> and its <code>ByteBuffer</code>
   * (and, when tiling, the TGA scratch file and the <code>Tiler</code>).
   */
  protected def initBuffers {
    info("init", image.width, image.height, fileFormat)
    // check if we need to reinitialize the image and buffer
    if(fileFormat != Recorder.FileFormat.TGA) {
      val ib = Compressor.init(image.width, image.height, alpha)
      awtImage = ib._1
      buffer = ib._2
      buffer.clear
    }
    // init tiler
    if(useTiler) {
      var dataFormat = if(alpha) GL.GL_ABGR_EXT else GL.GL_BGR
      if(fileFormat == Recorder.FileFormat.TGA) {
        if(alpha) dataFormat = GL.GL_BGRA
        // the TGA writer exposes its backing buffer, which the tiler fills;
        // the scratch file is created once and reused
        if(tmpFile == null)
          tmpFile = new File("tiler_tmp.tga")
        tga = new TGAWriter
        tga.open(tmpFile, image.width, image.height, alpha)
        buffer = tga.getImageData
      }
      if(tiler == null)
        tiler = new Tiler(this)
      tiler.init(image.width, image.height, buffer, dataFormat)
    }
  }

  /** True while a screenshot or sequence capture is in progress. */
  def isRecording = state != Recorder.State.OFF

  /** Stops any recording; the next pre/post calls become no-ops. */
  def stop {
    state = Recorder.State.OFF
  }

  /** Arms a one-off screenshot; the frame is written on the next post(). */
  def screenshot = {
    initBuffers
    state = Recorder.State.SCREENSHOT
  }

  /** Toggles sequence recording on/off; frame numbering restarts at 0. */
  def sequence {
    if(isRecording) {
      info("sequence recording stopped.")
      stop
    } else {
      info("starting sequence recording...")
      initBuffers
      state = Recorder.State.SEQUENCE
      sequenceFrame = 0
    }
  }
}
| field/FieldKit.scala | src.p5/field/kit/p5/Recorder.scala | Scala | lgpl-3.0 | 5,924 |
package chapter1
import org.scalatest.{FreeSpec, Matchers}
class Question2Spec extends FreeSpec with Matchers {

  import Question2._

  // Covers both an odd-length and an even-length input.
  "Should be reversed" in {
    val expectations = Seq("abc" -> "cba", "abcdfe" -> "efdcba")
    for ((input, reversed) <- expectations)
      reverse(input) shouldBe reversed
  }
}
| alexandrnikitin/algorithm-sandbox | scala/src/test/scala/chapter1/Question2Spec.scala | Scala | mit | 246 |
package com.twitter.finatra.kafka.test.utils
object ThreadUtils {

  /**
   * Starts `func` on a freshly constructed thread and returns immediately
   * (fire-and-forget). `func` is evaluated on the new thread, not the caller's.
   */
  def fork(func: => Unit): Unit = {
    val task = new Runnable {
      def run(): Unit = func
    }
    new Thread(task).start()
  }
}
| twitter/finatra | kafka/src/test/scala/com/twitter/finatra/kafka/test/utils/ThreadUtils.scala | Scala | apache-2.0 | 198 |
package com.twitter.finagle
import com.twitter.conversions.time._
import com.twitter.finagle.util.DefaultTimer
import com.twitter.util.{Await, Future}
import scala.math.Ordering
import scala.util.control.NoStackTrace
/**
* Status tells the condition of a networked endpoint. They are used
* to indicate the health of [[Service]], [[ServiceFactory]], and of
* [[transport.Transport]].
*
* Object [[Status$]] contains the status definitions.
*/
sealed trait Status

/**
 * Define valid [[Status!]] values. They are, in order from
 * most to least healthy:
 *
 *  - Open
 *  - Busy
 *  - Closed
 *
 * (An [[scala.math.Ordering]] is defined in these terms.)
 */
object Status {
  // implicit timer required by `Future.sleep` in `whenOpen`
  private implicit val timer = DefaultTimer.twitter

  /** Signals that an Open status was required but the status is Closed. */
  class ClosedException
    extends Exception("Status was Closed; expected Open")
    with NoStackTrace

  // Healthier statuses compare greater: Open (3) > Busy (2) > Closed (1).
  implicit val StatusOrdering: Ordering[Status] = Ordering.by({
    case Open => 3
    case Busy => 2
    case Closed => 1
  })

  /**
   * A composite status indicating the least healthy of the two.
   */
  def worst(left: Status, right: Status): Status =
    StatusOrdering.min(left, right)

  /**
   * A composite status indicating the most healthy of the two.
   */
  def best(left: Status, right: Status): Status =
    StatusOrdering.max(left, right)

  /**
   * The status representing the worst of the given statuses
   * extracted by `status` on `ts`.
   *
   * @note this may terminate early so don't rely on this method
   * for running side effects on `ts`
   */
  def worstOf[T](ts: Iterable[T], status: T => Status): Status = {
    var worst: Status = Status.Open
    val itr = ts.iterator
    // Closed is the bottom element, so stop as soon as it is reached.
    while (itr.hasNext && worst != Status.Closed)
      worst = Status.worst(worst, status(itr.next()))
    worst
  }

  /**
   * The status representing the best of the given statuses
   * extracted by `status` on `ts`.
   *
   * @note this may terminate early so don't rely on this method
   * for running side effects on `ts`
   */
  def bestOf[T](ts: Iterable[T], status: T => Status): Status = {
    var best: Status = Status.Closed
    val itr = ts.iterator
    // Open is the top element, so stop as soon as it is reached.
    while (itr.hasNext && best != Status.Open)
      best = Status.best(best, status(itr.next()))
    best
  }

  /**
   * Open returns a [[com.twitter.util.Future]] that is satisfied
   * when the status returned by `get` is [[Open]]. It returns
   * an exceptional [[com.twitter.util.Future]] should it be
   * [[Closed]].
   *
   * `whenOpen` polls the underlying status, using
   * exponential backoffs from 1ms to around 1s.
   */
  def whenOpen(get: => Status): Future[Unit] = {
    // Poll with a delay of 2^n ms; n is capped at 10 (~1024ms between polls).
    def go(n: Int): Future[Unit] = get match {
      case Open => Future.Done
      case Closed => Future.exception(new ClosedException)
      case Busy => Future.sleep((1<<n).milliseconds) before go(math.min(n+1, 10))
    }
    go(0)
  }

  /**
   * A blocking version of [[whenOpen]]; this method returns
   * when the status has become [[Open]]. This call
   * blocks and should only be used outside of Finagle
   * threads to halt progress until the status is [[Open]].
   *
   * @throws [[ClosedException]] if the status becomes [[Closed]].
   */
  def awaitOpen(get: => Status): Unit =
    Await.result(whenOpen(get))

  /**
   * An open [[Service]] or [[ServiceFactory]] is ready to be used.
   * It can service requests or sessions immediately.
   */
  case object Open extends Status

  /**
   * A busy [[Service]] or [[ServiceFactory]] is transiently
   * unavailable. A Busy [[Service]] or [[ServiceFactory]] can be
   * used, but may not provide service immediately.
   */
  case object Busy extends Status

  /**
   * The [[Service]] or [[ServiceFactory]] is closed. It will never
   * service requests or sessions again. (And should probably be
   * discarded.)
   */
  case object Closed extends Status
}
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/Status.scala | Scala | apache-2.0 | 3,833 |
import sbt.{ State => _, Configuration => _, Show => _, _ }
import Keys._
/**
 * Declares the OSGi bundle sub-projects of the OCS build together with their
 * inter-project dependencies (via sbt's `dependsOn`). Mixed into the root
 * build definition.
 *
 * Fixes in this revision: removed a duplicate `bundle_edu_gemini_util_osgi`
 * from `bundle_edu_gemini_pot` and a duplicate `bundle_edu_gemini_ags` from
 * `bundle_jsky_app_ot`, and dropped no-op empty `.dependsOn()` calls for
 * consistency with the bundles declared without one.
 */
trait OcsBundle {

  // Bundle projects.
  // Inter-project dependencies must be declared here.

  lazy val bundle_edu_gemini_ags_servlet =
    project.in(file("bundle/edu.gemini.ags.servlet")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_ags
    )

  lazy val bundle_edu_gemini_auxfile_workflow =
    project.in(file("bundle/edu.gemini.auxfile.workflow")).dependsOn(
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_fits,
      bundle_edu_gemini_util_javax_mail,
      bundle_edu_gemini_util_ssh,
      bundle_edu_gemini_util_trpc
    )

  lazy val bundle_edu_gemini_dataman_app =
    project.in(file("bundle/edu.gemini.dataman.app")).dependsOn(
      bundle_edu_gemini_util_file_filter,
      bundle_edu_gemini_pot % "test->test;compile->compile",
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_gsa_query % "test->test;compile->compile"
    )

  lazy val bundle_edu_gemini_gsa_query =
    project.in(file("bundle/edu.gemini.gsa.query")).dependsOn(
      bundle_edu_gemini_pot % "test->test;compile->compile",
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_edu_gemini_horizons_api =
    project.in(file("bundle/edu.gemini.horizons.api")).dependsOn(
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_coords
    )

  lazy val bundle_edu_gemini_horizons_server =
    project.in(file("bundle/edu.gemini.horizons.server")).dependsOn(
      bundle_edu_gemini_horizons_api,
      bundle_edu_gemini_spModel_core,
      bundle_jsky_coords
    )

  lazy val bundle_edu_gemini_itc_shared =
    project.in(file("bundle/edu.gemini.itc.shared")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_trpc,
      bundle_edu_gemini_auxfile_workflow
    )

  lazy val bundle_edu_gemini_itc =
    project.in(file("bundle/edu.gemini.itc")).dependsOn(
      bundle_edu_gemini_itc_shared,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_pot
    )

  lazy val bundle_edu_gemini_itc_web =
    project.in(file("bundle/edu.gemini.itc.web")).dependsOn(
      bundle_edu_gemini_itc % "test->test;compile->compile",
      bundle_edu_gemini_itc_shared,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_pot
    )

  lazy val bundle_edu_gemini_lchquery_servlet =
    project.in(file("bundle/edu.gemini.lchquery.servlet")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi,
      bundle_jsky_util,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_obslog =
    project.in(file("bundle/edu.gemini.obslog")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_oodb_auth_servlet =
    project.in(file("bundle/edu.gemini.oodb.auth.servlet")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_oodb_too_url =
    project.in(file("bundle/edu.gemini.oodb.too.url")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_oodb_too_window =
    project.in(file("bundle/edu.gemini.oodb.too.window")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_mail,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_too_event,
      bundle_edu_gemini_util_javax_mail,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_osgi_main =
    project.in(file("bundle/edu.gemini.osgi.main"))

  lazy val bundle_edu_gemini_p2checker =
    project.in(file("bundle/edu.gemini.p2checker")).dependsOn(
      bundle_edu_gemini_ags,
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_jsky_coords
    )

  lazy val bundle_edu_gemini_phase2_core =
    project.in(file("bundle/edu.gemini.phase2.core")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio
    )

  lazy val bundle_edu_gemini_phase2_skeleton_servlet =
    project.in(file("bundle/edu.gemini.phase2.skeleton.servlet")).dependsOn(
      bundle_edu_gemini_auxfile_workflow,
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_phase2_core,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_pot =
    project.in(file("bundle/edu.gemini.pot")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core % "test->test;compile->compile",
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_model_p1_pdf,
      bundle_edu_gemini_model_p1,
      bundle_edu_gemini_util_pdf,
      bundle_jsky_coords,
      bundle_jsky_util
    )

  lazy val bundle_edu_gemini_qpt_client =
    project.in(file("bundle/edu.gemini.qpt.client")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_qpt_shared,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_ui_workspace,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_ssh,
      bundle_jsky_coords,
      bundle_jsky_util
    )

  lazy val bundle_edu_gemini_qpt_server =
    project.in(file("bundle/edu.gemini.qpt.server"))

  lazy val bundle_edu_gemini_qpt_shared =
    project.in(file("bundle/edu.gemini.qpt.shared")).dependsOn(
      bundle_edu_gemini_ags,
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_coords,
      bundle_jsky_util
    )

  lazy val bundle_edu_gemini_qv_plugin =
    project.in(file("bundle/edu.gemini.qv.plugin")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_horizons_api,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_qpt_shared % "test->test;compile->compile",
      bundle_edu_gemini_services_client,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_shared_gui,
      bundle_edu_gemini_sp_vcs,
      bundle_edu_gemini_spModel_core % "test->test;compile->compile",
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_app_ot_plugin,
      bundle_jsky_coords,
      bundle_jsky_elevation_plot
    )

  lazy val bundle_edu_gemini_seqexec_server =
    project.in(file("bundle/edu.gemini.seqexec.server")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_seqexec_shared,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_trpc,
      bundle_edu_gemini_epics_acm,
      bundle_jsky_coords,
      bundle_jsky_util
    )

  lazy val bundle_edu_gemini_seqexec_shared =
    project.in(file("bundle/edu.gemini.seqexec.shared")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_coords,
      bundle_jsky_util
    )

  lazy val bundle_edu_gemini_services_client =
    project.in(file("bundle/edu.gemini.services.client")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_util_trpc
    )

  lazy val bundle_edu_gemini_services_server =
    project.in(file("bundle/edu.gemini.services.server")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_services_client,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_skycalc
    )

  lazy val bundle_edu_gemini_shared_ca =
    project.in(file("bundle/edu.gemini.shared.ca"))

  lazy val bundle_edu_gemini_catalog =
    project.in(file("bundle/edu.gemini.catalog")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_spModel_core % "test->test;compile->compile",
      bundle_jsky_coords,
      bundle_jsky_util,
      bundle_jsky_util_gui,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_shared_gui =
    project.in(file("bundle/edu.gemini.shared.gui")).dependsOn(
      bundle_edu_gemini_util_skycalc
    )

  lazy val bundle_edu_gemini_shared_mail =
    project.in(file("bundle/edu.gemini.shared.mail")).dependsOn(
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_util_javax_mail
    )

  lazy val bundle_edu_gemini_shared_skyobject =
    project.in(file("bundle/edu.gemini.shared.skyobject")).dependsOn(
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_edu_gemini_shared_util =
    project.in(file("bundle/edu.gemini.shared.util"))

  lazy val bundle_edu_gemini_smartgcal_odbinit =
    project.in(file("bundle/edu.gemini.smartgcal.odbinit")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_smartgcal,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_smartgcal_servlet =
    project.in(file("bundle/edu.gemini.smartgcal.servlet")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_smartgcal,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_sp_vcs =
    project.in(file("bundle/edu.gemini.sp.vcs")).dependsOn(
      bundle_edu_gemini_pot % "test->test;compile->compile",
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_sp_vcs_log,
      bundle_edu_gemini_sp_vcs_reg,
      bundle_edu_gemini_spModel_core % "test->test;compile->compile",
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_trpc
    )

  lazy val bundle_edu_gemini_sp_vcs_log =
    project.in(file("bundle/edu.gemini.sp.vcs.log")).dependsOn(
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_sp_vcs_reg =
    project.in(file("bundle/edu.gemini.sp.vcs.reg")).dependsOn(
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_spModel_core =
    project.in(file("bundle/edu.gemini.spModel.core"))

  lazy val bundle_edu_gemini_ags =
    project.in(file("bundle/edu.gemini.ags")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot % "test->test;compile->compile",
      bundle_edu_gemini_catalog % "test->test;compile->compile",
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core % "test->test;compile->compile",
      bundle_edu_gemini_spModel_pio,
      bundle_jsky_coords,
      bundle_jsky_util_gui
    )

  lazy val bundle_edu_gemini_spModel_io =
    project.in(file("bundle/edu.gemini.spModel.io")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_javax_mail
    )

  lazy val bundle_edu_gemini_spModel_pio =
    project.in(file("bundle/edu.gemini.spModel.pio")).dependsOn(
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_shared_util
    )

  lazy val bundle_edu_gemini_spModel_smartgcal =
    project.in(file("bundle/edu.gemini.spModel.smartgcal")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_util_ssl_apache
    )

  lazy val bundle_edu_gemini_spdb_reports_collection =
    project.in(file("bundle/edu.gemini.spdb.reports.collection")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_p2checker,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_mail,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_sp_vcs,
      bundle_edu_gemini_sp_vcs_log,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_javax_mail,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_ssh
    )

  lazy val bundle_edu_gemini_spdb_rollover_servlet =
    project.in(file("bundle/edu.gemini.spdb.rollover.servlet")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_spdb_shell =
    project.in(file("bundle/edu.gemini.spdb.shell")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_edu_gemini_too_event =
    project.in(file("bundle/edu.gemini.too.event")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_coords
    )

  lazy val bundle_edu_gemini_ui_workspace =
    project.in(file("bundle/edu.gemini.ui.workspace"))

  lazy val bundle_edu_gemini_util_file_filter =
    project.in(file("bundle/edu.gemini.util.file.filter"))

  lazy val bundle_edu_gemini_util_fits =
    project.in(file("bundle/edu.gemini.util.fits")).dependsOn(
      bundle_edu_gemini_util_file_filter
    )

  lazy val bundle_edu_gemini_util_javax_mail =
    project.in(file("bundle/edu.gemini.util.javax.mail"))

  lazy val bundle_edu_gemini_util_log_extras =
    project.in(file("bundle/edu.gemini.util.log.extras")).dependsOn(
      bundle_edu_gemini_util_javax_mail
    )

  lazy val bundle_edu_gemini_util_osgi =
    project.in(file("bundle/edu.gemini.util.osgi"))

  lazy val bundle_edu_gemini_util_security =
    project.in(file("bundle/edu.gemini.util.security")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_javax_mail,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_util_skycalc =
    project.in(file("bundle/edu.gemini.util.skycalc")).dependsOn(
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_jsky_coords
    )

  lazy val bundle_edu_gemini_util_ssh =
    project.in(file("bundle/edu.gemini.util.ssh"))

  lazy val bundle_edu_gemini_util_ssl =
    project.in(file("bundle/edu.gemini.util.ssl"))

  lazy val bundle_edu_gemini_util_ssl_apache =
    project.in(file("bundle/edu.gemini.util.ssl.apache")).dependsOn(
      bundle_edu_gemini_util_ssl
    )

  lazy val bundle_edu_gemini_util_trpc =
    project.in(file("bundle/edu.gemini.util.trpc")).dependsOn(
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_ssl
    )

  lazy val bundle_edu_gemini_wdba_session_client =
    project.in(file("bundle/edu.gemini.wdba.session.client")).dependsOn(
      bundle_edu_gemini_wdba_shared,
      bundle_edu_gemini_wdba_xmlrpc_api
    )

  lazy val bundle_edu_gemini_wdba_shared =
    project.in(file("bundle/edu.gemini.wdba.shared")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_edu_gemini_wdba_xmlrpc_api =
    project.in(file("bundle/edu.gemini.wdba.xmlrpc.api"))

  lazy val bundle_edu_gemini_wdba_xmlrpc_server =
    project.in(file("bundle/edu.gemini.wdba.xmlrpc.server")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_wdba_shared,
      bundle_edu_gemini_wdba_xmlrpc_api,
      bundle_edu_gemini_util_security
    )

  lazy val bundle_jsky_app_ot_testlauncher =
    project.in(file("bundle/jsky.app.ot.testlauncher")).dependsOn(
      bundle_edu_gemini_qv_plugin,
      bundle_jsky_app_ot,
      bundle_jsky_app_ot_visitlog
    )

  lazy val bundle_jsky_app_ot =
    project.in(file("bundle/jsky.app.ot")).dependsOn(
      bundle_edu_gemini_auxfile_workflow,
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_ags,
      bundle_edu_gemini_horizons_api,
      bundle_edu_gemini_itc_shared,
      bundle_edu_gemini_p2checker,
      bundle_edu_gemini_phase2_core,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_gui,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_sp_vcs,
      bundle_edu_gemini_sp_vcs_log,
      bundle_edu_gemini_sp_vcs_reg,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_spModel_smartgcal,
      bundle_edu_gemini_too_event,
      bundle_edu_gemini_ui_miglayout,
      bundle_edu_gemini_util_javax_mail,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_skycalc,
      bundle_edu_gemini_util_ssl,
      bundle_edu_gemini_util_ssl_apache,
      bundle_edu_gemini_util_trpc,
      bundle_edu_gemini_wdba_session_client,
      bundle_edu_gemini_wdba_shared,
      bundle_edu_gemini_wdba_xmlrpc_api,
      bundle_jsky_app_ot_plugin,
      bundle_jsky_app_ot_shared,
      bundle_jsky_coords,
      bundle_jsky_elevation_plot,
      bundle_jsky_util,
      bundle_jsky_util_gui
    )

  lazy val bundle_jsky_app_ot_plugin =
    project.in(file("bundle/jsky.app.ot.plugin")).dependsOn(
      bundle_edu_gemini_ags,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_sp_vcs,
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_jsky_app_ot_shared =
    project.in(file("bundle/jsky.app.ot.shared")).dependsOn(
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_ags,
      bundle_edu_gemini_spModel_io,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_spModel_smartgcal,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_coords
    )

  lazy val bundle_jsky_app_ot_visitlog =
    project.in(file("bundle/jsky.app.ot.visitlog")).dependsOn(
      bundle_edu_gemini_shared_skyobject,
      bundle_edu_gemini_pot,
      bundle_edu_gemini_shared_gui,
      bundle_edu_gemini_shared_util,
      bundle_edu_gemini_sp_vcs,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_spModel_pio,
      bundle_edu_gemini_util_osgi,
      bundle_edu_gemini_util_security,
      bundle_edu_gemini_util_trpc,
      bundle_jsky_app_ot_plugin,
      bundle_jsky_coords
    )

  lazy val bundle_jsky_coords =
    project.in(file("bundle/jsky.coords")).dependsOn(
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_shared_util,
      bundle_jsky_util,
      bundle_jsky_util_gui
    )

  lazy val bundle_jsky_elevation_plot =
    project.in(file("bundle/jsky.elevation.plot")).dependsOn(
      bundle_edu_gemini_catalog,
      bundle_edu_gemini_spModel_core,
      bundle_edu_gemini_util_skycalc,
      bundle_jsky_coords,
      bundle_jsky_util,
      bundle_jsky_util_gui
    )

  lazy val bundle_jsky_util =
    project.in(file("bundle/jsky.util"))

  lazy val bundle_jsky_util_gui =
    project.in(file("bundle/jsky.util.gui")).dependsOn(
      bundle_jsky_util,
      bundle_edu_gemini_util_ssl
    )

  // From OCS2

  lazy val bundle_edu_gemini_ags_client_api =
    project.in(file("bundle/edu.gemini.ags.client.api")).dependsOn(
      bundle_edu_gemini_model_p1
    )

  lazy val bundle_edu_gemini_ags_client_impl =
    project.in(file("bundle/edu.gemini.ags.client.impl")).dependsOn(
      bundle_edu_gemini_ags_client_api,
      bundle_edu_gemini_model_p1,
      bundle_edu_gemini_util_ssl
    )

  lazy val bundle_edu_gemini_gsa_client =
    project.in(file("bundle/edu.gemini.gsa.client")).dependsOn(
      bundle_edu_gemini_gsa_query,
      bundle_edu_gemini_model_p1
    )

  lazy val bundle_edu_gemini_model_p1 =
    project.in(file("bundle/edu.gemini.model.p1")).dependsOn(
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_edu_gemini_model_p1_pdf =
    project.in(file("bundle/edu.gemini.model.p1.pdf")).dependsOn(
      bundle_edu_gemini_util_pdf
    )

  lazy val bundle_edu_gemini_model_p1_submit =
    project.in(file("bundle/edu.gemini.model.p1.submit")).dependsOn(
      bundle_edu_gemini_model_p1
    )

  lazy val bundle_edu_gemini_model_p1_targetio =
    project.in(file("bundle/edu.gemini.model.p1.targetio")).dependsOn(
      bundle_edu_gemini_model_p1
    )

  lazy val bundle_edu_gemini_model_p1_visibility =
    project.in(file("bundle/edu.gemini.model.p1.visibility")).dependsOn(
      bundle_edu_gemini_model_p1
    )

  lazy val bundle_edu_gemini_p1monitor =
    project.in(file("bundle/edu.gemini.p1monitor")).dependsOn(
      bundle_edu_gemini_model_p1,
      bundle_edu_gemini_model_p1_pdf,
      bundle_edu_gemini_util_pdf,
      bundle_edu_gemini_util_osgi
    )

  lazy val bundle_edu_gemini_pit =
    project.in(file("bundle/edu.gemini.pit")).dependsOn(
      bundle_edu_gemini_ags_client_impl,
      bundle_edu_gemini_gsa_client,
      bundle_edu_gemini_horizons_server,
      bundle_edu_gemini_model_p1,
      bundle_edu_gemini_model_p1_pdf,
      bundle_edu_gemini_model_p1_submit,
      bundle_edu_gemini_model_p1_targetio,
      bundle_edu_gemini_model_p1_visibility,
      bundle_edu_gemini_ui_workspace,
      bundle_edu_gemini_shared_gui,
      bundle_edu_gemini_util_pdf,
      bundle_edu_gemini_spModel_core
    )

  lazy val bundle_edu_gemini_pit_launcher =
    project.in(file("bundle/edu.gemini.pit.launcher")).dependsOn(
      bundle_edu_gemini_pit,
      bundle_edu_gemini_ags_client_impl
    )

  lazy val bundle_edu_gemini_tools_p1pdfmaker =
    project.in(file("bundle/edu.gemini.tools.p1pdfmaker")).dependsOn(
      bundle_edu_gemini_model_p1_pdf
    )

  lazy val bundle_edu_gemini_util_pdf =
    project.in(file("bundle/edu.gemini.util.pdf"))

  lazy val bundle_edu_gemini_epics_acm =
    project.in(file("bundle/edu.gemini.epics.acm"))

  lazy val bundle_edu_gemini_ui_miglayout =
    project.in(file("bundle/edu.gemini.ui.miglayout"))
}
| arturog8m/ocs | project/OcsBundle.scala | Scala | bsd-3-clause | 24,742 |
package scalapb.grpc
import io.grpc.stub.StreamObserver
import io.grpc.{CallOptions, Channel, MethodDescriptor}
import scala.collection.JavaConverters._
import scala.concurrent.Future
object ClientCalls {

  /** Issues a unary RPC, blocking the calling thread until the response arrives. */
  def blockingUnaryCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      request: ReqT
  ): RespT =
    io.grpc.stub.ClientCalls.blockingUnaryCall(channel.newCall(method, options), request)

  /** Issues a unary RPC and returns the response as a Scala [[Future]]. */
  def asyncUnaryCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      request: ReqT
  ): Future[RespT] = {
    val guavaFuture =
      io.grpc.stub.ClientCalls.futureUnaryCall(channel.newCall(method, options), request)
    Grpc.guavaFuture2ScalaFuture(guavaFuture)
  }

  /** Issues a server-streaming RPC; the returned iterator blocks on each element. */
  def blockingServerStreamingCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      request: ReqT
  ): Iterator[RespT] = {
    val responses =
      io.grpc.stub.ClientCalls.blockingServerStreamingCall(channel.newCall(method, options), request)
    responses.asScala
  }

  /** Issues a server-streaming RPC, delivering responses to `responseObserver`. */
  def asyncServerStreamingCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      request: ReqT,
      responseObserver: StreamObserver[RespT]
  ): Unit =
    io.grpc.stub.ClientCalls
      .asyncServerStreamingCall(channel.newCall(method, options), request, responseObserver)

  /** Starts a client-streaming RPC; the caller feeds requests through the returned observer. */
  def asyncClientStreamingCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      responseObserver: StreamObserver[RespT]
  ): StreamObserver[ReqT] =
    io.grpc.stub.ClientCalls
      .asyncClientStreamingCall(channel.newCall(method, options), responseObserver)

  /** Starts a bidirectional-streaming RPC; requests go through the returned observer. */
  def asyncBidiStreamingCall[ReqT, RespT](
      channel: Channel,
      method: MethodDescriptor[ReqT, RespT],
      options: CallOptions,
      responseObserver: StreamObserver[RespT]
  ): StreamObserver[ReqT] =
    io.grpc.stub.ClientCalls
      .asyncBidiStreamingCall(channel.newCall(method, options), responseObserver)
}
| dotty-staging/ScalaPB | scalapb-runtime-grpc/src/main/scala/scalapb/grpc/ClientCalls.scala | Scala | apache-2.0 | 2,118 |
// Copyright (c) 2016 Yuichiroh Matsubayashi
package yuima.nuimo.action
sealed class Key(val code: Int, val char: String = null) {
  // Printable label for the key: the explicit character when one was supplied,
  // otherwise the case-object name produced by toString.
  val symbol: String = Option(char).getOrElse(toString)
}
object Key {

  /** Modifier keys that may be combined with a base [[Key]]. */
  object Modifier extends Enumeration {
    val command, control, shift, option = Value
  }

  // NOTE(review): the numeric codes below appear to be macOS virtual key codes
  // (Carbon kVK_* constants) — confirm for the target platform.

  // Number row
  case object Zero extends Key(29, "0")
  case object One extends Key(18, "1")
  case object Two extends Key(19, "2")
  case object Three extends Key(20, "3")
  case object Four extends Key(21, "4")
  case object Five extends Key(23, "5")
  case object Six extends Key(22, "6")
  case object Seven extends Key(26, "7")
  case object Eight extends Key(28, "8")
  case object Nine extends Key(25, "9")

  // Letter keys (symbol falls back to the case-object name)
  case object A extends Key(0)
  case object B extends Key(11)
  case object C extends Key(8)
  case object D extends Key(2)
  case object E extends Key(14)
  case object F extends Key(3)
  case object G extends Key(5)
  case object H extends Key(4)
  case object I extends Key(34)
  case object J extends Key(38)
  case object K extends Key(40)
  case object L extends Key(37)
  case object M extends Key(46)
  case object N extends Key(45)
  case object O extends Key(31)
  case object P extends Key(35)
  case object Q extends Key(12)
  case object R extends Key(15)
  case object S extends Key(1)
  case object T extends Key(17)
  case object U extends Key(32)
  case object V extends Key(9)
  case object W extends Key(13)
  case object X extends Key(7)
  case object Y extends Key(16)
  case object Z extends Key(6)

  // Punctuation and symbol keys
  case object SectionSign extends Key(10)
  case object Grave extends Key(50)
  case object Minus extends Key(27)
  case object Equal extends Key(24)
  case object LeftBracket extends Key(33)
  case object RightBracket extends Key(30)
  case object Semicolon extends Key(41)
  case object Quote extends Key(39)
  case object Comma extends Key(43, ",")
  case object Period extends Key(47, ".")
  case object Slash extends Key(44, "/")
  case object Backslash extends Key(42, "\\\\")

  // Numeric keypad
  case object Keypad0 extends Key(82, "0")
  case object Keypad1 extends Key(83, "1")
  case object Keypad2 extends Key(84, "2")
  case object Keypad3 extends Key(85, "3")
  case object Keypad4 extends Key(86, "4")
  case object Keypad5 extends Key(87, "5")
  case object Keypad6 extends Key(88, "6")
  case object Keypad7 extends Key(89, "7")
  case object Keypad8 extends Key(91, "8")
  case object Keypad9 extends Key(92, "9")
  case object KeypadDecimal extends Key(65)
  case object KeypadMultiply extends Key(67)
  case object KeypadPlus extends Key(69)
  case object KeypadDivide extends Key(75)
  case object KeypadMinus extends Key(78)
  case object KeypadEquals extends Key(81)
  case object KeypadClear extends Key(71)
  case object KeypadEnter extends Key(76)

  // Whitespace and editing keys
  case object Space extends Key(49)
  case object Return extends Key(36)
  case object Tab extends Key(48)
  case object Delete extends Key(51)
  case object ForwardDelete extends Key(117)
  case object Linefeed extends Key(52)
  case object Escape extends Key(53)

  // Modifier keys (left-hand side unless prefixed with Right)
  case object Command extends Key(55)
  case object Shift extends Key(56)
  case object CapsLock extends Key(57)
  case object Option extends Key(58)
  case object Control extends Key(59)
  case object RightShift extends Key(60)
  case object RightOption extends Key(61)
  case object RightControl extends Key(62)
  case object Function extends Key(63, "fn")

  // Function keys; BrightnessDown/Up occupy the F14/F15 code slots.
  case object F1 extends Key(122)
  case object F2 extends Key(120)
  case object F3 extends Key(99)
  case object F4 extends Key(118)
  case object F5 extends Key(96)
  case object F6 extends Key(97)
  case object F7 extends Key(98)
  case object F8 extends Key(100)
  case object F9 extends Key(101)
  case object F10 extends Key(109)
  case object F11 extends Key(103)
  case object F12 extends Key(111)
  case object F13 extends Key(105)
  case object BrightnessDown extends Key(107)
  case object BrightnessUp extends Key(113)
  case object F16 extends Key(106)
  case object F17 extends Key(64)
  case object F18 extends Key(79)
  case object F19 extends Key(80)
  case object F20 extends Key(90)

  // Media keys
  case object VolumeUp extends Key(72)
  case object VolumeDown extends Key(73)
  case object Mute extends Key(74)

  // Navigation keys
  case object Insert extends Key(114)
  case object Home extends Key(115)
  case object End extends Key(119)
  case object PageUp extends Key(116)
  case object PageDown extends Key(121)
  case object LeftArrow extends Key(123)
  case object RightArrow extends Key(124)
  case object DownArrow extends Key(125)
  case object UpArrow extends Key(126)
}
| Yuichiroh/nuimo-manager | src/main/scala/yuima/nuimo/action/Key.scala | Scala | mit | 4,678 |
// Resolver test fixture: checks that a private class is neither resolvable nor
// accessible after a wildcard import of its enclosing object. The inline
// `/* resolved */` and `/* accessible */` markers are test directives — do not edit.
object Holder {
  private class C
}
import Holder._
println(/* resolved: false */ C)
println(classOf[/* accessible: false */ C])
| ilinum/intellij-scala | testdata/resolve2/import/access/PrivateClassAll.scala | Scala | apache-2.0 | 132 |
import sbt._
/** Pinned version numbers for all third-party dependencies, referenced from [[Library]]. */
object Version {
  val lift = "3.1.0"
  val h2 = "1.4.196"
  val jetty = "9.4.6.v20170531"
  val servlet = "3.1.0"
  val logback = "1.2.3"
}
/** sbt module coordinates for each dependency, versioned via [[Version]]. */
object Library {
  val liftWebkit = "net.liftweb" %% "lift-webkit" % Version.lift
  val liftMapper = "net.liftweb" %% "lift-mapper" % Version.lift
  val h2 = "com.h2database" % "h2" % Version.h2
  val jettyRunner = "org.eclipse.jetty" % "jetty-runner" % Version.jetty
  val servlet = "javax.servlet" % "javax.servlet-api" % Version.servlet
  val logback = "ch.qos.logback" % "logback-classic" % Version.logback
}
/** Aggregated dependency lists consumed by the build definition. */
object Dependencies {
  import Library._

  // Full dependency list for the mc-price module; servlet-api is provided
  // by the container at runtime.
  val mcPrice =
    liftWebkit ::
      liftMapper ::
      h2 ::
      (servlet % "provided") ::
      logback ::
      Nil
}
} | davenatx/mc-price | project/dependencies.scala | Scala | gpl-3.0 | 805 |
package com.equalinformation.scala.programs.test
/**
* Created by bpupadhyaya on 6/13/16.
*/
/** Placeholder test object; no behaviour implemented yet. */
object TestObj4 {
  // TODO: implement the intended test behaviour.
}
| bpupadhyaya/scala-programs-collection | scala-programs-collection/src/test/scala-2.11/com/equalinformation/scala/programs/test/TestObj.scala | Scala | apache-2.0 | 127 |
package com.arcusys.valamis.util.mustache
import scala.io.Source
/**
 * A compiled Mustache template.
 *
 * The template text is parsed once into a token tree (`root`); the tree can
 * then be rendered repeatedly against different context objects and partials.
 * Public zero-arg and single-String-arg methods declared on subclasses are
 * exposed to templates as callable "globals" via reflection (see `globals`).
 */
class Mustache(
  root: Token) extends MustacheHelperSupport {

  /** Parses `source` using the given open/close delimiters (defaults "{{" / "}}"). */
  def this(source: Source, open: String = "{{", close: String = "}}") =
    this((new Parser {
      val src = source
      var otag = open
      var ctag = close
    }).parse())

  /** Parses a template from a string using the default delimiters. */
  def this(str: String) = this(Source.fromString(str))

  /** Parses a template from a string using custom open/close delimiters. */
  def this(
    str: String, open: String, close: String) = this(Source.fromString(str), open, close)

  // The parsed token tree that render/product walk.
  private val compiledTemplate = root

  // Helper methods of this (sub)class made callable from templates.
  // Reflectively collects methods taking either no arguments or exactly one
  // String argument, excluding Object housekeeping methods and the synthetic
  // default-argument accessors (render$default*, product$default*, init$default*).
  val globals: Map[String, Any] =
    {
      val excludedGlobals = List("wait", "toString", "hashCode", "getClass", "notify", "notifyAll")
      Map(
        (this.getClass().getMethods
          .filter(x => {
            val name = x.getName
            val pt = x.getParameterTypes
            (!name.startsWith("render$default")
            ) && (
              !name.startsWith("product$default")
            ) && (
              !name.startsWith("init$default")
            ) && (
              !excludedGlobals.contains(name)
            ) && ((
              pt.length == 0
            ) || (
              pt.length == 1
                && pt(0) == classOf[String]
            ))
          })
          .map(x => {
            // Wrap each reflected method as a Function0 or String => Any thunk.
            x.getName ->
              (if (x.getParameterTypes.length == 0) () => { x.invoke(this) }
              else (str: String) => { x.invoke(this, str) })
          })): _*
      )
    }

  /** Renders the template against `context` and returns the output as a String. */
  def render(
    context: Any = null, partials: Map[String, Mustache] = Map(), callstack: List[Any] = List(this)): String = product(context, partials, callstack).toString

  /** Renders the template, returning the intermediate [[TokenProduct]]. */
  def product(
    context: Any = null, partials: Map[String, Mustache] = Map(), callstack: List[Any] = List(this)): TokenProduct = compiledTemplate.render(context, partials, callstack)
}
| ViLPy/Valamis | valamis-util/src/main/scala/com/arcusys/valamis/util/mustache/Mustache.scala | Scala | lgpl-3.0 | 2,127 |
package io.getquill.context.jdbc.sqlite
import io.getquill.context.sql.DistinctSpec
import org.scalatest.matchers.should.Matchers._
/**
 * Runs the shared SQL DISTINCT test suite against the SQLite JDBC context.
 *
 * The expected results and query definitions live in [[DistinctSpec]];
 * each case here simply executes the query and compares result sets
 * (order-insensitive except where noted).
 */
class DistinctJdbcSpec extends DistinctSpec {
  val context = testContext
  import testContext._

  // Reset and repopulate the tables used by the distinct queries before all tests.
  override def beforeAll = {
    testContext.transaction {
      testContext.run(query[Couple].delete)
      testContext.run(query[Person].filter(_.age > 0).delete)
      testContext.run(liftQuery(peopleEntries).foreach(p => peopleInsert(p)))
      testContext.run(liftQuery(couplesEntries).foreach(p => couplesInsert(p)))
    }
    ()
  }
  "Ex 1 Distinct One Field" in {
    testContext.run(`Ex 1 Distinct One Field`) should contain theSameElementsAs `Ex 1 Distinct One Field Result`
  }
  "Ex 2 Distinct Two Field Tuple`" in {
    testContext.run(`Ex 2 Distinct Two Field Tuple`) should contain theSameElementsAs `Ex 2 Distinct Two Field Tuple Result`
  }
  "Ex 2a Distinct Two Field Tuple Same Element`" in {
    testContext.run(`Ex 2a Distinct Two Field Tuple Same Element`) should contain theSameElementsAs `Ex 2a Distinct Two Field Tuple Same Element Result`
  }
  "Ex 3 Distinct Two Field Case Class`" in {
    testContext.run(`Ex 3 Distinct Two Field Case Class`) should contain theSameElementsAs `Ex 3 Distinct Two Field Case Class Result`
  }
  "Ex 4-base non-Distinct Subquery`" in {
    testContext.run(`Ex 4-base non-Distinct Subquery`) should contain theSameElementsAs `Ex 4-base non-Distinct Subquery Result`
  }
  "Ex 4 Distinct Subquery`" in {
    testContext.run(`Ex 4 Distinct Subquery`) should contain theSameElementsAs `Ex 4 Distinct Subquery Result`
  }
  "Ex 5 Distinct Subquery with Map Single Field" in {
    testContext.run(`Ex 5 Distinct Subquery with Map Single Field`) should contain theSameElementsAs `Ex 5 Distinct Subquery with Map Single Field Result`
  }
  "Ex 6 Distinct Subquery with Map Multi Field" in {
    testContext.run(`Ex 6 Distinct Subquery with Map Multi Field`) should contain theSameElementsAs `Ex 6 Distinct Subquery with Map Multi Field Result`
  }
  "Ex 7 Distinct Subquery with Map Multi Field Tuple" in {
    testContext.run(`Ex 7 Distinct Subquery with Map Multi Field Tuple`) should contain theSameElementsAs `Ex 7 Distinct Subquery with Map Multi Field Tuple Result`
  }
  // Ordering matters here, so this case uses mustEqual rather than a set comparison.
  "Ex 8 Distinct With Sort" in {
    testContext.run(`Ex 8 Distinct With Sort`) mustEqual `Ex 8 Distinct With Sort Result`
  }
}
| getquill/quill | quill-jdbc/src/test/scala/io/getquill/context/jdbc/sqlite/DistinctJdbcSpec.scala | Scala | apache-2.0 | 2,391 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import java.util
import org.locationtech.jts.geom.Geometry
import org.geotools.feature.NameImpl
import org.geotools.feature.simple.SimpleFeatureImpl
import org.geotools.filter.identity.FeatureIdImpl
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.Property
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
/**
 * Behavioural tests for `AvroSimpleFeature`: attribute type coercion from
 * strings, property lookup, validation, null handling, and equality — in
 * several cases cross-checked against GeoTools' `SimpleFeatureImpl`.
 */
class AvroSimpleFeatureTest extends Specification {
  "AvroSimpleFeature" should {
    // String attribute values should be coerced to the declared schema types.
    "properly convert attributes that are set as strings" in {
      val sft = SimpleFeatureTypes.createType("testType", "a:Integer,b:Date,*geom:Point:srid=4326")
      val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      f.setAttribute(0,"1")
      f.setAttribute(1,"2013-01-02T00:00:00.000Z")
      f.setAttribute(2,"POINT(45.0 49.0)")
      f.getAttribute(0) must beAnInstanceOf[java.lang.Integer]
      f.getAttribute(1) must beAnInstanceOf[java.util.Date]
      f.getAttribute(2) must beAnInstanceOf[Geometry]
    }
    "properly return all requested properties" in {
      val sft = SimpleFeatureTypes.createType("testType", "a:Integer,*geom:Point:srid=4326,d:Double,e:Boolean,f:String")
      val valueList = List("1", "POINT (45 49)", "1.01", "true", "Test String")
      val nameStringList = List("a", "geom", "d", "e", "f")
      val nameList = nameStringList.map(new NameImpl(_))
      val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      //Setup sft
      for((tempV, index) <- valueList.view.zipWithIndex) {
        f.setAttribute(index, tempV)
      }
      //Test getProperties(name: String)
      for((name, value) <- nameStringList.view.zip(valueList)) {
        val tempProperty = f.getProperties(name)
        tempProperty.head.getName.toString mustEqual name
        tempProperty.head.getValue.toString mustEqual value
      }
      //Test getProperties(name: Name)
      for((name, value) <- nameList.view.zip(valueList)) {
        val tempProperty = f.getProperties(name)
        tempProperty.head.getName mustEqual name
        tempProperty.head.getValue.toString mustEqual value
      }
      f.getProperties must beAnInstanceOf[util.Collection[Property]]
      f.getProperties("a") must beAnInstanceOf[util.Collection[Property]]
      f.getProperties("a").head.getValue must not(throwA [org.opengis.feature.IllegalAttributeException])
      val prop = f.getProperty("a")
      prop must not beNull;
      prop.getName.getLocalPart mustEqual("a")
      prop.getValue mustEqual(1)
    }
    "properly validate a correct object" in {
      val sft = SimpleFeatureTypes.createType("testType", "a:Integer,b:Date,*geom:Point:srid=4326")
      val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      f.setAttribute(0,"1")
      f.setAttribute(1,"2013-01-02T00:00:00.000Z") // this date format should be converted
      f.setAttribute(2,"POINT(45.0 49.0)")
      f.validate must not(throwA [org.opengis.feature.IllegalAttributeException])
    }
    // Exercises unusual type names: underscores, colons, and unicode characters.
    "properly validate multiple AvroSimpleFeature Objects with odd names and unicode characters, including colons" in {
      val typeList = List("tower_u1234", "tower:type", "βδ½ ε₯½:δΈηβ", "tower_β½", "β½_β½:β½", "_β½", "a__u1234")
      for(typeName <- typeList) {
        val sft = SimpleFeatureTypes.createType(typeName, "a⬨_⬨b:Integer,ββcrazyββ_βname&suchβΏ:Date,*geom:Point:srid=4326")
        val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
        f.setAttribute(0,"1")
        f.setAttribute(1,"2013-01-02T00:00:00.000Z") // this date format should be converted
        f.setAttribute(2,"POINT(45.0 49.0)")
        f.validate must not(throwA[org.opengis.feature.IllegalAttributeException])
      }
      true must beTrue
    }
    // An un-converted string in a Date slot should fail validation.
    "fail to validate a correct object" in {
      val sft = SimpleFeatureTypes.createType("testType", "a:Integer,b:Date,*geom:Point:srid=4326")
      val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      f.setAttribute(0,"1")
      f.setAttributeNoConvert(1, "2013-01-02T00:00:00.000Z") // don't convert
      f.setAttribute(2,"POINT(45.0 49.0)")
      f.validate must throwA [org.opengis.feature.IllegalAttributeException] //should throw it
    }
    "properly convert empty strings to null" in {
      val sft = SimpleFeatureTypes.createType(
        "testType",
        "a:Integer,b:Float,c:Double,d:Boolean,e:Date,f:UUID,g:Point"+
          ",h:LineString,i:Polygon,j:MultiPoint,k:MultiLineString"+
          ",l:MultiPolygon,m:GeometryCollection"
      )
      val f = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      f.setAttribute("a","")
      f.setAttribute("b","")
      f.setAttribute("c","")
      f.setAttribute("d","")
      f.setAttribute("e","")
      f.setAttribute("f","")
      f.setAttribute("g","")
      f.setAttribute("h","")
      f.setAttribute("i","")
      f.setAttribute("j","")
      f.setAttribute("k","")
      f.setAttribute("l","")
      f.setAttribute("m","")
      f.getAttributes.foreach { v => v must beNull}
      f.validate must not(throwA [org.opengis.feature.IllegalAttributeException])
    }
    "give back a null when an attribute doesn't exist" in {
      // Verify that AvroSimpleFeature returns null for attributes that do not exist like SimpleFeatureImpl
      val sft = SimpleFeatureTypes.createType("avrotesttype", "a:Integer,b:String")
      val sf = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf.getAttribute("c") must not(throwA[NullPointerException])
      sf.getAttribute("c") should beNull
      val oldSf = new SimpleFeatureImpl(List(null, null), sft, new FeatureIdImpl("fakeid"))
      oldSf.getAttribute("c") should beNull
    }
    "give back a null when a property doesn't exist" in {
      // Verify that AvroSimpleFeature returns null for properties that do not exist like SimpleFeatureImpl
      val sft = SimpleFeatureTypes.createType("avrotesttype", "a:Integer,b:String")
      val sf = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf.getProperty("c") must not(throwA[NullPointerException])
      sf.getProperty("c") should beNull
      val oldSf = new SimpleFeatureImpl(List(null, null), sft, new FeatureIdImpl("fakeid"))
      oldSf.getProperty("c") should beNull
    }
    "give back a property when a property exists but the value is null" in {
      // Verify that AvroSimpleFeature returns null for properties that do not exist like SimpleFeatureImpl
      val sft = SimpleFeatureTypes.createType("avrotesttype", "a:Integer,b:String")
      val sf = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf.getProperty("b") must not(throwA[NullPointerException])
      sf.getProperty("b") should not beNull
      val oldSf = new SimpleFeatureImpl(List(null, null), sft, new FeatureIdImpl("fakeid"))
      oldSf.getProperty("b") should not beNull
    }
    "give back a null when the property value is null" in {
      // Verify that AvroSimpleFeature returns null for properties that do not exist like SimpleFeatureImpl
      val sft = SimpleFeatureTypes.createType("avrotesttype", "a:Integer,b:String")
      val sf = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf.getProperty("b").getValue must not(throwA[NullPointerException])
      sf.getProperty("b").getValue should beNull
      val oldSf = new SimpleFeatureImpl(List(null, null), sft, new FeatureIdImpl("fakeid"))
      oldSf.getProperty("b").getValue should beNull
    }
    // Two features with the same id, type, and attribute values must be equal.
    "implement equals" in {
      val sft = SimpleFeatureTypes.createType("avrotesttype", "a:Integer,b:String,*g:Geometry")
      val sf1 = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf1.setAttribute(0, java.lang.Integer.valueOf(1))
      sf1.setAttribute(1, "b string")
      sf1.setAttribute(2, "POINT(10 15)")
      val sf2 = new AvroSimpleFeature(new FeatureIdImpl("fakeid"), sft)
      sf2.setAttribute(0, java.lang.Integer.valueOf(1))
      sf2.setAttribute(1, "b string")
      sf2.setAttribute(2, "POINT(10 15)")
      sf1 mustEqual(sf2)
      sf1 == sf2 must beTrue
    }
  }
}
| aheyne/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/AvroSimpleFeatureTest.scala | Scala | apache-2.0 | 8,700 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import java.time.LocalDateTime
/**
 * Source of "now" that can be overridden for testing via the
 * `feature.system-date` JVM system property.
 */
object SystemDate {

  /** Current wall-clock time; a fresh value on every call. */
  def dateTimeNow: LocalDateTime = LocalDateTime.now()

  /**
   * Returns the configured system date: when the `feature.system-date`
   * property is set to a non-empty ISO-8601 date-time it is parsed and
   * returned, otherwise the real current time is used.
   */
  def getSystemDate: LocalDateTime =
    sys.props.get("feature.system-date") match {
      case Some(date) if date.nonEmpty => LocalDateTime.parse(date)
      case _                           => dateTimeNow
    }
}
| hmrc/vat-registration-frontend | app/utils/SystemDate.scala | Scala | apache-2.0 | 898 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zouzias.spark.lucenerdd.analyzers
import org.apache.lucene.analysis.en.EnglishAnalyzer
import org.apache.lucene.analysis.el.GreekAnalyzer
import org.apache.lucene.analysis.de.GermanAnalyzer
import org.scalatest.BeforeAndAfterEach
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest._
import matchers.should._
/**
 * Verifies that `AnalyzerConfigurable.getAnalyzer` resolves both short
 * language codes (e.g. "en") and fully-qualified Lucene analyzer class names
 * to the expected analyzer instances.
 */
class AnalyzersConfigurableSpec extends AnyFlatSpec with Matchers
  with BeforeAndAfterEach
  with AnalyzerConfigurable {
  "AnalyzersConfigurable.getAnalyzer" should "return english analyzer with 'en' input" in {
    val englishAnalyzer = getAnalyzer(Some("en"))
    englishAnalyzer shouldNot equal(null)
    englishAnalyzer.isInstanceOf[EnglishAnalyzer] should equal(true)
  }
  "AnalyzersConfigurable.getAnalyzer" should
    "return custom test analyzer with 'org.apache.lucene.analysis.el.GreekAnalyzer'" in {
      val greekAnalyzer = getAnalyzer(Some("org.apache.lucene.analysis.el.GreekAnalyzer"))
      greekAnalyzer shouldNot equal(null)
      greekAnalyzer.isInstanceOf[GreekAnalyzer] should equal(true)
    }
  "AnalyzersConfigurable.getAnalyzer" should
    "return custom test analyzer with 'org.apache.lucene.analysis.de.GermanAnalyzer'" in {
      val deutschAnalyzer = getAnalyzer(Some("org.apache.lucene.analysis.de.GermanAnalyzer"))
      deutschAnalyzer shouldNot equal(null)
      deutschAnalyzer.isInstanceOf[GermanAnalyzer] should equal(true)
    }
}
| zouzias/spark-lucenerdd | src/test/scala/org/zouzias/spark/lucenerdd/analyzers/AnalyzersConfigurableSpec.scala | Scala | apache-2.0 | 2,195 |
package com.tuvistavie.xserver.backend.model
/** A server resource identified by an integer id. */
class Resource(val id: Int)

/**
 * Global registry mapping resource ids to their [[Resource]] instances.
 *
 * NOTE(review): the registry is a plain `var` with no synchronization —
 * callers must ensure single-threaded access.
 */
object Resource {
  // Currently allocated resources, keyed by id.
  private[this] var allocated = Map.empty[Int, Resource]

  /** True when no resource currently occupies `id`. */
  def canAllocate(id: Int): Boolean = !isAllocated(id)

  /** True when some resource is registered under `id`. */
  def isAllocated(id: Int): Boolean = allocated.contains(id)

  /** Registers `resource` under `id`; returns false when the id is already taken. */
  def allocate(id: Int, resource: Resource): Boolean =
    canAllocate(id) && { allocated += id -> resource; true }

  /** Removes the entry registered under `id`; returns false when there was none. */
  def unallocate(id: Int, resource: Resource): Boolean =
    isAllocated(id) && { allocated -= id; true }
}
}
| tuvistavie/scala-x-server | backend/src/main/scala/com/tuvistavie/xserver/model/Resource.scala | Scala | mit | 570 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
import javacompat.cluster.{JavaNode, Node => JNode}
import com.linkedin.norbert.cluster.{Node => SNode}
/**
 * Implicit conversions bridging the Scala cluster API and its Java-compatible
 * facade: Set/node conversions in both directions.
 *
 * NOTE(review): these use the pre-2.13 `scala.collection.JavaConversions`
 * API in places, which is deprecated in newer Scala versions.
 */
package object javacompat {

  /** Copies a Scala Set into a new mutable `java.util.HashSet`. */
  implicit def scalaSetToJavaSet[T](set: Set[T]): java.util.Set[T] = {
    val s = new java.util.HashSet[T]
    set.foreach { elem => s.add(elem) }
    s
  }

  /** Copies a Java Set into an immutable Scala Set. */
  implicit def javaSetToImmutableSet[T](nodes: java.util.Set[T]): Set[T] = {
    collection.JavaConversions.asScalaSet(nodes).foldLeft(Set[T]()) { (set, n) => set + n }
  }

  /** Unboxes a Java Set of Integers into an immutable Scala Set of Int. */
  implicit def javaIntegerSetToScalaIntSet(set: java.util.Set[java.lang.Integer]): Set[Int] = {
    collection.JavaConversions.asScalaSet(set).foldLeft(collection.immutable.Set.empty[Int]) { _ + _.intValue }
  }

  /** Boxes a Scala Set of Int into a new `java.util.HashSet` of Integer. */
  implicit def scalaIntSetToJavaIntegerSet(set: Set[Int]): java.util.Set[java.lang.Integer] = {
    val result = new java.util.HashSet[java.lang.Integer](set.size)
    set.foreach (result add _)
    result
  }

  /** Wraps a Scala cluster Node in its Java facade; preserves null. */
  implicit def scalaNodeToJavaNode(node: SNode): JNode = {
    if (node == null) null else JavaNode(node)
  }

  /** Converts a Java facade Node back to the Scala Node; preserves null. */
  implicit def javaNodeToScalaNode(node: JNode): SNode = {
    if (node == null) null
    else {
      // Manually unbox the partition ids to avoid boxing conversions.
      val iter = node.getPartitionIds.iterator
      var partitionIds = Set.empty[Int]
      while(iter.hasNext) {
        partitionIds += iter.next.intValue
      }
      SNode(node.getId, node.getUrl, node.isAvailable, partitionIds)
    }
  }

  /** Converts a Set of Scala Nodes to a Java Set of Java facade Nodes. */
  implicit def convertSNodeSet(set: Set[SNode]): java.util.Set[JNode] = {
    var result = new java.util.HashSet[JNode](set.size)
    set.foreach(elem => result.add(scalaNodeToJavaNode(elem)))
    result
  }

  /** Converts a Java Set of Java facade Nodes to an immutable Scala Set. */
  implicit def convertJNodeSet(set: java.util.Set[JNode]): Set[SNode] = {
    val iter = set.iterator
    var result = Set.empty[SNode]
    while(iter.hasNext)
      result += javaNodeToScalaNode(iter.next)
    result
  }
}
| linkedin-sna/norbert | java-cluster/src/main/scala/com/linkedin/norbert/javacompat/package.scala | Scala | apache-2.0 | 2,406 |
package sample.snippet
import scala.xml.{NodeSeq,Text}
import net.liftweb.http.js.JsExp
import net.liftweb.http.js.JsCmds.{Alert,Script,Run,SetHtml,JsIf}
import net.liftweb.http.js.JE.{JsNotEq,Num}
class BasicJavascript {
  /** Lift snippet: emits a script tag that immediately shows a JavaScript alert. */
  def one(xhtml: NodeSeq): NodeSeq =
    Script(Alert("1: Important Alert Goes Here!"))
  /** Lift snippet: replaces the element with id "replaceme", then shows an alert. */
  def two(xhtml: NodeSeq): NodeSeq = Script(
    SetHtml("replaceme",Text("I have been replaced!")) &
    Alert("2: Text Replaced")
  )
  /** Lift snippet: emits a client-side conditional that alerts when 1 != 2 (always true). */
  def three(xhtml: NodeSeq): NodeSeq =
    Script(Run(
      JsIf(JsNotEq(Num(1), Num(2)), Alert("3: 1 does not equal 2!")).toJsCmd
    ))
} | timperrett/lift-in-action | chapter-9/src/main/scala/sample/snippet/BasicJavaScript.scala | Scala | apache-2.0 | 600 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{execution, AnalysisException, Strategy}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.StreamingQuery
/**
* Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting
* with the query planner and is not designed to be stable across spark releases. Developers
* writing libraries should instead consider using the stable APIs provided in
* [[org.apache.spark.sql.sources]]
*/
abstract class SparkStrategy extends GenericStrategy[SparkPlan] {
  // Defers planning of a child logical plan by wrapping it in a PlanLater
  // placeholder, to be replaced once the planner visits that subtree.
  override protected def planLater(plan: LogicalPlan): SparkPlan = PlanLater(plan)
}
/**
 * Placeholder physical node standing in for a logical subtree that has not
 * been planned yet. It only forwards the logical plan's output attributes
 * and must be replaced by the planner before execution.
 */
case class PlanLater(plan: LogicalPlan) extends LeafExecNode {

  override def output: Seq[Attribute] = plan.output

  protected override def doExecute(): RDD[InternalRow] = {
    // Executing a placeholder is a planner bug — it should have been replaced.
    throw new UnsupportedOperationException()
  }
}
abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
self: SparkPlanner =>
  /**
   * Plans special cases of limit operators, e.g. fusing a LIMIT over a global
   * sort into a single `TakeOrderedAndProjectExec` and handling the top-level
   * `ReturnAnswer` wrapper for collect-style queries.
   */
  object SpecialLimits extends Strategy {
    override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      // Top of the plan for collect()/take(): plan the limit specially.
      case ReturnAnswer(rootPlan) => rootPlan match {
        // LIMIT over a global sort -> top-K, optionally with projection.
        case Limit(IntegerLiteral(limit), Sort(order, true, child)) =>
          TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
        case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child))) =>
          TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil
        case Limit(IntegerLiteral(limit), child) =>
          // With whole stage codegen, Spark releases resources only when all the output data of the
          // query plan are consumed. It's possible that `CollectLimitExec` only consumes a little
          // data from child plan and finishes the query without releasing resources. Here we wrap
          // the child plan with `LocalLimitExec`, to stop the processing of whole stage codegen and
          // trigger the resource releasing work, after we consume `limit` rows.
          CollectLimitExec(limit, LocalLimitExec(limit, planLater(child))) :: Nil
        case other => planLater(other) :: Nil
      }
      // Same top-K fusions when the limit is not at the very top.
      case Limit(IntegerLiteral(limit), Sort(order, true, child)) =>
        TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
      case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child))) =>
        TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil
      case _ => Nil
    }
  }
/**
* Select the proper physical plan for join based on joining keys and size of logical plan.
*
* At first, uses the [[ExtractEquiJoinKeys]] pattern to find joins where at least some of the
* predicates can be evaluated by matching join keys. If found, Join implementations are chosen
* with the following precedence:
*
* - Broadcast: if one side of the join has an estimated physical size that is smaller than the
* user-configurable [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold
* or if that side has an explicit broadcast hint (e.g. the user applied the
* [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame), then that side
* of the join will be broadcasted and the other side will be streamed, with no shuffling
* performed. If both sides of the join are eligible to be broadcasted then the
* - Shuffle hash join: if the average size of a single partition is small enough to build a hash
* table.
* - Sort merge: if the matching join keys are sortable.
*
* If there is no joining keys, Join implementations are chosen with the following precedence:
* - BroadcastNestedLoopJoin: if one side of the join could be broadcasted
* - CartesianProduct: for Inner join
* - BroadcastNestedLoopJoin
*/
  object JoinSelection extends Strategy with PredicateHelper {

    /**
     * Matches a plan whose output should be small enough to be used in broadcast join.
     */
    private def canBroadcast(plan: LogicalPlan): Boolean = {
      plan.stats.hints.broadcast ||
        (plan.stats.sizeInBytes >= 0 &&
          plan.stats.sizeInBytes <= conf.autoBroadcastJoinThreshold)
    }

    /**
     * Matches a plan whose single partition should be small enough to build a hash table.
     *
     * Note: this assume that the number of partition is fixed, requires additional work if it's
     * dynamic.
     */
    private def canBuildLocalHashMap(plan: LogicalPlan): Boolean = {
      plan.stats.sizeInBytes < conf.autoBroadcastJoinThreshold * conf.numShufflePartitions
    }

    /**
     * Returns whether plan a is much smaller (3X) than plan b.
     *
     * The cost to build hash map is higher than sorting, we should only build hash map on a table
     * that is much smaller than other one. Since we does not have the statistic for number of rows,
     * use the size of bytes here as estimation.
     */
    private def muchSmaller(a: LogicalPlan, b: LogicalPlan): Boolean = {
      a.stats.sizeInBytes * 3 <= b.stats.sizeInBytes
    }

    // Join types whose right side may be used as the hash/broadcast build side.
    private def canBuildRight(joinType: JoinType): Boolean = joinType match {
      case _: InnerLike | LeftOuter | LeftSemi | LeftAnti => true
      case j: ExistenceJoin => true
      case _ => false
    }

    // Join types whose left side may be used as the hash/broadcast build side.
    private def canBuildLeft(joinType: JoinType): Boolean = joinType match {
      case _: InnerLike | RightOuter => true
      case _ => false
    }

    def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {

      // --- BroadcastHashJoin --------------------------------------------------------------------

      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if canBuildRight(joinType) && canBroadcast(right) =>
        Seq(joins.BroadcastHashJoinExec(
          leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))

      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if canBuildLeft(joinType) && canBroadcast(left) =>
        Seq(joins.BroadcastHashJoinExec(
          leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))

      // --- ShuffledHashJoin ---------------------------------------------------------------------

      // NOTE(review): guards parse as (A && B && C && D) || !isOrderable due to
      // operator precedence — the hash join is also forced when keys can't be sorted.
      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if !conf.preferSortMergeJoin && canBuildRight(joinType) && canBuildLocalHashMap(right)
          && muchSmaller(right, left) ||
          !RowOrdering.isOrderable(leftKeys) =>
        Seq(joins.ShuffledHashJoinExec(
          leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))

      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if !conf.preferSortMergeJoin && canBuildLeft(joinType) && canBuildLocalHashMap(left)
          && muchSmaller(left, right) ||
          !RowOrdering.isOrderable(leftKeys) =>
        Seq(joins.ShuffledHashJoinExec(
          leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))

      // --- SortMergeJoin ------------------------------------------------------------

      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if RowOrdering.isOrderable(leftKeys) =>
        joins.SortMergeJoinExec(
          leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil

      // --- Without joining keys ------------------------------------------------------------

      // Pick BroadcastNestedLoopJoin if one side could be broadcasted
      case j @ logical.Join(left, right, joinType, condition)
        if canBuildRight(joinType) && canBroadcast(right) =>
        joins.BroadcastNestedLoopJoinExec(
          planLater(left), planLater(right), BuildRight, joinType, condition) :: Nil
      case j @ logical.Join(left, right, joinType, condition)
        if canBuildLeft(joinType) && canBroadcast(left) =>
        joins.BroadcastNestedLoopJoinExec(
          planLater(left), planLater(right), BuildLeft, joinType, condition) :: Nil

      // Pick CartesianProduct for InnerJoin
      case logical.Join(left, right, _: InnerLike, condition) =>
        joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil

      case logical.Join(left, right, joinType, condition) =>
        // Broadcast the smaller side as estimated by size in bytes.
        val buildSide =
          if (right.stats.sizeInBytes <= left.stats.sizeInBytes) {
            BuildRight
          } else {
            BuildLeft
          }
        // This join could be very slow or OOM
        joins.BroadcastNestedLoopJoinExec(
          planLater(left), planLater(right), buildSide, joinType, condition) :: Nil

      // --- Cases where this strategy does not apply ---------------------------------------------

      case _ => Nil
    }
  }
/**
* Used to plan streaming aggregation queries that are computed incrementally as part of a
* [[StreamingQuery]]. Currently this rule is injected into the planner
* on-demand, only when planning in a [[org.apache.spark.sql.execution.streaming.StreamExecution]]
*/
object StatefulAggregationStrategy extends Strategy {
  // Only streaming plans are handled here; batch aggregation is planned elsewhere.
  override def apply(plan: LogicalPlan): Seq[SparkPlan] =
    if (!plan.isStreaming) {
      Nil
    } else {
      plan match {
        case EventTimeWatermark(columnName, delay, child) =>
          Seq(EventTimeWatermarkExec(columnName, delay, planLater(child)))
        case PhysicalAggregation(groupingExprs, aggExprs, rewrittenResultExprs, child) =>
          aggregate.AggUtils.planStreamingAggregation(
            groupingExprs,
            aggExprs,
            rewrittenResultExprs,
            planLater(child))
        case _ => Nil
      }
    }
}
/**
* Used to plan the streaming deduplicate operator.
*/
object StreamingDeduplicationStrategy extends Strategy {
  // Maps a streaming Deduplicate onto its stateful physical operator; batch
  // deduplication does not match here and yields Nil, as before.
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case Deduplicate(keys, child) =>
      if (child.isStreaming) Seq(StreamingDeduplicateExec(keys, planLater(child))) else Nil
    case _ => Nil
  }
}
// Plans joins where both sides are streaming. Only equi-joins are supported;
// stream-stream joins without any equality predicate are rejected.
object StreamingJoinStrategy extends Strategy {
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = {
    plan match {
      // Equi-join between two streams: use the symmetric hash join operator.
      case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
        if left.isStreaming && right.isStreaming =>
        new StreamingSymmetricHashJoinExec(
          leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil
      // Any other stream-stream join has no equality predicate and cannot be executed.
      case Join(left, right, _, _) if left.isStreaming && right.isStreaming =>
        throw new AnalysisException(
          "Stream stream joins without equality predicate is not supported", plan = Some(plan))
      case _ => Nil
    }
  }
}
/**
* Used to plan the aggregate operator for expressions based on the AggregateFunction2 interface.
*/
object Aggregation extends Strategy {
  def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case PhysicalAggregation(
        groupingExpressions, aggregateExpressions, resultExpressions, child) =>
      // Separate DISTINCT aggregates from the rest; they need a different plan shape.
      val (functionsWithDistinct, functionsWithoutDistinct) =
        aggregateExpressions.partition(_.isDistinct)
      if (functionsWithDistinct.map(_.aggregateFunction.children).distinct.length > 1) {
        // This is a sanity check. We should not reach here when we have multiple distinct
        // column sets. Our MultipleDistinctRewriter should take care of this case.
        sys.error("You hit a query analyzer bug. Please report your query to " +
          "Spark user mailing list.")
      }
      val aggregateOperator =
        if (functionsWithDistinct.isEmpty) {
          aggregate.AggUtils.planAggregateWithoutDistinct(
            groupingExpressions,
            aggregateExpressions,
            resultExpressions,
            planLater(child))
        } else {
          // After the sanity check above, all distinct aggregates share one column set.
          aggregate.AggUtils.planAggregateWithOneDistinct(
            groupingExpressions,
            functionsWithDistinct,
            functionsWithoutDistinct,
            resultExpressions,
            planLater(child))
        }
      aggregateOperator
    case _ => Nil
  }
}
// A single-partition RDD containing one empty InternalRow; used as the input of
// leaf plans that produce exactly one row (see the OneRowRelation case below).
protected lazy val singleRowRdd = sparkContext.parallelize(Seq(InternalRow()), 1)
// Plans scans over cached (in-memory) relations, applying column pruning and
// filtering through pruneFilterProject.
object InMemoryScans extends Strategy {
  def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case PhysicalOperation(projectList, filters, mem: InMemoryRelation) =>
      pruneFilterProject(
        projectList,
        filters,
        identity[Seq[Expression]], // All filters still need to be evaluated.
        InMemoryTableScanExec(_, filters, mem)) :: Nil
    case _ => Nil
  }
}
/**
* This strategy is just for explaining `Dataset/DataFrame` created by `spark.readStream`.
* It won't affect the execution, because `StreamingRelation` will be replaced with
* `StreamingExecutionRelation` in `StreamingQueryManager` and `StreamingExecutionRelation` will
* be replaced with the real relation using the `Source` in `StreamExecution`.
*/
object StreamingRelationStrategy extends Strategy {
  // Produces a placeholder physical node so streaming relations can appear in
  // an explained plan; the real source is substituted during stream execution.
  def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case rel: StreamingRelation =>
      Seq(StreamingRelationExec(rel.sourceName, rel.output))
    case rel: StreamingExecutionRelation =>
      Seq(StreamingRelationExec(rel.toString, rel.output))
    case _ => Nil
  }
}
/**
* Strategy to convert [[FlatMapGroupsWithState]] logical operator to physical operator
* in streaming plans. Conversion for batch plans is handled by [[BasicOperators]].
*/
object FlatMapGroupsWithStateStrategy extends Strategy {
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case FlatMapGroupsWithState(
        func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, stateEnc, outputMode, _,
        timeout, child) =>
      // NOTE(review): batchTimestampMs and eventTimeWatermark are passed as None here;
      // presumably they are filled in later during incremental execution — confirm.
      val execPlan = FlatMapGroupsWithStateExec(
        func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, None, stateEnc, outputMode,
        timeout, batchTimestampMs = None, eventTimeWatermark = None, planLater(child))
      execPlan :: Nil
    case _ =>
      Nil
  }
}
// Can we automate these 'pass through' operations?
object BasicOperators extends Strategy {
  // One-to-one mapping from basic logical operators to their physical counterparts.
  // Cases that throw IllegalStateException are operators the optimizer must have
  // rewritten before planning; reaching them indicates a planner bug.
  def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case r: RunnableCommand => ExecutedCommandExec(r) :: Nil
    case MemoryPlan(sink, output) =>
      // Copy each encoded row: toRow may reuse a mutable buffer between calls.
      val encoder = RowEncoder(sink.schema)
      LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil
    case logical.Distinct(child) =>
      throw new IllegalStateException(
        "logical distinct operator should have been replaced by aggregate in the optimizer")
    case logical.Intersect(left, right) =>
      throw new IllegalStateException(
        "logical intersect operator should have been replaced by semi-join in the optimizer")
    case logical.Except(left, right) =>
      throw new IllegalStateException(
        "logical except operator should have been replaced by anti-join in the optimizer")
    // Object/typed operators.
    case logical.DeserializeToObject(deserializer, objAttr, child) =>
      execution.DeserializeToObjectExec(deserializer, objAttr, planLater(child)) :: Nil
    case logical.SerializeFromObject(serializer, child) =>
      execution.SerializeFromObjectExec(serializer, planLater(child)) :: Nil
    case logical.MapPartitions(f, objAttr, child) =>
      execution.MapPartitionsExec(f, objAttr, planLater(child)) :: Nil
    case logical.MapPartitionsInR(f, p, b, is, os, objAttr, child) =>
      execution.MapPartitionsExec(
        execution.r.MapPartitionsRWrapper(f, p, b, is, os), objAttr, planLater(child)) :: Nil
    case logical.FlatMapGroupsInR(f, p, b, is, os, key, value, grouping, data, objAttr, child) =>
      execution.FlatMapGroupsInRExec(f, p, b, is, os, key, value, grouping,
        data, objAttr, planLater(child)) :: Nil
    case logical.FlatMapGroupsInPandas(grouping, func, output, child) =>
      execution.python.FlatMapGroupsInPandasExec(grouping, func, output, planLater(child)) :: Nil
    case logical.MapElements(f, _, _, objAttr, child) =>
      execution.MapElementsExec(f, objAttr, planLater(child)) :: Nil
    case logical.AppendColumns(f, _, _, in, out, child) =>
      execution.AppendColumnsExec(f, in, out, planLater(child)) :: Nil
    case logical.AppendColumnsWithObject(f, childSer, newSer, child) =>
      execution.AppendColumnsWithObjectExec(f, childSer, newSer, planLater(child)) :: Nil
    case logical.MapGroups(f, key, value, grouping, data, objAttr, child) =>
      execution.MapGroupsExec(f, key, value, grouping, data, objAttr, planLater(child)) :: Nil
    // Batch (non-streaming) FlatMapGroupsWithState; the streaming variant is planned
    // by FlatMapGroupsWithStateStrategy.
    case logical.FlatMapGroupsWithState(
        f, key, value, grouping, data, output, _, _, _, timeout, child) =>
      execution.MapGroupsExec(
        f, key, value, grouping, data, output, timeout, planLater(child)) :: Nil
    case logical.CoGroup(f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, left, right) =>
      execution.CoGroupExec(
        f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr,
        planLater(left), planLater(right)) :: Nil
    // Repartition with shuffle requires an exchange; without shuffle it is a coalesce.
    case logical.Repartition(numPartitions, shuffle, child) =>
      if (shuffle) {
        ShuffleExchangeExec(RoundRobinPartitioning(numPartitions), planLater(child)) :: Nil
      } else {
        execution.CoalesceExec(numPartitions, planLater(child)) :: Nil
      }
    case logical.Sort(sortExprs, global, child) =>
      execution.SortExec(sortExprs, global, planLater(child)) :: Nil
    case logical.Project(projectList, child) =>
      execution.ProjectExec(projectList, planLater(child)) :: Nil
    case logical.Filter(condition, child) =>
      execution.FilterExec(condition, planLater(child)) :: Nil
    case f: logical.TypedFilter =>
      execution.FilterExec(f.typedCondition(f.deserializer), planLater(f.child)) :: Nil
    case e @ logical.Expand(_, _, child) =>
      execution.ExpandExec(e.projections, e.output, planLater(child)) :: Nil
    case logical.Window(windowExprs, partitionSpec, orderSpec, child) =>
      execution.window.WindowExec(windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil
    case logical.Sample(lb, ub, withReplacement, seed, child) =>
      execution.SampleExec(lb, ub, withReplacement, seed, planLater(child)) :: Nil
    case logical.LocalRelation(output, data, _) =>
      LocalTableScanExec(output, data) :: Nil
    case logical.LocalLimit(IntegerLiteral(limit), child) =>
      execution.LocalLimitExec(limit, planLater(child)) :: Nil
    case logical.GlobalLimit(IntegerLiteral(limit), child) =>
      execution.GlobalLimitExec(limit, planLater(child)) :: Nil
    case logical.Union(unionChildren) =>
      execution.UnionExec(unionChildren.map(planLater)) :: Nil
    case g @ logical.Generate(generator, join, outer, _, _, child) =>
      execution.GenerateExec(
        generator, join = join, outer = outer, g.qualifiedGeneratorOutput,
        planLater(child)) :: Nil
    case _: logical.OneRowRelation =>
      execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil
    case r: logical.Range =>
      execution.RangeExec(r) :: Nil
    case logical.RepartitionByExpression(expressions, child, numPartitions) =>
      exchange.ShuffleExchangeExec(HashPartitioning(
        expressions, numPartitions), planLater(child)) :: Nil
    case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil
    case r: LogicalRDD =>
      RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil
    // Hints carry no execution semantics; plan the child directly.
    case h: ResolvedHint => planLater(h.child) :: Nil
    case _ => Nil
  }
}
}
| akopich/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala | Scala | apache-2.0 | 21,521 |
/*
* scala-swing (https://www.scala-lang.org)
*
* Copyright EPFL, Lightbend, Inc., contributors
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.swing
package event
/** Event published when a component's font has changed; `source` is the affected component. */
case class FontChanged(override val source: Component) extends ComponentEvent
| scala/scala-swing | src/main/scala/scala/swing/event/FontChanged.scala | Scala | apache-2.0 | 420 |
package org.vitrivr.adampro.utils.exception
import org.vitrivr.adampro.data.index.Index.IndexName
/**
* adamtwo
*
* Ivan Giangreco
* August 2015
*/
/**
 * Signals that an operation referenced an index that does not exist.
 *
 * @param message human-readable description; defaults to a generic message
 */
case class IndexNotExistingException(message : String = "Index not existing.") extends GeneralAdamException(message)
/** Factory helpers for [[IndexNotExistingException]]. */
object IndexNotExistingException {
  /** Builds an exception whose message names the missing index. */
  def withIndexname(indexname: IndexName): IndexNotExistingException =
    IndexNotExistingException(s"Index '$indexname' not existing.")
}
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/utils/exception/IndexNotExistingException.scala | Scala | mit | 449 |
package com.rrdinsights.scalabrine.parameters
import com.rrdinsights.scalabrine.TestSpec
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ParameterTest extends TestSpec {
  // Verifies the URL-encoded "key=value" rendering of common request parameters,
  // including '+'-encoding of spaces ("Regular Season" -> "Regular+Season").
  test("toUrl") {
    assert(LeagueIdParameter.NBA.toUrl === "LeagueID=00")
    assert(SeasonParameter.Season201617.toUrl === "Season=2016-17")
    assert(SeasonTypeParameter.RegularSeason.toUrl === "SeasonType=Regular+Season")
    assert(TeamIdParameter.BostonCeltics.toUrl === "TeamID=1610612738")
  }
}
| rd11490/Scalabrine | src/test/scala/com/rrdinsights/scalabrine/parameters/ParameterTest.scala | Scala | mit | 539 |
package com.github.gmspacagna.scalatable.descriptors
/** Describes a Hive table by its owning database and its name. */
trait HiveTableDescriptor extends HiveTableOperators {
  /** Unqualified table name. */
  def tableName: String
  /** Database (schema) that owns the table. */
  def dbName: String
  /** Fully qualified name in `db.table` form. */
  def fullTableName = s"$dbName.$tableName"
}
// Building blocks for assembling HQL query strings from typed tokens.
trait HiveTableOperators {
  // HQL keyword tokens; toString yields the literal keyword text.
  case object Select extends HQLOperator("SELECT")
  case object From extends HQLOperator("FROM")
  case object Where extends HQLOperator("WHERE")
  case object GroupBy extends HQLOperator("GROUP BY")
  case object Like extends HQLOperator("LIKE")
  case object Limit extends HQLOperator("LIMIT")
  case object LeftSemiJoin extends HQLOperator("LEFT SEMI JOIN")
  case object Join extends HQLOperator("JOIN")
  case object On extends HQLOperator("ON")
  case object And extends HQLOperator("AND")
  case object Partition extends HQLOperator("PARTITION")
  case object Table extends HQLOperator("TABLE")
  case object Insert extends HQLOperator("INSERT")
  case object As extends HQLOperator("AS")
  // One-argument HQL function builders, rendered as "name(arg)" by HQL1ArgOperator.
  def CollectSet(arg: String) = new HQL1ArgOperator("collect_set", arg) {}
  def Count(arg: String) = new HQL1ArgOperator("COUNT", arg) {}
  def Distinct(arg: String) = new HQL1ArgOperator("DISTINCT", arg) {}
  def Sum(arg: String) = new HQL1ArgOperator("SUM", arg) {}
  // Punctuation tokens.
  case object WildChar extends SyntaxOperator("*")
  case object Space extends SyntaxOperator(" ")
  case object CommaSpace extends SyntaxOperator(", ")
  // NOTE(review): these broad implicit conversions to String keep query assembly terse
  // but hide conversions at call sites; explicit .toString would be safer.
  implicit def Operator2String(op: HQLOperator) = op.toString
  implicit def Operator2String(op: HQL1ArgOperator) = op.toString
  implicit def Operator2String(op: SyntaxOperator) = op.toString
  implicit def Operator2String(op: HiveTableColumn) = op.toString
  implicit def Operator2String(op: GenericId) = op.toString
  implicit def Descriptor2String(descriptor: HiveTableDescriptor) = descriptor.tableName
  // Appends "(arg)" to the operator's rendered text.
  def func(op: HQL1ArgOperator, arg: String): String = op + "(" + arg + ")"
  // Renders a column qualified by a table alias, e.g. "t.col".
  def fullColumnName(tableAlias: String, col: HiveTableColumn) = tableAlias + "." + col
}
/** Standalone instance so the operators can be imported without mixing in the trait. */
object HiveTableOperators extends HiveTableOperators

/** Base of all HQL token types; `name` is the literal text rendered into queries. */
abstract class HiveEnum(val name: String) {
  override def toString = name
}

/** An identifier-like token (e.g. a key or id column). */
abstract class GenericId(name: String) extends HiveEnum(name)

/** A table column name token. */
abstract class HiveTableColumn(name: String) extends HiveEnum(name)

/** An HQL keyword such as SELECT or FROM. */
abstract class HQLOperator(name: String) extends HiveEnum(name)

/** A one-argument HQL function call, rendered as "name(arg)". */
abstract class HQL1ArgOperator(name: String, arg: String) extends HiveEnum(name + "(" + arg + ")")

/** A punctuation/syntax token such as "*" or ", ". */
abstract class SyntaxOperator(name: String) extends HiveEnum(name)
| gm-spacagna/scala-table-clients | src/main/scala/com/github/gmspacagna/scalatable/descriptors/HiveTableDescriptor.scala | Scala | apache-2.0 | 2,427 |
import org.scalastyle.sbt.ScalastylePlugin
import sbt._
import sbt.Keys._
import com.typesafe.sbt.SbtScalariform.{ ScalariformKeys, _ }
// Shared sbt settings (formatting, linting, command aliases) applied to all modules.
object CommonSettings {
  // Compiler versions/flags and parallel test execution.
  private lazy val defaultSettings = Seq(
    scalaVersion := BuildCommon.scalaVersion,
    scalacOptions := BuildCommon.scalacOption,
    javacOptions := BuildCommon.javacOptions,
    parallelExecution in Test := true)
  // Scalariform source-formatting preferences.
  private lazy val localScalariformSettings = scalariformSettings ++ Seq(
    ScalariformKeys.preferences := {
      import scalariform.formatter.preferences._
      FormattingPreferences().
        setPreference(AlignParameters, true).
        setPreference(AlignSingleLineCaseStatements, true).
        setPreference(CompactControlReadability, true).
        setPreference(CompactStringConcatenation, false).
        setPreference(DoubleIndentClassDeclaration, true).
        setPreference(FormatXml, true).
        setPreference(IndentLocalDefs, true).
        setPreference(IndentPackageBlocks, true).
        setPreference(IndentSpaces, 4).
        setPreference(MultilineScaladocCommentsStartOnFirstLine, true).
        setPreference(PreserveSpaceBeforeArguments, false).
        setPreference(PreserveDanglingCloseParenthesis, false).
        setPreference(RewriteArrowSymbols, false).
        setPreference(SpaceBeforeColon, false).
        setPreference(SpaceInsideBrackets, false).
        setPreference(SpacesWithinPatternBinders, true)
    })
  // Scalastyle: write the report under target/ and fail the build on style errors.
  private lazy val localScalaStylePluginSettings = ScalastylePlugin.projectSettings ++ Seq(
    ScalastylePlugin.scalastyleTarget := sbt.file("target/scalastyle-result.xml"),
    ScalastylePlugin.scalastyleFailOnError := true)
  // Convenience aliases: `runAll` and `cleanRunAll` chain compile/test/lint.
  private lazy val commandAlias = addCommandAlias("runAll", ";compile;test;scalastyle") ++
    addCommandAlias("cleanRunAll", ";clean;compile;test;scalastyle")
  // The aggregated settings a project should add to its build.
  val commonBuildSettings = Defaults.coreDefaultSettings ++
    defaultSettings ++
    localScalariformSettings ++
    localScalaStylePluginSettings ++
    commandAlias ++
    addCompilerPlugin("org.scalamacros" %% "paradise" % "2.1.0-M5" cross CrossVersion.full)
}
package scala.collection.convert
import java.util.{concurrent => juc}
import java.{lang => jl, util => ju}
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.collection.JavaConverters._
import scala.collection.{concurrent, mutable}
// scala/bug#9113: tests to ensure that wrappers return null instead of wrapping it as a collection
@RunWith(classOf[JUnit4])
class NullSafetyToJavaTest {
  // Each test converts a null Scala collection to its Java counterpart and checks
  // that the result is null rather than a Java wrapper around null.
  @Test def testIterableWrapping(): Unit = {
    val nullIterable: Iterable[AnyRef] = null
    val iterable: jl.Iterable[AnyRef] = asJavaIterable(nullIterable)
    assert(iterable == null)
  }
  // Implicit conversion to ju.Properties is not available
  @Test def testIteratorDecoration(): Unit = {
    val nullIterator: Iterator[AnyRef] = null
    assert(nullIterator.asJava == null)
  }
  @Test def testEnumerationDecoration(): Unit = {
    val nullEnumeration: Iterator[AnyRef] = null
    assert(nullEnumeration.asJavaEnumeration == null)
  }
  @Test def testIterableDecoration(): Unit = {
    val nullIterable: Iterable[AnyRef] = null
    assert(nullIterable.asJava == null)
  }
  @Test def testCollectionDecoration(): Unit = {
    val nullCollection: Iterable[AnyRef] = null
    assert(nullCollection.asJavaCollection == null)
  }
  @Test def testBufferDecoration(): Unit = {
    val nullBuffer: mutable.Buffer[AnyRef] = null
    assert(nullBuffer.asJava == null)
  }
  @Test def testSetDecoration(): Unit = {
    val nullSet: Set[AnyRef] = null
    assert(nullSet.asJava == null)
  }
  @Test def testMapDecoration(): Unit = {
    val nullMap: mutable.Map[AnyRef, AnyRef] = null
    assert(nullMap.asJava == null)
  }
  @Test def testConcurrentMapDecoration(): Unit = {
    val nullConMap: concurrent.Map[AnyRef, AnyRef] = null
    assert(nullConMap.asJava == null)
  }
  @Test def testDictionaryDecoration(): Unit = {
    val nullDict: mutable.Map[AnyRef, AnyRef] = null
    assert(nullDict.asJavaDictionary == null)
  }
  // Decorator conversion to ju.Properties is not available
}
| martijnhoekstra/scala | test/junit/scala/collection/convert/NullSafetyToJavaTest.scala | Scala | apache-2.0 | 2,050 |
import common._
package object scalashop {

  /** The value of every pixel is represented as a 32 bit integer:
   *  8 bits each for red, green, blue and alpha, from most to least
   *  significant byte.
   */
  type RGBA = Int

  /** Returns the red component. */
  def red(c: RGBA): Int = (0xff000000 & c) >>> 24

  /** Returns the green component. */
  def green(c: RGBA): Int = (0x00ff0000 & c) >>> 16

  /** Returns the blue component. */
  def blue(c: RGBA): Int = (0x0000ff00 & c) >>> 8

  /** Returns the alpha component. */
  def alpha(c: RGBA): Int = (0x000000ff & c) >>> 0

  /** Used to create an RGBA value from separate components. */
  def rgba(r: Int, g: Int, b: Int, a: Int): RGBA = {
    (r << 24) | (g << 16) | (b << 8) | (a << 0)
  }

  /** Restricts the integer into the specified range. */
  def clamp(v: Int, min: Int, max: Int): Int = {
    if (v < min) min
    else if (v > max) max
    else v
  }

  /** Image is a two-dimensional matrix of pixel values. */
  class Img(val width: Int, val height: Int, private val data: Array[RGBA]) {
    def this(w: Int, h: Int) = this(w, h, new Array(w * h))
    def apply(x: Int, y: Int): RGBA = data(y * width + x)
    def update(x: Int, y: Int, c: RGBA): Unit = data(y * width + x) = c
  }

  /** Computes the blurred RGBA value of a single pixel of the input image.
   *
   *  The result is the per-channel arithmetic mean of all pixels inside the
   *  square window of the given radius centred at (x, y), with the window
   *  clamped to the image bounds. A radius of 0 returns the pixel unchanged.
   *  (Was previously unimplemented: `???`.)
   */
  def boxBlurKernel(src: Img, x: Int, y: Int, radius: Int): RGBA = {
    val xMin = clamp(x - radius, 0, src.width - 1)
    val xMax = clamp(x + radius, 0, src.width - 1)
    val yMin = clamp(y - radius, 0, src.height - 1)
    val yMax = clamp(y + radius, 0, src.height - 1)
    var rSum = 0
    var gSum = 0
    var bSum = 0
    var aSum = 0
    var count = 0
    var i = xMin
    while (i <= xMax) {
      var j = yMin
      while (j <= yMax) {
        val c = src(i, j)
        rSum += red(c)
        gSum += green(c)
        bSum += blue(c)
        aSum += alpha(c)
        count += 1
        j += 1
      }
      i += 1
    }
    // The clamped window always contains at least one pixel, so count >= 1.
    rgba(rSum / count, gSum / count, bSum / count, aSum / count)
  }
}
| comprakash/learning-scala | scalashop/src/main/scala/scalashop/package.scala | Scala | gpl-3.0 | 1,366 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.dsl.task
import java.time.Instant
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.Constants._
import org.apache.gearpump.streaming.dsl.window.impl.{StreamingOperator, TimestampedValue}
import org.apache.gearpump.streaming.task.{Task, TaskContext, TaskUtil}
// A streaming task that feeds incoming messages through a user-supplied
// StreamingOperator and emits results on watermark progress.
class TransformTask[IN, OUT](
    operator: StreamingOperator[IN, OUT],
    taskContext: TaskContext, userConf: UserConfig) extends Task(taskContext, userConf) {

  // Framework constructor: resolves the operator serialized in the user config
  // under GEARPUMP_STREAMING_OPERATOR. `.get` fails fast if it is missing.
  def this(context: TaskContext, conf: UserConfig) = {
    this(
      conf.getValue[StreamingOperator[IN, OUT]](GEARPUMP_STREAMING_OPERATOR)(context.system).get,
      context, conf
    )
  }

  // Initializes the operator before any message is processed.
  override def onStart(startTime: Instant): Unit = {
    operator.setup()
  }

  // Wraps each message value with its timestamp and hands it to the operator.
  override def onNext(msg: Message): Unit = {
    operator.foreach(TimestampedValue(msg.value.asInstanceOf[IN], msg.timestamp))
  }

  // Triggers the operator's pending output up to the new watermark.
  override def onWatermarkProgress(watermark: Instant): Unit = {
    TaskUtil.trigger(watermark, operator, taskContext)
  }

  // Releases operator resources on shutdown.
  override def onStop(): Unit = {
    operator.teardown()
  }
}
| manuzhang/incubator-gearpump | streaming/src/main/scala/org/apache/gearpump/streaming/dsl/task/TransformTask.scala | Scala | apache-2.0 | 1,933 |
/*
mls: basic machine learning algorithms for Scala
Copyright (C) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ml.classifiers
import al.strategies.Strategy
import clean.lib.{CM, Ds}
import ml.Pattern
import ml.models.Model
import util.Datasets
// A meta-learner that selects the best of several candidate classifiers by
// cross-validation on the queried patterns, caching the choice in the dataset's
// `classif100` table, and then delegates all Learner operations to the winner.
case class BestClassifCV100(ds: Ds, r: Int, f: Int, s: Strategy, queries: Seq[Pattern], fqueries: Seq[Pattern], seed: Int, poolForKNN: Seq[Pattern]) extends Learner with CM {
  override lazy val toString = s"BestLearnerCV100: $ds"

  // Cached best-classifier id for (strategy, learner, run, fold); computed via
  // cross-validation (see `classif`) and persisted on first use.
  lazy val id = ds.read(s"select c from classif100 where s=${s.id} and l=${s.learner.id} and r=$r and f=$f") match {
    case List(Vector(x)) => x.toInt
    case List() =>
      val res = classif.id
      ds.write(s"insert into classif100 values (${s.id},${s.learner.id},$r,$f,$res)")
      res
    case x => ds.error(s"problemas: $x")
  }
  lazy val abr: String = classif.abr
  lazy val attPref: String = classif.attPref
  lazy val boundaryType: String = classif.boundaryType

  // Candidate classifiers considered for selection.
  lazy val learners = Seq(
    KNNBatcha(5, "eucl", poolForKNN, weighted = true)
    , C45()
    , RF(seed)
    , NBBatch()
    , SVMLibRBF(seed)
  )

  // Picks the candidate with the highest summed kappa over 5-fold CV on the
  // (possibly filtered) queried patterns.
  lazy val classif = learners.maxBy { l =>
    val qs = if (qf(l)) fqueries.toVector else queries.toVector
    Datasets.kfoldCV(qs, 5) { (tr, ts, foldnr, minsize) =>
      // cannot use balanced accuracy here: a class may have no examples in a fold
      kappa(l.build(tr).confusion(ts))
    }.sum
  }
  override lazy val querFiltro = qf(classif)

  // The remaining members delegate to the selected classifier.
  def update(model: Model, fast_mutable: Boolean, semcrescer: Boolean)(pattern: Pattern) = classif.update(model, fast_mutable, semcrescer)(pattern)

  def expected_change(model: Model)(pattern: Pattern) = classif.expected_change(model)(pattern)

  def build(pool: Seq[Pattern]) = classif.build(pool)

  val context: String = "bestcv100"
}
| active-learning/active-learning-scala | src/main/scala/ml/classifiers/BestClassifCV100.scala | Scala | gpl-2.0 | 2,453 |
package gitbucket.core.model
import gitbucket.core.model.CommitState._
import org.scalatest.FunSpec
/** Tests for [[CommitState.combine]]'s aggregation rules over sets of states. */
class CommitStateSpec extends FunSpec {
  describe("CommitState") {
    it("should combine empty must eq PENDING") {
      assert(combine(Set()) == PENDING)
    }
    // Fixed test names below: they previously said "must eq peinding" (misspelled
    // and contradicting the asserted expected state).
    it("should combine includes ERROR must eq FAILURE") {
      assert(combine(Set(ERROR, SUCCESS, PENDING)) == FAILURE)
    }
    it("should combine includes FAILURE must eq FAILURE") {
      assert(combine(Set(FAILURE, SUCCESS, PENDING)) == FAILURE)
    }
    it("should combine includes PENDING must eq PENDING") {
      assert(combine(Set(PENDING, SUCCESS)) == PENDING)
    }
    it("should combine only SUCCESS must eq SUCCESS") {
      assert(combine(Set(SUCCESS)) == SUCCESS)
    }
  }
}
| McFoggy/gitbucket | src/test/scala/gitbucket/core/model/CommitStateSpec.scala | Scala | apache-2.0 | 762 |
package keemun.models
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2014
* See LICENSE.txt for details.
*/
// TODO: or use other superclass? (e.g. RuntimeException) — kept as-is pending review.
/** Signals an invalid or inconsistent application configuration. */
class ConfigError(message: String) extends Exception(message)
| maizy/keemun | app/keemun/models/ConfigError.scala | Scala | mit | 208 |
package models
import org.specs2._
import org.specs2.specification.script.{StandardDelimitedStepParsers, GWT, StepParser}
import java.time.{Duration, LocalTime}
/**
* User: ctranxuan
* Date: 17/05/14
*
* http://www.slideshare.net/etorreborre/specs2-whirlwindtour (40)
* http://etorreborre.github.io/specs2/guide/org.specs2.guide.structure.GivenWhenThenPage.html
*/
// Given/When/Then specification of MarketTime: from a market rate, direction and
// remaining duration, predict when the market reaches 100%.
class MarketSpec extends Specification with GWT with StandardDelimitedStepParsers { override def is = s2"""
Given a market rate of {90} ${market90.start}
And a market direction {up}
And a market remaining duration of {14:59}
When the time is {10:00}
Then I should get a 100% market at {10:15} ${market90.end}
Given a market rate of {90} ${market90bis.start}
And a market direction {up}
And a market remaining duration of {24:59}
When the time is {11:00}
Then I should get a 100% market at {11:25} ${market90bis.end}
Given a market rate of {70} ${market70.start}
And a market direction {up}
And a market remaining duration of {24:59}
When the time is {09:00}
Then I should get a 100% market at {09:50} ${market70.end}
Given a market rate of {70} ${market70.start}
And a market direction {up}
And a market remaining duration of {00:02}
When the time is {09:00}
Then I should get a 100% market at {09:25:03} ${market70.end}
Given a market rate of {30} ${market30_3.start}
And a market direction {down}
And a market remaining duration of {10:00}
When the time is {09:00}
Then I should get a third 100% market at {19:35:01} ${market30_3.end}
"""
// Given a market rate of {90} ${market_8.start}
// And a market direction {up}
// And a market remaining duration of {24:59}
// When the time is {09:37}
// Then I should get a third 100% market at {19:35:01} ${market_8.end}

  // Parses a "{HH:mm}" placeholder into a LocalTime.
  val aTime = StepParser((s: String) => LocalTime.parse(s))

  // Base scenario: builds a MarketTime from rate/direction/duration and checks
  // the first predicted 100% time.
  val market =
    Scenario("market").
      given(aString). // issue with given of different types
      given(aString).
      given(aString).
      when(aTime) { case aTime :: aRate :: aDirection :: aDuration :: _ => {
        marketTime(aRate, aDirection, aDuration).next(aTime, 1).head
      }
      }.
      andThen(aTime) { case expected :: marketTime :: _ =>
        marketTime mustEqual (expected)
      }
  val market90 =
    market.withTitle("market90")
  val market90bis =
    market.withTitle("market90bis")
  val market70 =
    market.withTitle("market70")

  // Variant scenario: checks the third predicted 100% time.
  // NOTE(review): the printlns below look like leftover debug output — consider removing.
  val market3evals =
    Scenario("market3rdEval").
      given(aString). // issue with given of different types
      given(aString).
      given(aString).
      when(aTime) { case aTime :: aRate :: aDirection :: aDuration :: _ => {
        println(marketTime(aRate, aDirection, aDuration).next(aTime, 8))
        println(marketTime(aRate, aDirection, aDuration).next2(aTime, 8))
        marketTime(aRate, aDirection, aDuration).next(aTime, 3)(2)
      }
      }.
      andThen(aTime) { case expected :: marketTime :: _ =>
        marketTime mustEqual (expected)
      }
  val market30_3 =
    market3evals.withTitle("market_30")
  val market_8 =
    market3evals.withTitle("market_8")

  // Builds a MarketTime from raw step strings; duration is given as "MM:SS".
  private def marketTime(aRate: String, aDirection: String, aDuration: String): MarketTime = {
    val direction = Direction(aDirection)
    val tokens = aDuration.split(":")
    val duration = Duration.ofMinutes(tokens(0) toLong) plus(Duration.ofSeconds(tokens(1) toLong))
    val rate = MarketRate(aRate.toInt, direction)
    MarketTime(rate, duration)
  }
}
| ctranxuan/swc | test/models/MarketSpec.scala | Scala | apache-2.0 | 3,800 |
package sLaCa3
/**
* This sLaCa3 program counts the number of occurences of "01" in the binary string stored at memory location
* 0x3100 and writes the count into 0x3101.
*/
// Assembly-DSL program (LC-3-like): counts occurrences of the bit pattern "01" in the
// 16-bit word at memory x3100 and stores the count at x3101.
object count01sTest extends sLaCa3{
  def main(args: Array[String]): Unit = {
    // Different test inputs with their binary representation and expected output.
    mem(0x3100) = 0x424F; //0100001001001111 Expect 4
    //mem(0x3100) = -1; //1111111111111111 Expect 0
    //mem(0x3100) = 1; //0000000000000001 Expect 1
    //mem(0x3100) = java.lang.Short.MIN_VALUE; //1000000000000000 Expect 0
    //mem(0x3100) = 0; //0000000000000000 Expect 0
    dot ORIG 0x3000;
    // Algorithm: shift the input left one bit at a time (R3 counts 16 iterations down
    // to zero). On seeing a 0 bit, check the following bit; if it is 1, increment R2.
    LDI R0 "IADDR"; //R0 gets the input value and is shifted left
    LD R4 "MASK";
    ADD R3 R3 I8;
    ADD R3 R3 I8; //R3 will count down to zero to stop looping
    "AGAIN" vvv;
    ADD R3 R3 I0;
    BR z "END";
    AND R1 R0 R4; //R1 gets the value of the mask on top of the input
    BR z "FOUND0";
    ADD R0 R0 R0;
    ADD R3 R3 I_1;
    BR nzp "AGAIN"
    "FOUND0"vvv;
    ADD R0 R0 R0;
    ADD R3 R3 I_1;
    AND R1 R0 R4;
    BR n "YES";
    BR nz "AGAIN";
    "YES" vvv;
    ADD R2 R2 I1;
    ADD R0 R0 R0;
    ADD R3 R3 I_1;
    BR nzp "AGAIN";
    "END" vvv;
    STI R2 "OADDR";
    TRAP x25;
    "MASK" FILL 0x8000;  // 1000000000000000: isolates the current top bit of R0
    "IADDR" FILL 0x3100; // input address
    "OADDR" FILL 0x3101; // output (count) address
    dot END;
    println("Memory at x3100: " + mem(0x3100))
    println("Memory at x3101: " + mem(0x3101))
  }
}
| davidmaw/sLaCa3 | count01sTest.scala | Scala | mit | 1,404 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import scala.reflect.ClassTag
import org.apache.spark.{NarrowDependency, SparkEnv, Partition, TaskContext}
/** A partition of [[PartitionPruningRDD]] that remembers the parent partition it wraps. */
class PartitionPruningRDDPartition(idx: Int, val parentSplit: Partition) extends Partition {
  override val index = idx
}
/**
* Represents a dependency between the PartitionPruningRDD and its parent. In this
* case, the child RDD contains a subset of partitions of the parents'.
*/
class PruneDependency[T](rdd: RDD[T], @transient partitionFilterFunc: Int => Boolean)
  extends NarrowDependency[T](rdd) {

  /**
   * The child partitions: every parent partition whose index passes the filter,
   * re-indexed contiguously from 0 and wrapped in [[PartitionPruningRDDPartition]].
   */
  @transient
  val partitions: Array[Partition] = {
    val keptParents = rdd.partitions.filter(parent => partitionFilterFunc(parent.index))
    keptParents.zipWithIndex.map { case (parentSplit, childIndex) =>
      new PartitionPruningRDDPartition(childIndex, parentSplit): Partition
    }
  }

  /** A pruned child partition depends on exactly one parent partition. */
  override def getParents(partitionId: Int): List[Int] =
    List(partitions(partitionId).asInstanceOf[PartitionPruningRDDPartition].parentSplit.index)
}
/**
* A RDD used to prune RDD partitions/partitions so we can avoid launching tasks on
* all partitions. An example use case: If we know the RDD is partitioned by range,
* and the execution DAG has a filter on the key, we can avoid launching tasks
* on partitions that don't have the range covering the key.
*/
class PartitionPruningRDD[T: ClassTag](
    @transient prev: RDD[T],
    @transient partitionFilterFunc: Int => Boolean)
  extends RDD[T](prev.context, List(new PruneDependency(prev, partitionFilterFunc))) {

  /** Delegates computation to the parent partition backing this pruned partition. */
  override def compute(split: Partition, context: TaskContext): Iterator[T] = {
    val prunedSplit = split.asInstanceOf[PartitionPruningRDDPartition]
    firstParent[T].iterator(prunedSplit.parentSplit, context)
  }

  /** The pruned partition set was precomputed by the single [[PruneDependency]]. */
  override protected def getPartitions: Array[Partition] =
    getDependencies.head.asInstanceOf[PruneDependency[T]].partitions
}
object PartitionPruningRDD {
  /**
   * Create a PartitionPruningRDD. This function can be used to create the PartitionPruningRDD
   * when its type T is not known at compile time.
   *
   * @param rdd parent RDD whose partitions will be filtered
   * @param partitionFilterFunc predicate on the parent partition index; partitions for
   *                            which it returns true are retained
   * @return an RDD containing only the retained partitions of `rdd`
   */
  // Explicit return type added: public API members should not rely on type inference.
  def create[T](rdd: RDD[T], partitionFilterFunc: Int => Boolean): PartitionPruningRDD[T] = {
    new PartitionPruningRDD[T](rdd, partitionFilterFunc)(rdd.elementClassTag)
  }
}
| iiisthu/sparkSdn | core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala | Scala | apache-2.0 | 2,885 |
package lila
package analyse
import chess.Color
import chess.format.Uci
import lila.tree.Eval
/**
 * Evaluation info attached to one ply of a game analysis: the engine eval
 * plus an optional principal variation.
 */
case class Info(
    ply: Int,
    eval: Eval,
    // variation is first in UCI, then converted to PGN before storage
    variation: List[String] = Nil
) {

  def cp = eval.cp
  def mate = eval.mate
  def best = eval.best

  /** Full-move number for this ply (plies 1 and 2 belong to turn 1, etc.). */
  def turn = 1 + (ply - 1) / 2

  /** Side to move: odd plies are white. */
  def color = Color.fromWhite(ply % 2 == 1)

  def hasVariation = variation.nonEmpty

  /** Copy without the variation line or the best move. */
  def dropVariation = copy(variation = Nil, eval = eval.dropBest)

  /** Copy with the eval seen from the other side. */
  def invert = copy(eval = eval.invert)

  def cpComment: Option[String] = cp.map(_.showPawns)

  def mateComment: Option[String] = mate.map(m => s"Mate in ${math.abs(m.value)}")

  def evalComment: Option[String] = cpComment.orElse(mateComment)

  def isEmpty = cp.isEmpty && mate.isEmpty

  /**
   * Collapse the eval to a single comparable Int: centipawns when there is no mate,
   * otherwise a value near Int.MinValue (mate against) or Int.MaxValue (mate for).
   */
  def forceCentipawns: Option[Int] = mate.fold(cp.map(_.centipawns)) { m =>
    if (m.negative) Some(Int.MinValue - m.value) else Some(Int.MaxValue - m.value)
  }
}
object Info {

  import Eval.{ Cp, Mate }

  /** Maximum number of plies kept in a stored variation line. */
  val LineMaxPlies = 14

  // Field separator inside one encoded info: "cp,mate,variation,best".
  private val separator = ","
  // Separator between the encoded infos of consecutive plies.
  private val listSeparator = ";"

  /** Initial (empty-eval) info for the given ply. */
  def start(ply: Int) = Info(ply, Eval.initial, Nil)

  // parseIntOption comes from the enclosing lila package scope; None on malformed input.
  private def strCp(s: String) = parseIntOption(s) map Cp.apply
  private def strMate(s: String) = parseIntOption(s) map Mate.apply

  // Decodes one comma-separated info for the given ply.
  // NOTE(review): "".split(",") yields Array(""), so the Array() case looks unreachable;
  // an empty string falls into the Array(cp) case with cp = "" and strCp maps it to None,
  // which gives the same empty eval — confirm before relying on the first case.
  private def decode(ply: Int, str: String): Option[Info] = str.split(separator) match {
    case Array() => Some(Info(ply, Eval.empty))
    case Array(cp) => Some(Info(ply, Eval(strCp(cp), None, None)))
    case Array(cp, ma) => Some(Info(ply, Eval(strCp(cp), strMate(ma), None)))
    case Array(cp, ma, va) => Some(Info(ply, Eval(strCp(cp), strMate(ma), None), va.split(' ').toList))
    case Array(cp, ma, va, be) =>
      // `be` is a single-char ("piotr") encoded best move — see chess.format.Uci.
      Some(Info(ply, Eval(strCp(cp), strMate(ma), Uci.Move piotr be), va.split(' ').toList))
    case _ => None
  }

  // Decodes a ";"-separated list, assigning plies fromPly+1, fromPly+2, ...
  // The trailing .sequence (implicit syntax from the imports in scope) turns
  // List[Option[Info]] into Option[List[Info]], failing if any element fails.
  def decodeList(str: String, fromPly: Int): Option[List[Info]] = {
    str.split(listSeparator).toList.zipWithIndex map { case (infoStr, index) =>
      decode(index + 1 + fromPly, infoStr)
    }
  }.sequence

  /** Curried constructor: fix the eval and variation, supply the ply later. */
  def apply(cp: Option[Cp], mate: Option[Mate], variation: List[String]): Int => Info =
    ply => Info(ply, Eval(cp, mate, None), variation)
}
| ornicar/lichess-db | src/main/scala/lila/analyse/Info.scala | Scala | agpl-3.0 | 2,283 |
package pt.charactor
import akka.actor.ActorSystem
import akka.actor.Props
import akka.testkit.{TestActorRef, TestKit, ImplicitSender}
import org.scalatest.WordSpecLike
import org.scalatest.Matchers
import org.scalatest.BeforeAndAfterAll
import pt.charactor.MoverArbiter.CurrentWorldMap
/**
 * Spec for Vector2D.bounded.
 * NOTE(review): the TestKit/ImplicitSender machinery appears unused here — Vector2D
 * is tested as a plain value; confirm whether the actor system is needed at all.
 */
class Vector2DSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  def this() = this(ActorSystem("MySpec"))

  override def afterAll() {
    // Shut down the actor system created for this suite.
    TestKit.shutdownActorSystem(system)
  }

  "A Vector2D" must {
    "bound another Vector inside" in {
      // The expectations show modulo-style wrap-around into [0, bound), not clamping:
      // -10 -> 90, 110 -> 10, 150 -> 50 for a bound of 100.
      Vector2D(50, 50).bounded(Vector2D(100, 100)) shouldEqual Vector2D(50, 50)
      Vector2D(-10, 50).bounded(Vector2D(100, 100)) shouldEqual Vector2D(90, 50)
      Vector2D(10, -50).bounded(Vector2D(100, 100)) shouldEqual Vector2D(10, 50)
      Vector2D(110, 150).bounded(Vector2D(100, 100)) shouldEqual Vector2D(10, 50)
    }
  }
}
| PiotrTrzpil/charactor-clustered | src/test/scala/pt/charactor/Vector2DSpec.scala | Scala | apache-2.0 | 950 |
package grammarcomp
package clients
import grammar._
import utils._
import CFGrammar._
import generators._
import generators.RandomAccessGenerator._
import parsing._
import scala.collection.mutable.{ Map => MutableMap }
import java.util.Random
object AmbiguityChecker {

  /**
   * Result of an ambiguity check over words with terminal type `T`.
   * Covariant in `T` so that witness-free results typed `AmbiguityFeedback[Nothing]`
   * conform to `AmbiguityFeedback[T]` for any `T`.
   */
  // FIX: the subclasses previously wrote `extends AmbiguityFeedback` with no type
  // argument, which does not type-check against a parameterized class; the
  // witness-free cases now extend AmbiguityFeedback[Nothing] and the witnesses [T].
  sealed abstract class AmbiguityFeedback[+T]

  /** No ambiguity found, but the search was not exhaustive. */
  case class PossiblyUnambiguous() extends AmbiguityFeedback[Nothing]

  /** The checked nonterminal was shown unambiguous. */
  case class Unambiguous() extends AmbiguityFeedback[Nothing]

  /** A word `w` derivable in two ways from nonterminal `ant`. */
  case class AmbiguityWitness[T](ant: Nonterminal, w: Word[T]) extends AmbiguityFeedback[T] {
    override def toString = {
      "Ambiguous nonterminal: " + ant + " word: " + wordToString(w)
    }
  }

  /** An ambiguity witness together with the distinct parse trees of `w`. */
  case class AmbiguityWitnessWithTrees[T](ant: Nonterminal, w: Word[T], ptrees: List[ParseTree[T]]) extends AmbiguityFeedback[T] {
    //    override def toString = {
    //      "Ambiguous nonterminal: " + ant + " word: " + wordToString(w) + "parse trees: " +
    //
    //    }
  }
}
/**
 * Checks a grammar for ambiguity by enumerating words for each nonterminal and
 * searching for a word that is produced twice (i.e. has two derivations).
 * Small word spaces are enumerated sequentially; large ones are randomly
 * sampled and each sample is checked for multiple parse trees with CYK.
 */
class AmbiguityChecker[T](g: Grammar[T])
  (implicit gctx: GlobalContext,
    opctx: AmbiguityContext,
    enumctx: EnumerationContext) {

  import AmbiguityChecker._

  /**
   * Runs only the sequential duplicate-word check, on `g.fromCNF`, for word
   * sizes 1 to opctx.maxSize over nonterminals in post-order.
   * NOTE(review): `break` only guards the outer size loop — the inner foreach
   * still finishes the current size, so several witnesses of the same size
   * can be collected before the loop stops.
   */
  def checkAmbiguityInStudentGrammar(): List[AmbiguityFeedback[T]] = {
    val maxSize = opctx.maxSize
    val ng = g.fromCNF
    val wordGen = new SizeBasedRandomAccessGenerator(ng, maxSize)
    val now = opctx.consecWordsForAmbiguityCheck
    val consecChecker = checkForDuplicates(wordGen, now) _
    //collect all the non-terminals in post-order
    val nontermsInPO = GrammarUtils.postOrder(ng)
    //for every size from 1 to 'maxWordSize' check if there is an ambiguous word
    var ambiguities = List[AmbiguityWitness[T]]()
    var break = false
    for (size <- 1 to maxSize) if (!break) {
      nontermsInPO.foreach { nt =>
        consecChecker(nt, size) match {
          case aw: AmbiguityWitness[T] =>
            ambiguities :+= aw
            break = true
          case _ => ;
        }
      }
    }
    ambiguities
  }

  /**
   * Full ambiguity check for sizes startSize..opctx.maxSize. For each
   * not-yet-ambiguous nonterminal it first runs the sequential duplicate
   * check; when that is inconclusive, `fastmode` is off and the word space
   * exceeds the sample budget `nos`, it falls back to sampling with CYK.
   * Checks gctx.abort between steps so the search can be cancelled.
   */
  def checkForAmbiguity(startSize: Int = 1, fastmode: Boolean = false): List[AmbiguityFeedback[T]] = {
    //This generator is shared by nonterminals
    val maxSize = opctx.maxSize
    val wordGen = new SizeBasedRandomAccessGenerator(g, maxSize)
    val nos = opctx.sampledWordsForAmbiguityCheck
    val now = opctx.consecWordsForAmbiguityCheck
    val sampleChecker = sampleBasedChecker(wordGen, nos)
    val consecChecker = checkForDuplicates(wordGen, now) _
    //collect all the non-terminals in post-order
    val nontermsInPO = GrammarUtils.postOrder(g)
    if (opctx.debugAmbiguityCheck)
      println("Post-order: " + nontermsInPO.mkString(","))
    //for every size from 1 to 'maxWordSize' check if there is an ambiguous word
    var exploredNonterms = Set[Nonterminal]() //set of nonterms that are found to be ambiguous
    var ambiguities = List[AmbiguityWitness[T]]()
    for (size <- startSize to maxSize) {
      if (!gctx.abort) {
        //for stats
        gctx.stats.updateCumStats(1, "AmbWitSizeInc")
        nontermsInPO.filterNot(exploredNonterms.contains).foreach { nt =>
          //println("Checking non-terminal: " + nt + " for size: " + size)
          if (!gctx.abort) {
            consecChecker(nt, size) match {
              case aw: AmbiguityWitness[T] =>
                //log ambiguous words to console and file
                //println("Found ambiguous word: " + aw)
                gctx.logMessage("Found ambiguous word: " + aw)
                //for stats
                gctx.stats.updateCounter(1, "AmbNts")
                gctx.stats.updateCounterStats(aw.w.size, "WitSize", "AmbNts")
                exploredNonterms += nt
                ambiguities :+= aw
              case u: Unambiguous =>
                // proven unambiguous: never examined again at larger sizes
                exploredNonterms += nt
              case _ if !fastmode =>
                val spaceSize = wordGen.wordCounter.boundForNonterminal(nt, size)
                if (spaceSize > nos) {
                  //here use the sample based checker
                  sampleChecker(nt, size) match {
                    case aw: AmbiguityWitness[T] =>
                      //log ambiguous words to console and file
                      //println("Found ambiguous word (by sampling): " + aw)
                      gctx.logMessage("Found ambiguous word (by sampling): " + aw)
                      //for stats
                      gctx.stats.updateCounter(1, "AmbNts")
                      gctx.stats.updateCounterStats(aw.w.size, "WitSize", "AmbNts")
                      exploredNonterms += nt
                      ambiguities :+= aw
                    case _ => ; //possibly unambiguous here
                  }
                }
              case _ => ;
            }
          }
        }
      }
    }
    ambiguities
  }

  /**
   * Enumerates up to `now` consecutive words of `size` for `nt` and reports an
   * AmbiguityWitness as soon as the same word appears twice; otherwise
   * PossiblyUnambiguous (the enumeration is not exhaustive in general).
   */
  def checkForDuplicates(wordGen: SizeBasedRandomAccessGenerator[T], now: Int)(nt: Nonterminal, size: Int) = {
    //we can use bloom filters here if needed
    var words = Set[Word[T]]()
    var duplicate: Option[Word[T]] = None
    val seqEnum = wordGen.getSeqEnumerator(nt, size, now)
    var break = false
    while (!break && seqEnum.hasNext) {
      val w = seqEnum.next
      if (words.contains(w)) {
        duplicate = Some(w)
        break = true //found a duplicate so break and report the string as a witness
      } else
        words += w
    }
    //println("word at index " + (index - step) + " : " + wordToString(words.toList(words.size - 1)))
    if (duplicate.isDefined) {
      AmbiguityWitness(nt, duplicate.get)
    } else
      PossiblyUnambiguous()
    /* Note saying unambiguous in this case is slightly hard
    else if (words.size < now)
      Unambiguous() //here, we have enumerated all words belonging to the grammar
     */ }

  /**
   * Builds a sampling-based checker. `nos` is the number of samples.
   * Returns a function (nt, size) => feedback that samples up to `nos` random
   * words of the given size and asks a CYK parser (rooted at each nonterminal)
   * whether a sample has multiple parse trees. Falls back to the sequential
   * duplicate check when the word space is no larger than the sample budget.
   */
  def sampleBasedChecker(wordGen: SizeBasedRandomAccessGenerator[T], nos: Int) = {
    //create a cnf grammar starting at each non-terminal
    val cnfg = g.cnfGrammar
    val cykParsers = g.nonTerminals.map { nt =>
      (nt -> new CYKParser(CNFConverter.removeUnreachableRules(Grammar(nt, cnfg.rules))))
    }.toMap
    (nt: Nonterminal, size: Int) => {
      //create a CYK parser
      val cykparser = cykParsers(nt)
      val spaceSize = wordGen.wordCounter.boundForNonterminal(nt, size)
      // println("# words of size " + size+" : "+spaceSize)
      //make sure that we atleast need 10 bits
      if (nos >= spaceSize) {
        //the range is smaller than the required number of samples, use the normal checker itself
        checkForDuplicates(wordGen, nos)(nt, size)
      } else {
        val sampleEnum = wordGen.getSamplingEnumerator(nt, size, nos)
        var feedback: AmbiguityFeedback[T] = PossiblyUnambiguous()
        var break = false
        while (!break && sampleEnum.hasNext) {
          //get a random number with at most rangeBits number of bits
          val w = sampleEnum.next
          //println("word #: " + index+" word size: "+w.size)
          //check if the 'w' has two parse trees
          cykparser.hasMultipleTrees(w) match {
            case None =>
              ; //do nothing
            case Some((nt, substr, choices)) =>
              // NOTE(review): this `nt` shadows the outer parameter — the witness
              // reports the nonterminal returned by hasMultipleTrees.
              //log ambiguous words to console and file
              /*val msg = "Found ambiuous non-terminal: " + nt + " ambiguous substr: " +
                wordToString(substr) + "Possible parsing choices: " + choices
              //println(msg)
              opctx.logMessage(msg)*/
              feedback = AmbiguityWitness(nt, substr)
              break = true
          }
        }
        feedback
      }
    }
  }

  /*  import generators.GrammarBoundingHelper._
*/
  /**
   * Bounded Ambiguity Checker
   * Note: if space is problem we can use bloom filters
   */ /*
  def checkForBoundedAmbiguity(g: Grammar, bg: Grammar)(implicit opctx: OperationContext): AmbiguityFeedback = {
    if (opctx.debugAmbiguityCheck) {
      println("Grammar: " + bg)
    }
    //This generator is shared by nonterminals
    val wordGen = new RandomAccessGenerator(bg)
    val maxWordSize = opctx.maxWordSize
    def checkForDuplicates(nt: Nonterminal, now: Int, step: Int) = {
      var index = -1
      var words = MutableMap[Word, BigInt]()
      var duplicate: Option[Word] = None
      var break = false
      while (words.size <= now && !break) {
        //increment index in steps
        index += step
        if (opctx.debugAmbiguityCheck) {
          if (index % 100 == 0)
            println("word #: " + index)
        }
        wordGen.getWordAtIndexNonterminal(nt, index) match {
          case Element(w) if (words.contains(w)) =>
            duplicate = Some(w)
            break = true //found a duplicate so break and report the string as a witness
          case Element(w) =>
            if (opctx.debugAmbiguityCheck) {
              if (index % 1000 == 0)
                println(nt + " word at index " + index + " : " + wordToString(w))
            }
            if (w.size > maxWordSize)
              break = true
            else
              words += (w -> index)
          case _ =>
            //break and say that the nonterminal is not ambiguous
            break = true
        }
      }
      //println("word at index " + (index - step) + " : " + wordToString(words.toList(words.size - 1)))
      if (duplicate.isDefined) {
        val w = duplicate.get
        //translate the parse trees to use the non-terminals of the unbounded grammar
        val ptree1 = remapParseTree(wordGen.constructParseTreeForNonterminal(nt, words(w)).get)
        val ptree2 = remapParseTree(wordGen.constructParseTreeForNonterminal(nt, index).get)
        AmbiguityWitnessWithTrees(nt, w, List(ptree1, ptree2))
      } else if (words.size < now)
        Unambiguous() //here, we have enumerated all words belonging to the grammar
      else
        PossiblyUnambiguous()
    }
    //collect all the non-terminals in post-order
    val nontermsInPO = GrammarUtils.postOrder(bg)
    if (opctx.debugAmbiguityCheck)
      println("Post-order: " + nontermsInPO.mkString(","))
    //for each non-terminal generate some words (consecutively and also with sampling )
    // and check if a word repeats or if it has two parse trees.
    //TODO: print the derivation
    nontermsInPO.foldLeft(PossiblyUnambiguous(): AmbiguityFeedback) {
      case (res: AmbiguityWitness, _) =>
        res
      case (res @ AmbiguityWitnessWithTrees(ant, w, ptrees), cnt) =>
        //print the ambiguous string and continue
        println("Found ambiguous word: " + wordToString(w))
        //print two parse trees for the word
        println("Parse tree 1: " + ParseTreeUtils.parseTreetoString(ptrees(0)))
        println("Parse tree 2: " + ParseTreeUtils.parseTreetoString(ptrees(1)))
        println("Checking non-terminal: " + cnt)
        val now = opctx.consecWordsForAmbiguityCheck
        //generate 'now' consecutive words for the non-terminal
        checkForDuplicates(cnt, now, 1)
      case (acc, nt) =>
        // /if (opctx.debugAmbiguityCheck)
        println("Checking non-terminal: " + nt)
        val now = opctx.consecWordsForAmbiguityCheck
        //generate 'now' consecutive words for the non-terminal
        checkForDuplicates(nt, now, 1)
    }
  }*/
}
package org.atnos.eff
import cats._
import data._
import cats.implicits._
import Eff._
import Interpret._
/**
* Effect for logging values alongside computations
*
* Compared to traditional Writer monad which accumulates values by default
* this effect can be interpreted in different ways:
*
* - log values to the console or to a file as soon as they are produced
* - accumulate values in a list
*
*/
/** Aggregates the creation and interpretation operations of the Writer effect. */
trait WriterEffect extends
  WriterCreation with
  WriterInterpretation

object WriterEffect extends WriterEffect

/** Constructors for the Writer effect. */
trait WriterCreation {

  /** write a given value */
  def tell[R, O](o: O)(implicit member: Writer[O, ?] |= R): Eff[R, Unit] =
    send[Writer[O, ?], R, Unit](Writer(o, ()))

}

object WriterCreation extends WriterCreation
/** Interpreters for the Writer effect: accumulate, fold, or stream written values. */
trait WriterInterpretation {

  /**
   * run a writer effect and return the list of written values
   *
   * This uses a ListBuffer internally to append values
   */
  def runWriter[R, U, O, A, B](w: Eff[R, A])(implicit m: Member.Aux[Writer[O, ?], R, U]): Eff[U, (A, List[O])] =
    runWriterFold(w)(ListFold[O])

  /**
   * More general fold of runWriter where we can use a fold to accumulate values in a mutable buffer
   */
  def runWriterFold[R, U, O, A, B](w: Eff[R, A])(fold: RightFold[O, B])(implicit m: Member.Aux[Writer[O, ?], R, U]): Eff[U, (A, B)] = {
    val executed =
      Interpret.runInterpreter(w)(new Interpreter[Writer[O, ?], U, A, (A, fold.S)] {
        // A pure value pairs with the fold's initial accumulator.
        def onPure(a: A): Eff[U, (A, fold.S)] =
          Eff.pure((a, fold.init))

        // Each Writer effect folds its written value `o` into the accumulator
        // after the continuation has produced its (a, s) pair.
        def onEffect[X](ox: Writer[O, X], continuation: Continuation[U, X, (A, fold.S)]): Eff[U, (A, fold.S)] = {
          val (o, x) = ox.run
          Eff.impure(x, continuation, { case (a, s) => (a, fold.fold(o, s)) })
        }

        // A trailing (last) Writer effect has no result to fold into; it is dropped.
        def onLastEffect[X](x: Writer[O, X], continuation: Continuation[U, X, Unit]): Eff[U, Unit] =
          Eff.pure(())

        // Applicative batch: written values are collected in traversal order into a
        // local buffer, then folded into the accumulator one by one.
        def onApplicativeEffect[X, T[_] : Traverse](xs: T[Writer[O, X]], continuation: Continuation[U, T[X], (A, fold.S)]): Eff[U, (A, fold.S)] = {
          val os = new collection.mutable.ListBuffer[O]
          val values = xs.map { w: Writer[O, X] =>
            val (o, x) = w.run
            os.append(o)
            x
          }
          Eff.impure(values, continuation, { case (a, s) => (a, os.toList.foldLeft(s) { (res, o) => fold.fold(o, res) }) })
        }
      })
    executed.map { case (a, s) => (a, fold.finalize(s)) }
  }

  /**
   * Run a side-effecting fold
   */
  def runWriterUnsafe[R, U, O, A](w: Eff[R, A])(f: O => Unit)(implicit m: Member.Aux[Writer[O, ?], R, U]): Eff[U, A] =
    interpretUnsafe(w)(new SideEffect[Writer[O, ?]] {
      def apply[X](tx: Writer[O, X]): X = {
        // run the effectful callback on each written value, then pass the result through
        val (o, x) = tx.run
        f(o)
        x
      }
      def applicative[X, Tr[_] : Traverse](ms: Tr[Writer[O, X]]): Tr[X] =
        ms.map(apply)
    })

  /** Runs written values through an Eval action, deferring all of them until the end. */
  def runWriterEval[R, U, O, A](w: Eff[R, A])(f: O => Eval[Unit])(implicit m: Member.Aux[Writer[O, ?], R, U], ev: Eval |= U): Eff[U, A] =
    runWriterFold(w)(EvalFold(f)).flatMap { case (a, e) => send[Eval, U, Unit](e).as(a) }

  /** Accumulates written values with their Monoid. */
  def runWriterMonoid[R, U, O, A](w: Eff[R, A])(implicit m: Member.Aux[Writer[O, ?], R, U], O: Monoid[O]): Eff[U, (A, O)] =
    runWriterFold(w)(MonoidFold[O])

  /** Maps each written value into a Monoid M before accumulating. */
  def runWriterIntoMonoid[R, U, O, M, A](w: Eff[R, A])(f: O => M)(implicit m: Member.Aux[Writer[O, ?], R, U], M: Monoid[M]): Eff[U, (A, M)] =
    runWriterFold(w)(IntoMonoidFold[M, O](f))

  // Builds a List by prepending each folded value.
  implicit def ListFold[A]: RightFold[A, List[A]] = new RightFold[A, List[A]] {
    type S = List[A]
    val init = List[A]()
    def fold(a: A, s: S) = a :: s
    def finalize(s: S) = s
  }

  def IntoMonoidFold[M: Monoid, A](f: A => M): RightFold[A, M] = new RightFold[A, M] {
    type S = M
    val init: M = Monoid[M].empty
    def fold(a: A, s: M): M = f(a) |+| s
    def finalize(s: M): M = s
  }

  def MonoidFold[A : Monoid]: RightFold[A, A] =
    IntoMonoidFold(identity)

  // Defers each action so nothing runs until the final Eval is evaluated.
  def EvalFold[A](f: A => Eval[Unit]): RightFold[A, Eval[Unit]] = new RightFold[A, Eval[Unit]] {
    type S = Eval[Unit]
    val init = Eval.Unit
    def fold(a: A, s: S) = Eval.defer { f(a) >> s }
    def finalize(s: S) = s
  }

}
/** support trait for folding values while possibly keeping some internal state */
trait RightFold[A, B] {
  // Internal accumulator type, hidden from callers.
  type S
  // Initial accumulator value.
  val init: S
  /** Folds one value into the accumulator. */
  def fold(a: A, s: S): S
  /** Converts the final accumulator into the result type. */
  def finalize(s: S): B
}

object WriterInterpretation extends WriterInterpretation
//package codesniffer.deckard.vgen
//
//import java.lang.reflect.Modifier
//
//import codesniffer.deckard.{WeightedVec, CharacVec, ArrayVec}
//import codesniffer.api.Node
//import codesniffer.api.body.MethodDeclaration
//import codesniffer.api.stmt.Statement
//import codesniffer.api.visitor.VoidVisitorAdapter
//
//import scala.StringBuilder
//import scala.beans.BeanProperty
//import scala.collection.mutable
//import scala.collection.convert.wrapAsJava._
//import scala.collection.mutable.Buffer
//
///**
// * Created by Bowen Cai on 5/21/2015.
// */
//case class SlicerVecGen[F](vecGen: BasicVecGen[F], counter: SubTreeCounter[F]) extends VoidVisitorAdapter[Context[F]] {
//
// // less than lowerBound -> ignored
// @BeanProperty var lowerBound = 10
// // greater than upperBound -> break
// @BeanProperty var upperBound = 20
//
// override def visit(method: MethodDeclaration, ctx: Context[F]): Unit =
// if (!ctx.config.filterMethod(method)) {
// val modifiers = method.getModifiers
// if (!Modifier.isAbstract(modifiers) && !Modifier.isNative(modifiers)
// && method.getBody != null
// && method.getBody.getStmts != null && method.getBody.getStmts.size() > 0) {
//
// val methodName = method.getName
//
// val prevLoc = ctx.currentLocation
// ctx.currentLocation = ctx.currentLocation.enterMethod(methodName, method.getBeginLine, method.getEndLine)
//
// try {
// val head: Node = method.getBody
// val total = counter.nodeCount(head)(ctx)
// if (total > lowerBound) {
// if (total < upperBound)
// genVec(head, ctx)(method)
// else search(head, ctx)(method)
// }
// } catch {
// case e: Exception => throw new RuntimeException(s"Could not travel though method ${ctx.currentLocation}", e)
// }
// ctx.currentLocation = prevLoc
// }
// }
//
// @inline
// private def countWithin(n: Node): Int = {
// val oc = n.getData
// if (oc != null) oc.asInstanceOf[Int]
// else 1
// }
//
// /**
// *
// * @param node node number under this node is greater than upperBound
// * @param ctx
// * @param method
// * @return
// */
// @inline
// protected def search(node: Node, ctx: Context[F])(implicit method: MethodDeclaration): Unit = {
// var kids = node.getChildrenNodes
// if (kids != null && kids.size() > 0) {
// while (kids.size() == 1)
// kids = kids.get(0).getChildrenNodes
// travel(kids, ctx)
// }
// }
//
// protected def travel(stmts: java.util.List[Node], ctx: Context[F])(implicit method: MethodDeclaration): Unit = {
// var head = stmts.get(0)
// var i = 1
// var cc = countWithin(head)
// // find an appropriate head
// while (cc > upperBound && i < stmts.size()) {
// search(head, ctx)
// head = stmts.get(i)
// i += 1
// cc = countWithin(head)
// }
//
// // init
// var sWindow = new mutable.ListBuffer[Node] // the sliding window
// sWindow += head
// var sum = cc
//
// // travel though this level
// while (i < stmts.size()) {
// // enrich window
// while (sum < lowerBound) {
// if (i >= stmts.size())
// return
//
// var next = stmts.get(i)
// i += 1
// var ccc = countWithin(next)
//
// // skip big node, abandon the already-in
// while (ccc > upperBound) {
// search(next, ctx)
// if (i < stmts.size()) {
// next = stmts.get(i)
// i += 1
// ccc = countWithin(next)
// sWindow.clear()
// sum = 0
// } else return
// }
// // add to window
// sWindow += next
// sum += ccc
// }
//
// // generate A vector
// genVec(sWindow, ctx)
//
// // chop head
// if (sum >= upperBound) {
// while (sum >= upperBound) {
// sum -= countWithin(sWindow.head) // pop extra nodes
// sWindow.remove(0, 1)
// }
// } else {
// sum -= countWithin(sWindow.head) // pop at least one node, forwarding the window
// sWindow.remove(0, 1)
// }
//
// }
// // take the rest
// if (sum > lowerBound) {
// genVec(sWindow, ctx)
// }
// }
//
// @inline
// protected def genVec(node: Node, ctx: Context[F])(implicit method: MethodDeclaration): Unit = {
// val prev = ctx.currentLocation
// ctx.currentLocation = ctx.currentLocation.copy(lineBegin = node.getBeginLine, lineEnd = node.getEndLine)
//// val v = new WeightedVec[F](vecGen.before(method, ctx))
// val v = vecGen.before(method, ctx)
//
// ctx.currentLocation = prev
//
// val nodeCode = node.toString.intern()
// v.data = Some(nodeCode)
//
// vecGen.collectNode(node, v)(ctx)
// vecGen.after(method, v, ctx)
//// println(node)
//// println(va)
// }
//
// @inline
// protected def genVec[E <: Node](nodes: mutable.Buffer[E], ctx: Context[F])(implicit method: MethodDeclaration): Unit = {
// if (nodes.nonEmpty) {
// val prev = ctx.currentLocation
// ctx.currentLocation = ctx.currentLocation.copy(lineBegin = nodes.head.getBeginLine, lineEnd = nodes.last.getEndLine)
// val v = vecGen.before(method, ctx)
// ctx.currentLocation = prev
//
// val nodeCode = nodes.addString(new StringBuilder(512), "\\r\\n").toString().intern()
// v.data = Some(nodeCode)
//
// vecGen.collectNodes(nodes, v)(ctx)
// vecGen.after(method, v, ctx)
//
//// nodes.foreach(println)
//// println(va)
// }
//
// }
//}
//
////
//// protected def scan[E <:Node](topStmts: java.util.List[E], ctx: Context[F])(implicit method: MethodDeclaration): Unit = {
////
//// val topC = counter.kidsCount(topStmts)(ctx)
//// if (topC > 15) {
//// var buf = new mutable.ListBuffer [Node]
//// buf += topStmts.get(0)
//// var i = 1
////
//// while (i < topStmts.size()) {
//// var sum = topStmts.get(0).getData.asInstanceOf[Int]
//// while (sum < avg) {
//// val next = topStmts.get(i)
//// buf += next
//// i += 1
//// sum += next.getData.asInstanceOf[Int]
//// }
//// slide(buf, ctx)
//// while (sum >= avg) {
//// sum -= buf.last.getData.asInstanceOf[Int]
//// buf.remove(buf.size - 1, 1)
//// }
////
//// if (sum > 5) {
//// val v = null.asInstanceOf[ArrayVec[F]]
//// vecGen.collectNodes(buf, v)(ctx)
//// ctx.vecWriter.write(v)
//// }
//// }
////
//// for (st <- topStmts) {
//// topStmts.get(1)
//// }
////
//// } else if (topC > 5) {
//// // write
//// }
//// }
//
//
| xkommando/CodeSniffer | deckard/src/main/scala/codesniffer/deckard/vgen/SlicerVecGen.scala | Scala | lgpl-3.0 | 6,944 |
/*
* This interface is primarily used for implementing storage adapters where
* row data is partitioned by a time stamp column and also contains one or more key
* columns
*
*/
package com.ligadata.StorageBase
import com.ligadata.KvBase.{ Key, Value, TimeRange }
import com.ligadata.Utils.{ KamanjaLoaderInfo }
import java.util.Date
/**
 * Storage operations shared by [[DataStore]] and [[Transaction]].
 * Rows are addressed by a [[Key]]; range queries use a [[TimeRange]] and/or
 * bucket-key string arrays.
 */
trait DataStoreOperations {
  // update operations, add & update semantics are different for relational databases

  /** Writes one key/value pair into the named container. */
  def put(containerName: String, key: Key, value: Value): Unit

  //  def put(containerName: String, data_list: Array[(Key, Value)]): Unit

  /** Bulk write: each element pairs a container name with the key/value pairs destined for it. */
  def put(data_list: Array[(String, Array[(Key, Value)])]): Unit // data_list has List of container names, and each container has list of key & value

  // delete operations

  def del(containerName: String, keys: Array[Key]): Unit // For the given keys, delete the values

  def del(containerName: String, time: TimeRange, keys: Array[Array[String]]): Unit // For the given multiple bucket key strings, delete the values with in given date range

  // get operations — results are streamed through the callback, one (Key, Value) at a time

  /** Streams every key/value in the container. */
  def get(containerName: String, callbackFunction: (Key, Value) => Unit): Unit

  /** Streams the values stored under the given keys. */
  def get(containerName: String, keys: Array[Key], callbackFunction: (Key, Value) => Unit): Unit

  /** Streams values whose time stamp falls in any of the given ranges. */
  def get(containerName: String, timeRanges: Array[TimeRange], callbackFunction: (Key, Value) => Unit): Unit // Range of dates

  /** Streams values matching both a time range and one of the bucket key combinations. */
  def get(containerName: String, timeRanges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit

  /** Streams values matching one of the bucket key combinations, irrespective of time. */
  def get(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit

  /*
  // Passing filter to storage
  def get(containerName: String, filterFunction: (Key, Value) => Boolean, callbackFunction: (Key, Value) => Unit): Unit
  def get(containerName: String, timeRanges: Array[TimeRange], filterFunction: (Key, Value) => Boolean, callbackFunction: (Key, Value) => Unit): Unit // Range of dates
  def get(containerName: String, timeRanges: Array[TimeRange], bucketKeys: Array[Array[String]], filterFunction: (Key, Value) => Boolean, callbackFunction: (Key, Value) => Unit): Unit
  def get(containerName: String, bucketKeys: Array[Array[String]], filterFunction: (Key, Value) => Boolean, callbackFunction: (Key, Value) => Unit): Unit
*/

  // getKeys operations similar to get, but only key values

  def getKeys(containerName: String, callbackFunction: (Key) => Unit): Unit
  def getKeys(containerName: String, keys: Array[Key], callbackFunction: (Key) => Unit): Unit
  def getKeys(containerName: String, timeRanges: Array[TimeRange], callbackFunction: (Key) => Unit): Unit // Range of dates
  def getKeys(containerName: String, timeRanges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit
  def getKeys(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit
}
/**
 * A storage adapter: the full operation set plus transaction control and
 * container lifecycle management.
 */
trait DataStore extends DataStoreOperations {
  /** Begins a new transaction scoped to this store. */
  def beginTx(): Transaction
  def endTx(tx: Transaction): Unit // Same as commit
  /** Makes the transaction's changes durable. */
  def commitTx(tx: Transaction): Unit
  /** Discards the transaction's uncommitted changes. */
  def rollbackTx(tx: Transaction): Unit

  // clean up operations

  /** Releases connections/resources held by this adapter. */
  def Shutdown(): Unit
  /** Removes all rows from the given containers, keeping the containers themselves. */
  def TruncateContainer(containerNames: Array[String]): Unit
  def DropContainer(containerNames: Array[String]): Unit
  def CreateContainer(containerNames: Array[String]): Unit
}
/**
 * A transaction: supports the same operations as its parent store; changes are
 * finalized through the parent's commitTx/rollbackTx.
 */
trait Transaction extends DataStoreOperations {
  val parent: DataStore // Parent Data Store
}

// Storage Adapter Object to create storage adapter
trait StorageAdapterObj {
  /** Factory: builds a [[DataStore]] from a JSON/config string using the given class loader info. */
  def CreateStorageAdapter(kvManagerLoader: KamanjaLoaderInfo, datastoreConfig: String): DataStore
}
| traytonwhite/Kamanja | trunk/Storage/StorageBase/src/main/scala/com/ligadata/StorageBase/StorageAdapterBase.scala | Scala | apache-2.0 | 3,597 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.r
import org.apache.hadoop.fs.Path
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkException
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.regression.{AFTSurvivalRegression, AFTSurvivalRegressionModel}
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset}
/**
 * R wrapper around a fitted AFT survival regression pipeline.
 * `pipeline.stages(1)` is assumed to be the AFTSurvivalRegressionModel
 * (stage 0 is the RFormula model — see the companion's fit()).
 */
private[r] class AFTSurvivalRegressionWrapper private (
    val pipeline: PipelineModel,
    val features: Array[String]) extends MLWritable {

  private val aftModel: AFTSurvivalRegressionModel =
    pipeline.stages(1).asInstanceOf[AFTSurvivalRegressionModel]

  // Coefficients in R's survreg layout: optional intercept first, then the
  // feature coefficients, then log(scale) last.
  lazy val rCoefficients: Array[Double] = if (aftModel.getFitIntercept) {
    Array(aftModel.intercept) ++ aftModel.coefficients.toArray ++ Array(math.log(aftModel.scale))
  } else {
    aftModel.coefficients.toArray ++ Array(math.log(aftModel.scale))
  }

  // Row names aligned one-to-one with rCoefficients.
  lazy val rFeatures: Array[String] = if (aftModel.getFitIntercept) {
    Array("(Intercept)") ++ features ++ Array("Log(scale)")
  } else {
    features ++ Array("Log(scale)")
  }

  /** Scores the dataset, dropping the assembled features column from the output. */
  def transform(dataset: Dataset[_]): DataFrame = {
    pipeline.transform(dataset).drop(aftModel.getFeaturesCol)
  }

  override def write: MLWriter =
    new AFTSurvivalRegressionWrapper.AFTSurvivalRegressionWrapperWriter(this)
}
private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalRegressionWrapper] {

  /**
   * Splits an R survival formula "Surv(time, censor) ~ features" into an
   * RFormula-compatible formula string and the censor column name.
   *
   * Refactored from var + try/catch-of-MatchError to a direct match on the
   * regex extractor: same results, same exceptions, no exception-driven control flow.
   *
   * @throws UnsupportedOperationException if the feature terms use the dot operator
   * @throws SparkException if the formula does not have the Surv(...) ~ ... shape
   */
  private def formulaRewrite(formula: String): (String, String) = {
    val regex = """Surv\\(([^,]+), ([^,]+)\\) ~ (.+)""".r
    formula match {
      case regex(label, censor, features) =>
        // TODO: Support dot operator.
        if (features.contains(".")) {
          throw new UnsupportedOperationException(
            "Terms of survreg formula can not support dot operator.")
        }
        (label.trim + "~" + features.trim, censor.trim)
      case _ =>
        throw new SparkException(s"Could not parse formula: $formula")
    }
  }

  /** Fits an RFormula + AFTSurvivalRegression pipeline on `data` and wraps the result. */
  def fit(formula: String, data: DataFrame): AFTSurvivalRegressionWrapper = {
    val (rewritedFormula, censorCol) = formulaRewrite(formula)

    val rFormula = new RFormula().setFormula(rewritedFormula)
    RWrapperUtils.checkDataColumns(rFormula, data)
    val rFormulaModel = rFormula.fit(data)

    // get feature names from output schema
    val schema = rFormulaModel.transform(data).schema
    val featureAttrs = AttributeGroup.fromStructField(schema(rFormula.getFeaturesCol))
      .attributes.get
    val features = featureAttrs.map(_.name.get)

    val aft = new AFTSurvivalRegression()
      .setCensorCol(censorCol)
      .setFitIntercept(rFormula.hasIntercept)
      .setFeaturesCol(rFormula.getFeaturesCol)

    val pipeline = new Pipeline()
      .setStages(Array(rFormulaModel, aft))
      .fit(data)

    new AFTSurvivalRegressionWrapper(pipeline, features)
  }

  override def read: MLReader[AFTSurvivalRegressionWrapper] = new AFTSurvivalRegressionWrapperReader

  override def load(path: String): AFTSurvivalRegressionWrapper = super.load(path)

  /** Persists the wrapper as JSON metadata (class name + feature names) plus the fitted pipeline. */
  class AFTSurvivalRegressionWrapperWriter(instance: AFTSurvivalRegressionWrapper)
    extends MLWriter {

    override protected def saveImpl(path: String): Unit = {
      val rMetadataPath = new Path(path, "rMetadata").toString
      val pipelinePath = new Path(path, "pipeline").toString

      val rMetadata = ("class" -> instance.getClass.getName) ~
        ("features" -> instance.features.toSeq)
      val rMetadataJson: String = compact(render(rMetadata))
      sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)

      instance.pipeline.save(pipelinePath)
    }
  }

  /** Restores a wrapper previously saved by [[AFTSurvivalRegressionWrapperWriter]]. */
  class AFTSurvivalRegressionWrapperReader extends MLReader[AFTSurvivalRegressionWrapper] {

    override def load(path: String): AFTSurvivalRegressionWrapper = {
      implicit val format = DefaultFormats
      val rMetadataPath = new Path(path, "rMetadata").toString
      val pipelinePath = new Path(path, "pipeline").toString

      val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
      val rMetadata = parse(rMetadataStr)
      val features = (rMetadata \\ "features").extract[Array[String]]

      val pipeline = PipelineModel.load(pipelinePath)
      new AFTSurvivalRegressionWrapper(pipeline, features)
    }
  }
}
| ZxlAaron/mypros | mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala | Scala | apache-2.0 | 5,343 |
package score.discord.canti.command.api
import com.codedx.util.MapK
/**
 * One parsed invocation of a command.
 *
 * @param prefix  the prefix string that introduced the command
 * @param name    the name under which the command was invoked
 * @param args    typed argument values, keyed by their ArgSpec
 * @param invoker the entity that triggered the command
 */
final case class CommandInvocation(
  prefix: String,
  name: String,
  args: MapK[ArgSpec, [T] =>> T],
  invoker: CommandInvoker
):
  // Convenience accessor for the JDA instance of the invoking user.
  def jda = invoker.user.getJDA
| ScoreUnder/canti-bot | src/main/scala/score/discord/canti/command/api/CommandInvocation.scala | Scala | agpl-3.0 | 235 |
package com.github.mdr.mash.view.render.browser
/** Decorates strings shown in the browser UI with direction-indicator glyphs. */
object ArrowHelper {

  /** Returns `s` with its middle character replaced by the up-arrow glyph. */
  def addUpArrow(s: String): String = setMiddleCharacter(s, 'β')

  /** Returns `s` with its middle character replaced by the down-arrow glyph. */
  def addDownArrow(s: String): String = setMiddleCharacter(s, 'β')

  // Swap the character at the midpoint of `s` for `c`; the empty string passes through unchanged.
  private def setMiddleCharacter(s: String, c: Char): String =
    if (s.isEmpty) s
    else {
      val mid = s.length / 2
      s.substring(0, mid) + c + s.substring(mid + 1)
    }
}
| mdr/mash | src/main/scala/com/github/mdr/mash/view/render/browser/ArrowHelper.scala | Scala | mit | 328 |
package org.openapitools.client.api
import argonaut._
import argonaut.EncodeJson._
import argonaut.DecodeJson._
import org.http4s.{EntityDecoder, EntityEncoder}
import org.http4s.argonaut._
import org.joda.time.DateTime
import ExtensionClassContainerImpl1links._
/**
 * Generated Jenkins API model: the `_links` section of ExtensionClassContainerImpl1.
 *
 * @param self    link to this resource, if present
 * @param `class` the `_class` type discriminator emitted by Jenkins, if present
 */
case class ExtensionClassContainerImpl1links (
  self: Option[Link],
  `class`: Option[String])
object ExtensionClassContainerImpl1links {
  // Brings DateTime codecs into implicit scope for derivation (harmless if unused by this shape).
  import DateTimeCodecs._
  // Argonaut JSON codec derived from the case-class shape.
  implicit val ExtensionClassContainerImpl1linksCodecJson: CodecJson[ExtensionClassContainerImpl1links] = CodecJson.derive[ExtensionClassContainerImpl1links]
  // http4s entity decoder/encoder built on the codec above.
  implicit val ExtensionClassContainerImpl1linksDecoder: EntityDecoder[ExtensionClassContainerImpl1links] = jsonOf[ExtensionClassContainerImpl1links]
  implicit val ExtensionClassContainerImpl1linksEncoder: EntityEncoder[ExtensionClassContainerImpl1links] = jsonEncoderOf[ExtensionClassContainerImpl1links]
}
| cliffano/swaggy-jenkins | clients/scalaz/generated/src/main/scala/org/openapitools/client/api/ExtensionClassContainerImpl1links.scala | Scala | mit | 900 |
package se.stagehand.swing.gui
import scala.swing._
import se.stagehand.swing.lib.Vector2
import java.awt.MouseInfo
import se.stagehand.lib.Log
/** A [[Dialog]] with convenience methods for centring itself and refreshing its layout. */
class BetterDialog extends Dialog {
  protected val log = Log.getLog(this.getClass())

  /** Moves this dialog so that it is centred on the given component's bounds. */
  def centerOn(component: Component) {
    centerOn(GUIUtils.rectCenter(component.bounds))
  }

  /** Moves this dialog so that its centre coincides with the point `p`. */
  def centerOn(p: Point) {
    // Fix: the original bound this assignment to a dangling `val center =`,
    // producing an unused val of type Unit. The assignment itself is the intent:
    // offset by the negated centre of our own bounds so our midpoint lands on p.
    location = GUIUtils.rectCenter(bounds).neg + p
  }

  /** Re-packs the dialog to its preferred size and revalidates/repaints its contents. */
  def refresh {
    log.debug("" + preferredSize)
    peer.setSize(preferredSize)
    contents.foreach(_.revalidate)
    repaint
  }
}
object BetterDialog {

  /** Centres `dialog` on `position`, opens it (modally), and returns the user's selection. */
  def inputDialog[T](dialog: InputDialog[T], position: Point): T = {
    dialog.centerOn(position)
    dialog.open()
    dialog.selected
  }

  /** As above, but centres the dialog on the current mouse location. */
  def inputDialog[T](dialog: InputDialog[T]): T = {
    val mousePosition = GUIUtils.mouse.getLocation()
    inputDialog(dialog, mousePosition)
  }

  /** Base class for modal dialogs that yield a value of type `T`. */
  abstract class InputDialog[T] extends BetterDialog {
    /** The value chosen by the user; read after the dialog closes. */
    def selected: T
    modal = true
  }
}
package com.blinkbox.books.spray
import spray.http.{MediaType, MediaTypes}
package object v2 {
  /**
   * The version 2 media type, registered with spray's media-type registry so it
   * can participate in content negotiation.
   */
  val `application/vnd.blinkbox.books.v2+json` = MediaTypes.register(MediaType.custom(
    mainType = "application",
    subType = "vnd.blinkbox.books.v2+json",
    binary = true, // binary as the encoding is defined as utf-8 by the json spec
    compressible = true))
} | blinkboxbooks/common-spray.scala | src/main/scala/com/blinkbox/books/spray/v2/package.scala | Scala | mit | 411 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
/**
* A visitor pattern for traversing a [[LogicalPlan]] tree and computing some properties.
*/
trait LogicalPlanVisitor[T] {

  // Dispatches on the concrete node type; plan types without a dedicated
  // visit method fall through to `default`.
  def visit(p: LogicalPlan): T = p match {
    case p: Aggregate => visitAggregate(p)
    case p: Distinct => visitDistinct(p)
    case p: Except => visitExcept(p)
    case p: Expand => visitExpand(p)
    case p: Filter => visitFilter(p)
    case p: Generate => visitGenerate(p)
    case p: GlobalLimit => visitGlobalLimit(p)
    case p: Intersect => visitIntersect(p)
    case p: Join => visitJoin(p)
    case p: LocalLimit => visitLocalLimit(p)
    case p: Pivot => visitPivot(p)
    case p: Project => visitProject(p)
    case p: Repartition => visitRepartition(p)
    case p: RepartitionByExpression => visitRepartitionByExpr(p)
    case p: Sample => visitSample(p)
    case p: ScriptTransformation => visitScriptTransform(p)
    case p: Union => visitUnion(p)
    case p: Window => visitWindow(p)
    case p: Tail => visitTail(p)
    case p: Sort => visitSort(p)
    case p: WithCTE => visitWithCTE(p)
    case p: LogicalPlan => default(p)
  }

  // Fallback for node types that have no dedicated visit method.
  def default(p: LogicalPlan): T

  def visitAggregate(p: Aggregate): T

  def visitDistinct(p: Distinct): T

  def visitExcept(p: Except): T

  def visitExpand(p: Expand): T

  def visitFilter(p: Filter): T

  def visitGenerate(p: Generate): T

  def visitGlobalLimit(p: GlobalLimit): T

  def visitIntersect(p: Intersect): T

  def visitJoin(p: Join): T

  def visitLocalLimit(p: LocalLimit): T

  def visitPivot(p: Pivot): T

  def visitProject(p: Project): T

  def visitRepartition(p: Repartition): T

  def visitRepartitionByExpr(p: RepartitionByExpression): T

  def visitSample(p: Sample): T

  def visitScriptTransform(p: ScriptTransformation): T

  def visitUnion(p: Union): T

  def visitWindow(p: Window): T

  def visitTail(p: Tail): T

  def visitSort(sort: Sort): T

  def visitWithCTE(p: WithCTE): T
}
| chuckchen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlanVisitor.scala | Scala | apache-2.0 | 2,752 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.graphx
import org.apache.spark.SparkContext._
import org.apache.spark.graphx.{GraphXUtils, PartitionStrategy}
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.graphx.util.GraphGenerators
import java.io.{PrintWriter, FileOutputStream}
/**
 * The SynthBenchmark application can be used to run various GraphX algorithms on
 * synthetic log-normal graphs. The intent of this code is to enable users to
 * profile the GraphX system without access to large graph datasets.
 */
object SynthBenchmark {

  /**
   * To run this program use the following:
   *
   * MASTER=spark://foobar bin/run-example graphx.SynthBenchmark -app=pagerank
   *
   * Options:
   *   -app "pagerank" or "cc" for pagerank or connected components. (Default: pagerank)
   *   -niters the number of iterations of pagerank to use (Default: 10)
   *   -nverts the number of vertices in the graph (Default: 100000)
   *   -numEPart the number of edge partitions in the graph (Default: number of cores)
   *   -partStrategy the graph partitioning strategy to use
   *   -mu the mean parameter for the log-normal graph (Default: 4.0)
   *   -sigma the stdev parameter for the log-normal graph (Default: 1.3)
   *   -degFile the local file to save the degree information (Default: Empty)
   *   -seed seed to use for RNGs (Default: -1, picks seed randomly)
   */
  def main(args: Array[String]) {
    // Parse "-key=value" arguments into (key, value) pairs.
    val options = args.map {
      arg =>
        arg.dropWhile(_ == '-').split('=') match {
          case Array(opt, v) => (opt -> v)
          case _ => throw new IllegalArgumentException("Invalid argument: " + arg)
        }
    }
    // Defaults, overridden below by the parsed options.
    var app = "pagerank"
    var niter = 10
    var numVertices = 100000
    var numEPart: Option[Int] = None
    var partitionStrategy: Option[PartitionStrategy] = None
    var mu: Double = 4.0
    var sigma: Double = 1.3
    var degFile: String = ""
    var seed: Int = -1
    options.foreach {
      case ("app", v) => app = v
      case ("niters", v) => niter = v.toInt
      case ("nverts", v) => numVertices = v.toInt
      case ("numEPart", v) => numEPart = Some(v.toInt)
      case ("partStrategy", v) => partitionStrategy = Some(PartitionStrategy.fromString(v))
      case ("mu", v) => mu = v.toDouble
      case ("sigma", v) => sigma = v.toDouble
      case ("degFile", v) => degFile = v
      case ("seed", v) => seed = v.toInt
      case (opt, _) => throw new IllegalArgumentException("Invalid option: " + opt)
    }
    val conf = new SparkConf()
      .setAppName(s"GraphX Synth Benchmark (nverts = $numVertices, app = $app)")
    GraphXUtils.registerKryoClasses(conf)
    val sc = new SparkContext(conf)
    // Create the graph
    println(s"Creating graph...")
    val unpartitionedGraph = GraphGenerators.logNormalGraph(sc, numVertices,
      numEPart.getOrElse(sc.defaultParallelism), mu, sigma, seed)
    // Repartition the graph
    val graph = partitionStrategy.foldLeft(unpartitionedGraph)(_.partitionBy(_)).cache()
    var startTime = System.currentTimeMillis()
    // Forces graph materialization so loadTime measures construction, not laziness.
    val numEdges = graph.edges.count()
    println(s"Done creating graph. Num Vertices = $numVertices, Num Edges = $numEdges")
    val loadTime = System.currentTimeMillis() - startTime
    // Collect the degree distribution (if desired)
    if (!degFile.isEmpty) {
      val fos = new FileOutputStream(degFile)
      val pos = new PrintWriter(fos)
      val hist = graph.vertices.leftJoin(graph.degrees)((id, _, optDeg) => optDeg.getOrElse(0))
        .map(p => p._2).countByValue()
      hist.foreach {
        case (deg, count) => pos.println(s"$deg \\t $count")
      }
    }
    // Run PageRank
    startTime = System.currentTimeMillis()
    if (app == "pagerank") {
      println("Running PageRank")
      val totalPR = graph.staticPageRank(niter).vertices.map(_._2).sum()
      println(s"Total PageRank = $totalPR")
    } else if (app == "cc") {
      println("Running Connected Components")
      val numComponents = graph.connectedComponents.vertices.map(_._2).distinct().count()
      println(s"Number of components = $numComponents")
    }
    val runTime = System.currentTimeMillis() - startTime
    println(s"Num Vertices = $numVertices")
    println(s"Num Edges = $numEdges")
    println(s"Creation time = ${loadTime/1000.0} seconds")
    println(s"Run time = ${runTime/1000.0} seconds")
    sc.stop()
  }
}
// scalastyle:on println
| tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala | Scala | apache-2.0 | 5,522 |
package evaluation
/**
 * Builders for micro-averaged summary statistics: individual metric values are
 * pooled into a single sample before statistics are computed.
 *
 * <pre>
 * Created on 2016/12/16.
 * </pre>
 *
 * @author K.Sakamoto
 */
object MicroAveraging {
  /**
   * Pools the metric values of all runs into one flat sample; the second
   * argument records the number of runs that were pooled.
   */
  def summaryStatistics(metrics: Array[Seq[Double]]): SummaryStatistics = {
    SummaryStatistics(metrics.flatten, metrics.length)
  }
  // Single-run case: the sample is the metric sequence itself, with count 1.
  def summaryStatistics(metrics: Seq[Double]): SummaryStatistics = {
    SummaryStatistics(metrics, 1)
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/evaluation/MicroAveraging.scala | Scala | apache-2.0 | 373 |
package ai.verta.repository
/**
 * Signals that a commit's saved state does not permit the attempted operation.
 *
 * @param message human-readable description of the failure (empty by default)
 * @param cause   underlying exception, or null when there is none
 */
final case class IllegalCommitSavedStateException(
  private val message: String = "",
  private val cause: Throwable = null
) extends Exception(message, cause)
| mitdbg/modeldb | client/scala/src/main/scala/ai/verta/repository/IllegalCommitSavedStateException.scala | Scala | mit | 197 |
package com.twitter.finagle.param
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.finagle.Stack
import com.twitter.finagle.filter.RequestSemaphoreFilter
/**
* A collection of methods for configuring the server-side admission control modules
* of Finagle servers.
*
* @tparam A a [[Stack.Parameterized]] server to configure
*/
class ServerAdmissionControlParams[A <: Stack.Parameterized[A]](self: Stack.Parameterized[A]) {

  /**
   * Limits how many requests this server handles at once.
   *
   * @param maxConcurrentRequests upper bound on concurrently handled requests;
   *        `Int.MaxValue` means unbounded (the default behaviour)
   * @param maxWaiters how many additional requests (beyond `maxConcurrentRequests`)
   *        may sit queued before being rejected
   *
   * @see [[https://twitter.github.io/finagle/guide/Servers.html#concurrency-limit]]
   */
  def concurrencyLimit(maxConcurrentRequests: Int, maxWaiters: Int): A = {
    val semaphore = maxConcurrentRequests match {
      case Int.MaxValue => None
      case bound => Some(new AsyncSemaphore(bound, maxWaiters))
    }
    self.configured(RequestSemaphoreFilter.Param(semaphore))
  }
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/param/ServerAdmissionControlParams.scala | Scala | apache-2.0 | 1,210 |
package org.joshjoyce.gasbag
import scala.io.Source
/**
 * Streams a tab-separated last.fm scrobble export as [[SongInfo]] values.
 * The first three lines of the file are headers; entries whose rating is not
 * "L" (listened) are filtered out.
 */
class ScrobbleFileSource(file: String) extends Traversable[SongInfo] {
  override def foreach[B](f: (SongInfo) => B) {
    val source = Source.fromFile(file)
    try {
      // Skip the 3 header lines by iterating the iterator *returned* by drop:
      // the original discarded drop's result and relied on it eagerly advancing
      // the receiver, which is version-dependent Iterator behaviour.
      source.getLines.drop(3).foreach { line =>
        val fields = line.split("\\t")
        val artist = fields(0)
        val album = fields(1) match {
          case "" => None
          case s => Some(s)
        }
        val title = fields(2)
        val trackNum = fields(3) match {
          case "" => None
          case s => Some(s)
        }
        val durationInSeconds = Some(fields(4))
        val rating = fields(5)
        val timestamp = fields(6).toInt
        // The MusicBrainz id column is optional.
        val mbid = if (fields.size > 7) Some(fields(7)) else None
        if (rating == "L") { // "L" = listened; anything else was skipped
          f(SongInfo(artist, title, album, durationInSeconds, trackNum, mbid, timestamp))
        }
      }
    } finally {
      source.close() // fix: the original never closed the file handle
    }
  }
}
| jnj/gasbag | src/main/scala/org/joshjoyce/gasbag/ScrobbleFileSource.scala | Scala | apache-2.0 | 1,015 |
package spgui.widgets.akuten
import spgui.communication.BackendCommunication
import spgui.widgets.ToAndFrom
import sp.erica._
import sp.domain._
import Logic._
/**
* Created by kristofer on 2017-05-02.
*/
object PatientModel {
  import rx._
  implicit val ctx: Ctx.Owner = Ctx.Owner.safe()

  // Shared reactive model instance backing this widget.
  val model = new PatienModel

  // Registers `callBack` to fire whenever the patient map changes.
  def getPatientObserver(callBack: (Map[String, API_Patient.Patient]) => Unit): rx.Obs = {
    model.pats.foreach(callBack)
  }

  // On (re)connect of the websocket, request the current patient state.
  val wsObs = BackendCommunication.getWebSocketStatusObserver( mess => {
    if (mess) send(API_PatientEvent.GetState)
  }, "patient-cards-widget-topic")

  // Wraps `mess` in an SPHeader and publishes it on the widget-event topic.
  def send(mess: API_PatientEvent.Event) {
    val json = ToAndFrom.make(SPHeader(from = "Frontend", to = "WidgetService"), mess)
    BackendCommunication.publish(json, "widget-event")
  }
}
// NOTE: class name "PatienModel" (sic) is kept as-is; renaming would break callers.
class PatienModel {
  import rx._
  implicit val ctx: Ctx.Owner = Ctx.Owner.safe()

  // upd: latest state received from the backend; pats: state exposed to
  // observers; prev: last value propagated (used for change detection).
  val upd = Var(Map[String, API_Patient.Patient]())
  val pats = Var(Map[String, API_Patient.Patient]())
  val prev = Var(Map[String, API_Patient.Patient]())

  // Feeds backend State events into `upd`; anything else is logged to stdout.
  val messObs = BackendCommunication.getMessageObserver(
    mess => {
      ToAndFrom.eventBody(mess).map {
        case API_PatientEvent.State(patients) =>
          upd() = patients
        case _ => println("something else in PatientModel: " + mess)
      }
    }
  , "patient-cards-widget-topic")

  // Propagates `upd` into `pats` only when it differs from the last propagated value.
  val checkPrev = Rx {
    val u = upd()
    val p = prev()
    if (u != p) { // FIXME (translated from Swedish): "doesn't seem to work, as there is always a difference..."
      pats() = u
      prev() = u
    }
  }
} | kristoferB/SP | sperica/frontend/src/main/scala/spgui/widgets/akuten/PatientModel.scala | Scala | mit | 1,535 |
package com.twitter.finagle.netty4.ssl.client
import com.twitter.finagle.Address
import com.twitter.finagle.ssl._
import com.twitter.finagle.ssl.client.SslClientConfiguration
import com.twitter.io.TempFile
import java.io.File
import java.net.InetSocketAddress
import java.security.KeyStore
import javax.net.ssl.{KeyManagerFactory, TrustManagerFactory}
import org.scalatest.FunSuite
/**
 * Exercises [[Netty4ClientEngineFactory]] over the supported SSL client
 * configuration axes: peer addressing, key/trust credentials (files and
 * factories), cipher suites, and protocols — checking both well-formed and
 * malformed inputs.
 */
class Netty4ClientEngineFactoryTest extends FunSuite {

  // A resolvable inet address (engine gets peer host/port) and a non-inet one (it doesn't).
  private[this] val address: Address = Address(new InetSocketAddress("localhost", 12345))
  private[this] val other: Address = Address.Failed(new Exception("testing"))

  // Force JDK version for tests, because the native engine could fail to load in different
  // environments
  private[this] val factory = Netty4ClientEngineFactory(forceJdk = true)

  test("default config with inet address creates client engine with peer") {
    val config = SslClientConfiguration()
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine.getUseClientMode())
    assert(sslEngine.getPeerHost() == "localhost")
    assert(sslEngine.getPeerPort() == 12345)
  }

  test("default config without inet address creates client engine without peer") {
    val config = SslClientConfiguration()
    val engine = factory(other, config)
    val sslEngine = engine.self

    assert(sslEngine.getUseClientMode())
    assert(sslEngine.getPeerHost() == null)
    assert(sslEngine.getPeerPort() == -1)
  }

  test("config with good cert and key credentials succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with bad cert or key credential fails") {
    // An empty temp file is not a valid certificate.
    val tempCertFile = File.createTempFile("test", "crt")
    tempCertFile.deleteOnExit()

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)

    intercept[SslConfigurationException] {
      factory(address, config)
    }
  }

  test("config with good cert chain and key credentials succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client-full-chain.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertsAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with bad cert chain or key credential fails") {
    val tempCertFile = File.createTempFile("test", "crt")
    tempCertFile.deleteOnExit()

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertsAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)

    intercept[SslConfigurationException] {
      factory(address, config)
    }
  }

  test("config with expired cert and valid key credential fails") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client-expired.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)

    intercept[SslConfigurationException] {
      factory(address, config)
    }
  }

  test("config with cert, key, and chain succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    // This file contains multiple certificates
    val tempChainFile = TempFile.fromResourcePath("/ssl/certs/svc-test-chain.cert.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertKeyAndChain(tempCertFile, tempKeyFile, tempChainFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with insecure trust credentials succeeds") {
    val config = SslClientConfiguration(trustCredentials = TrustCredentials.Insecure)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with good trusted cert collection succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-chain.cert.pem")
    // deleteOnExit is handled by TempFile

    val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
    val config = SslClientConfiguration(trustCredentials = trustCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with TrustManagerFactory succeeds") {
    val trustManagerFactory =
      TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
    // Initializing with a null KeyStore loads the JVM's default trust store.
    trustManagerFactory.init(null.asInstanceOf[KeyStore])
    val trustCredentials = TrustCredentials.TrustManagerFactory(trustManagerFactory)
    val config = SslClientConfiguration(trustCredentials = trustCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with KeyManagerFactory succeeds") {
    val keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
    keyManagerFactory.init(null, Array[Char]())
    val keyCredentials = KeyCredentials.KeyManagerFactory(keyManagerFactory)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with bad trusted cert collection fails") {
    val tempCertFile = File.createTempFile("test", "crt")
    tempCertFile.deleteOnExit()

    val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
    val config = SslClientConfiguration(trustCredentials = trustCredentials)

    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

  test("config with good cipher suites succeeds") {
    val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"))
    val config = SslClientConfiguration(cipherSuites = cipherSuites)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
    val enabled = sslEngine.getEnabledCipherSuites()
    assert(enabled.length == 1)
    assert(enabled(0) == "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256")
  }

  test("config with bad cipher suites fails") {
    val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_102_CBC_SHA496"))
    val config = SslClientConfiguration(cipherSuites = cipherSuites)

    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

  test("config with good enabled protocols succeeds") {
    val protocols = Protocols.Enabled(Seq("TLSv1.2"))
    val config = SslClientConfiguration(protocols = protocols)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
    val enabled = sslEngine.getEnabledProtocols()
    assert(enabled.length == 1)
    assert(enabled(0) == "TLSv1.2")
  }

  test("config with bad enabled protocols fails") {
    val protocols = Protocols.Enabled(Seq("TLSv2.0"))
    val config = SslClientConfiguration(protocols = protocols)

    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

}
| luciferous/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/ssl/client/Netty4ClientEngineFactoryTest.scala | Scala | apache-2.0 | 8,538 |
package de.zalando.swagger
import java.io.File
import java.net.URI
import de.zalando.swagger.strictModel.SwaggerModel
import org.scalatest.{FunSpec, MustMatchers}
/**
 * Runs the strict YAML parser over every .yaml fixture in the examples and
 * schema_examples resource folders, asserting each parses into a SwaggerModel.
 */
class StrictParseExamplesTest extends FunSpec with MustMatchers with ExpectedResults {

  val fixtures = new File(resourcesPath + "examples").listFiles ++
    new File(resourcesPath + "schema_examples").listFiles

  describe("Strict Swagger Parser") {
    // One generated test case per .yaml fixture file.
    fixtures.filter(_.getName.endsWith(".yaml")).foreach { file =>
      it(s"should parse the yaml swagger file ${file.getName} as specification") {
        val result = StrictYamlParser.parse(file)
        result._1 mustBe a [URI]
        result._2 mustBe a [SwaggerModel]
      }
    }
  }
}
| zalando/play-swagger | swagger-parser/src/test/scala/de/zalando/swagger/StrictParseExamplesTest.scala | Scala | mit | 713 |
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 Daniel W. H. James
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
/**
 * A forum thread record as stored in the DynamoDB "Thread" table.
 * Hash key: forumName; range key: subject (see the companion's table request).
 */
case class ForumThread(
  forumName: String,
  subject: String,
  message: String,
  lastPostedBy: String,
  lastPostedDateTime: DateTime,
  views: Long,
  replies: Long,
  answered: Long,
  tags: Set[String]
)
object ForumThread {

  val tableName = "Thread"
  val secondaryIndexName = "LastPostedIndex"

  // CreateTable request: (forumName, subject) primary key plus a local
  // secondary index on lastPostedDateTime projecting keys only.
  val tableRequest =
    new CreateTableRequest()
      .withTableName(ForumThread.tableName)
      .withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
      .withAttributeDefinitions(
        Schema.stringAttribute(Attributes.forumName),
        Schema.stringAttribute(Attributes.subject),
        Schema.stringAttribute(Attributes.lastPostedDateTime)
      )
      .withKeySchema(
        Schema.hashKey(Attributes.forumName),
        Schema.rangeKey(Attributes.subject)
      )
      .withLocalSecondaryIndexes(
        new LocalSecondaryIndex()
          .withIndexName(ForumThread.secondaryIndexName)
          .withKeySchema(
            Schema.hashKey(Attributes.forumName),
            Schema.rangeKey(Attributes.lastPostedDateTime)
          )
          .withProjection(
            new Projection()
              .withProjectionType(ProjectionType.KEYS_ONLY)
          )
      )

  // DynamoDB attribute names used by the serializer below.
  object Attributes {
    val forumName = "ForumName"
    val subject = "Subject"
    val message = "Message"
    val lastPostedBy = "LastPostedBy"
    val lastPostedDateTime = "LastPostedDateTime"
    val views = "Views"
    val replies = "Replies"
    val answered = "Answered"
    val tags = "Tags"
  }

  // Maps ForumThread to/from DynamoDB items; dates are (de)serialized as ISO-8601 strings.
  implicit object forumThreadSerializer extends DynamoDBSerializer[ForumThread] {
    private val fmt = ISODateTimeFormat.dateTime

    override val tableName = ForumThread.tableName
    override val hashAttributeName = Attributes.forumName
    override val rangeAttributeName = Some(Attributes.subject)

    override def primaryKeyOf(thread: ForumThread) =
      Map(
        Attributes.forumName -> thread.forumName,
        Attributes.subject -> thread.subject
      )

    override def toAttributeMap(thread: ForumThread) =
      Map(
        Attributes.forumName -> thread.forumName,
        Attributes.subject -> thread.subject,
        Attributes.message -> thread.message,
        Attributes.lastPostedBy -> thread.lastPostedBy,
        Attributes.lastPostedDateTime -> fmt.print(thread.lastPostedDateTime),
        Attributes.views -> thread.views,
        Attributes.replies -> thread.replies,
        Attributes.answered -> thread.answered,
        Attributes.tags -> thread.tags
      )

    override def fromAttributeMap(item: collection.mutable.Map[String, AttributeValue]) =
      ForumThread(
        forumName = item(Attributes.forumName),
        subject = item(Attributes.subject),
        message = item(Attributes.message),
        lastPostedBy = item(Attributes.lastPostedBy),
        lastPostedDateTime = fmt.parseDateTime(item(Attributes.lastPostedDateTime)),
        views = item(Attributes.views),
        replies = item(Attributes.replies),
        answered = item(Attributes.answered),
        tags = item(Attributes.tags)
      )
  }
}
| liruqi/aws-wrap | integration/src/it/scala/dynamodb/ForumThread.scala | Scala | apache-2.0 | 4,176 |
/**
* FILE: UnitDAOTest.scala
* PERCORSO /Codice/sgad/servertier/src/test/scala/sgad/servertier/dataaccess/databaseaccess/shareddatadao
* DATA CREAZIONE: 20 Febbraio 2014
* AUTORE: ProTech
* EMAIL: [email protected]
*
* Questo file Γ¨ proprietΓ del gruppo ProTech, viene rilasciato sotto licenza Apache v2.
*
* DIARIO DELLE MODIFICHE:
* 2014-02-20 - Creazione della classe - Segantin Fabio
*/
import sgad.servertier.dataaccess.data.shareddata.{Cost, QuantityResource, Resource, `Unit`, DataFactory}
import com.mongodb.casbah.commons.Imports.MongoDBObject
import org.scalatest._
import org.joda.time.IllegalFieldValueException
import sgad.servertier.dataaccess.databaseaccess.shareddatadao.{UnitDAO, CostDAO}
/**
 * Test class for the UnitDAO object (scaladoc translated from Italian).
 * Test-description strings below are intentionally left in Italian, as they
 * are runtime values reported by the test framework.
 */
class UnitDAOTest extends FlatSpec {
  // Reset the shared DataFactory registries so the test is self-contained.
  DataFactory.setUnits(Map())
  DataFactory.setResources(Map())
  DataFactory.setBuildings(Map())
  var gold = new Resource("oro")
  var potion = new Resource("pozione")
  DataFactory.setResources(Map("oro" -> gold, "pozione" -> potion))
  var quantityResource1 = Vector(new QuantityResource(gold, 100), new QuantityResource(potion, 300))
  var quantityResource2 = Vector(new QuantityResource(gold, 20), new QuantityResource(potion, 70))
  var externalUnit = new `Unit`("fante", 5, 30, new Cost(20, quantityResource2), true)
  var horse = new `Unit`("cavallo", 5, 30, new Cost(20, quantityResource1), true)
  // Expected Mongo representations of the two units above.
  val mongoObject = MongoDBObject(
    "name" -> externalUnit.getName,
    "attack" -> externalUnit.getAttack,
    "defence" -> externalUnit.getDefence,
    "cost" -> CostDAO.getMongoObject(externalUnit.getCost),
    "isBuilder" -> true)
  val mongoObject2 = MongoDBObject(
    "name" -> horse.getName,
    "attack" -> horse.getAttack,
    "defence" -> horse.getDefence,
    "cost" -> CostDAO.getMongoObject(horse.getCost),
    "isBuilder" -> true)

  // "must create an appropriate MongoObject"
  "UnitDAO" must "creare un MongoObject adeguato" in {
    assert(mongoObject != UnitDAO.getMongoObject(horse))
    assert(mongoObject2 == UnitDAO.getMongoObject(horse))
  }

  // "must build the unit corresponding to the right mongoObject"
  it must "creare un costo relativo al mongoObject giusto" in {
    assert(horse != UnitDAO.getObject(mongoObject))
    assert(horse == UnitDAO.getObject(mongoObject2))
  }

  // "must throw an exception if the mongoObject lacks the required fields"
  it must "lanciare una eccezione se il mongoObject non ha le informazioni relative" in {
    intercept[IllegalFieldValueException] {
      val mongoObject2 = MongoDBObject(
        "attack" -> horse.getAttack,
        "defence" -> horse.getDefence,
        "cost" -> CostDAO.getMongoObject(horse.getCost))
      UnitDAO.getObject(mongoObject2)
    }
    intercept[IllegalFieldValueException] {
      val mongoObject2 = MongoDBObject(
        "name" -> horse.getName,
        "defence" -> horse.getDefence,
        "cost" -> CostDAO.getMongoObject(horse.getCost))
      UnitDAO.getObject(mongoObject2)
    }
    intercept[IllegalFieldValueException] {
      val mongoObject2 = MongoDBObject(
        "name" -> horse.getName,
        "attack" -> horse.getAttack,
        "cost" -> CostDAO.getMongoObject(horse.getCost))
      UnitDAO.getObject(mongoObject2)
    }
    intercept[IllegalFieldValueException] {
      val mongoObject2 = MongoDBObject(
        "name" -> horse.getName,
        "attack" -> horse.getAttack,
        "defence" -> horse.getDefence)
      UnitDAO.getObject(mongoObject2)
    }
  }

  // "must throw an exception if the field types are incorrect"
  it must "lanciare una eccezione se il tipo dei dati non è corretto" in {
    intercept[IllegalFieldValueException] {
      val mongoObject3 = MongoDBObject(
        "name" -> "cavallo",
        "attack" -> 10.0,
        "defence" -> 10,
        "cost" -> CostDAO.getMongoObject(horse.getCost))
      UnitDAO.getObject(mongoObject3)
    }
    intercept[IllegalFieldValueException] {
      val mongoObject3 = MongoDBObject(
        "name" -> "cavallo",
        "attack" -> 10,
        "defence" -> 10.0,
        "cost" -> CostDAO.getMongoObject(horse.getCost))
      UnitDAO.getObject(mongoObject3)
    }
    intercept[IllegalFieldValueException] {
      val mongoObject3 = MongoDBObject(
        "name" -> "cavallo",
        "attack" -> 10,
        "defence" -> 10,
        "cost" -> "{}")
      UnitDAO.getObject(mongoObject3)
    }
  }
} | protechunipd/SGAD | Codice/sgad/servertier/src/test/scala/sgad/servertier/dataaccess/databaseaccess/shareddatadao/UnitDAOTest.scala | Scala | apache-2.0 | 3,950 |
package org.tribbloid.spookystuff.pages
import org.apache.spark.SparkEnv
import org.tribbloid.spookystuff.SpookyEnvSuite
import org.tribbloid.spookystuff.actions._
import org.tribbloid.spookystuff.dsl
/**
* Created by peng on 11/30/14.
*/
class TestUnstructured extends SpookyEnvSuite {

  import dsl._

  // Pages are lazy so the HTTP fetches only happen when a test actually runs.
  lazy val page = (Wget("http://www.wikipedia.org/").as('old)::Nil).resolve(spooky).head.asInstanceOf[Page]

  lazy val tablePage = (Wget("http://en.wikipedia.org/").as('old)::Nil).resolve(spooky).head.asInstanceOf[Page]

  /**
   * Round-trips `element` through the configured Spark serializer and asserts
   * that the deserialized copy is indistinguishable from the original:
   * equality, rendered HTML, child selection, attributes and extracted text.
   *
   * Extracted because the identical assertion block was repeated verbatim in
   * three tests below.
   */
  private def assertSerializationRoundTrip(element: Unstructured): Unit = {
    val ser = SparkEnv.get.serializer.newInstance()
    val serElement = ser.serialize(element)
    val element2 = ser.deserialize[Unstructured](serElement)
    assert(element === element2)
    assert(element.asInstanceOf[HtmlElement].parsed.outerHtml().split("\\n").map(_.trim) === element2.asInstanceOf[HtmlElement].parsed.outerHtml().split("\\n").map(_.trim))
    assert(element.children("a").size === element2.children("a").size)
    assert(element.attr("class") === element2.attr("class"))
    assert(element.code === element2.code)
    assert(element.ownText === element2.ownText)
    assert(element.boilerPipe === element2.boilerPipe)
  }

  test("Unstructured is serializable for div") {
    val elements = page.children("div.central-featured-lang")
    assert(elements.size === 10)
    elements.foreach(assertSerializationRoundTrip)
  }

  test("Unstructured is serializable for tr") {
    val elements = tablePage.children("table#mp-topbanner > tbody > tr")
    assert(elements.size === 1)
    elements.foreach(assertSerializationRoundTrip)
  }

  test("Unstructured is serializable for td") {
    val elements = tablePage.children("table#mp-topbanner > tbody > tr > td")
    assert(elements.size === 4)
    elements.foreach(assertSerializationRoundTrip)
  }

  test("attrs should handles empty attributes properly") {
    assert(page.children("h1.central-textlogo img").attrs("title").nonEmpty)
    assert(page.children("h1.central-textlogo img dummy").attrs("title").isEmpty)
    assert(page.children("h1.central-textlogo img").attrs("dummy").isEmpty)
  }
}
| chenUT/spookystuff | core/src/test/scala/org/tribbloid/spookystuff/pages/TestUnstructured.scala | Scala | apache-2.0 | 3,485 |
package models.services
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api.LoginInfo
import models.daos.UserDAO
import models.user.User
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Future
/**
* Handles actions to users.
*
* @param userDAO The user DAO implementation.
*/
class UserServiceImpl @Inject() (userDAO: UserDAO) extends UserService {

  /**
   * Looks up the user that matches the given login info.
   *
   * @param loginInfo The login info identifying the user.
   * @return The matching user, or None when no user exists for the login info.
   */
  def retrieve(loginInfo: LoginInfo): Future[Option[User]] = {
    userDAO.find(loginInfo)
  }

  /**
   * Persists the given user through the underlying DAO.
   *
   * @param user The user to save.
   * @return The saved user.
   */
  def save(user: User) = {
    userDAO.save(user)
  }
}
| danZzyy/SidewalkWebpage | sidewalk-webpage/app/models/services/UserServiceImpl.scala | Scala | mit | 886 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Scheduler}
import monix.execution.cancelables.AssignableCancelable
import scala.util.control.NonFatal
import monix.reactive.Consumer
import monix.reactive.observers.Subscriber
/** Implementation for [[monix.reactive.Consumer.foldLeft]]. */
private[reactive] final class FoldLeftConsumer[A, R](initial: () => R, f: (R, A) => R) extends Consumer.Sync[A, R] {

  def createSubscriber(cb: Callback[Throwable, R], s: Scheduler): (Subscriber.Sync[A], AssignableCancelable) = {
    val subscriber = new Subscriber.Sync[A] {
      implicit val scheduler = s
      // Guards against signalling the callback more than once.
      private[this] var isDone = false
      // Running fold state, seeded from `initial`.
      private[this] var acc = initial()

      def onNext(elem: A): Ack =
        try {
          // `f` is user code and may throw; any non-fatal error terminates
          // the stream, as a matter of contract.
          acc = f(acc, elem)
          Continue
        } catch {
          case NonFatal(ex) =>
            onError(ex)
            Stop
        }

      def onComplete(): Unit =
        if (!isDone) {
          isDone = true
          cb.onSuccess(acc)
        }

      def onError(ex: Throwable): Unit =
        if (!isDone) {
          isDone = true
          cb.onError(ex)
        }
    }

    (subscriber, AssignableCancelable.dummy)
  }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/FoldLeftConsumer.scala | Scala | apache-2.0 | 2,059 |
package io.muvr
import akka.actor._
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConversions._
/**
* CLI application for the exercise app
*/
object MuvrApp extends App with Muvr with ProxyScaffolding {

  // Sharding and cluster roles are pinned to a single monolith role; anything
  // not overridden here falls back to main.conf.
  lazy val config = {
    val role = "muvr-monolith"
    val clusterShardingConfig = ConfigFactory.parseString(s"akka.contrib.cluster.sharding.role=$role")
    val clusterRoleConfig = ConfigFactory.parseString(s"akka.cluster.roles=[$role]")
    clusterShardingConfig
      .withFallback(clusterRoleConfig)
      .withFallback(ConfigFactory.load("main.conf"))
  }

  override def journalStartUp(system: ActorSystem): Unit = ()

  // One actor system per configured JVM port; the second argument offsets the
  // companion port by 10000 (see ProxyScaffolding.actorSystemStartUp).
  // FIX: the lambda arrow was mojibake ("β") in place of "=>", which does not
  // compile; restored the arrow.
  val ports = config.getIntList("akka.cluster.jvm-ports")
  ports.foreach(port => actorSystemStartUp(port, 10000 + port))
}
| boonhero/muvr-server | main/src/main/scala/io/muvr/MuvrApp.scala | Scala | bsd-3-clause | 785 |
package poly.collection
import cats.implicits._
import poly.collection.mut._
/**
* Represents an iterable collection that is sorted according to a specific order every time it is iterated.
* @author Tongfei Chen
* @since 0.1.0
*/
trait SortedIterable[T] extends Iterable[T] { self =>
/** Returns the order under which the elements of this collection are sorted. */
implicit def elementOrder: Order[T]
// Filtering a sorted collection preserves the order of the survivors.
override def filter(f: T => Boolean): SortedIterable[T] = super.filter(f) asIfSorted elementOrder
// `!f` presumably comes from a predicate-negation enrichment in this library — TODO confirm.
override def filterNot(f: T => Boolean) = filter(!f)
/**
* Returns the unique elements of this iterable collection while retaining their original order.
* The equivalence function is this sorted collection's inherent order.
*/
def distinct(): SortedIterable[T] = new SortedIterableT.Distinct(self)
/**
* $LAZY Merges two sorted iterable collection into one sorted iterable collection.
* @param that Another sorted sequence. These two sequences must be sorted under the same order.
*/
def merge(that: SortedIterable[T]): SortedIterable[T] = new SortedIterableT.Merged(self, that)
// In a sorted collection the extrema sit at the ends; note `max()` walks the whole iterable.
def min() = self.head
def max() = self.last
/** Merges two sorted iterables eagerly into a sorted sequence. */
def mergeE(that: SortedIterable[T]): SortedSeq[T] = {
val ai = this.newIterator
val bi = that.newIterator
val c = ArraySeq[T]()
var aNotComplete = ai.advance()
var bNotComplete = bi.advance()
// Standard merge step: repeatedly take the smaller head of the two iterators.
while (aNotComplete && bNotComplete) {
if (ai.current <= bi.current) {
c :+= ai.current
aNotComplete = ai.advance()
} else {
c :+= bi.current
bNotComplete = bi.advance()
}
}
// Appends remaining elements
if (aNotComplete) do c :+= ai.current while (ai.advance())
if (bNotComplete) do c :+= bi.current while (bi.advance())
c.asIfSorted(this.elementOrder)
}
}
// Convenience base class combining the iterable skeleton with the sorted contract.
abstract class AbstractSortedIterable[T] extends AbstractIterable[T] with SortedIterable[T]
private[poly] object SortedIterableT {

  /**
   * Lazily yields each element of `self` once, dropping consecutive
   * duplicates. Because `self` is sorted, equal elements are adjacent, so
   * this produces the distinct elements of the whole collection.
   */
  class Distinct[T](self: SortedIterable[T]) extends AbstractSortedIterable[T] {
    implicit def elementOrder = self.elementOrder
    def newIterator = new AbstractIterator[T] {
      private[this] val it = self.newIterator
      private[this] var curr = default[T]
      // `first` distinguishes "no element emitted yet" from "curr is a real element".
      private[this] var first = true
      def current = curr
      def advance(): Boolean = {
        while (it.advance()) {
          // Emit only when the element differs from the last emitted one.
          if (first || (it.current =!= curr)) {
            first = false
            curr = it.current
            return true
          }
        }
        false
      }
    }
  }

  /**
   * Lazily merges two iterables sorted under the same order into one sorted
   * iterable (the merge step of merge sort).
   */
  class Merged[T](self: SortedIterable[T], that: SortedIterable[T]) extends AbstractSortedIterable[T] {
    implicit def elementOrder: Order[T] = self.elementOrder
    def newIterator: Iterator[T] = new AbstractIterator[T] {
      private[this] val ai = self.newIterator
      private[this] val bi = that.newIterator
      private[this] var curr: T = _
      private[this] var aNotComplete = ai.advance()
      private[this] var bNotComplete = bi.advance()
      def advance() = {
        // BUG FIX: advance() must report whether `curr` now holds a valid
        // element. The previous version returned the *underlying* iterator's
        // advance result instead of `true`, so the element just stored in
        // `curr` (and the rest of the other iterator) was silently dropped
        // as soon as either side became exhausted. It also never updated
        // aNotComplete/bNotComplete in the single-sided branches.
        if (aNotComplete && bNotComplete) {
          if (ai.current <= bi.current) {
            curr = ai.current
            aNotComplete = ai.advance()
          } else {
            curr = bi.current
            bNotComplete = bi.advance()
          }
          true
        } else if (aNotComplete) {
          // Only `self` has elements left; drain it one element per call.
          curr = ai.current
          aNotComplete = ai.advance()
          true
        } else if (bNotComplete) {
          // Only `that` has elements left; drain it one element per call.
          curr = bi.current
          bNotComplete = bi.advance()
          true
        } else false
      }
      def current = curr
    }
  }
} | ctongfei/poly-collection | core/src/main/scala/poly/collection/SortedIterable.scala | Scala | mit | 3,654 |
package io.getquill.context.sql
import io.getquill.Spec
// Zero-allocation wrapper for a numeric identifier.
case class Id(value: Long) extends AnyVal
// Shared fixture for SQL-context product tests: quoted queries plus sample rows.
trait ProductSpec extends Spec {
// Concrete subclasses supply the SQL context whose quotation DSL is imported below.
val context: SqlContext[_, _]
import context._
case class Product(id: Long, description: String, sku: Long)
// Quoted query over the whole product table.
val product = quote {
query[Product]
}
// Inserts one product and yields the generated id.
val productInsert = quote {
(p: Product) => query[Product].insert(p).returning(_.id)
}
// Batch variant: applies the single-row insert to every element of `b`.
val productInsertBatch = quote {
(b: Query[Product]) => b.foreach(p => productInsert.apply(p))
}
def productById = quote {
(id: Long) => product.filter(_.id == id)
}
// Sample rows; id 0 is a placeholder, presumably replaced by the database — TODO confirm.
val productEntries = List(
Product(0L, "Notebook", 1001L),
Product(0L, "Soap", 1002L),
Product(0L, "Pencil", 1003L)
)
// Column-wise insert of a single hard-coded row, returning the generated id.
val productSingleInsert = quote {
product.insert(_.id -> 0, _.description -> "Window", _.sku -> 1004L).returning(_.id)
}
}
| mentegy/quill | quill-sql/src/test/scala/io/getquill/context/sql/ProductSpec.scala | Scala | apache-2.0 | 846 |
package com.technophobia.substeps.domain.execution
import com.technophobia.substeps.domain._
import org.mockito.runners.MockitoJUnitRunner
import org.mockito.{Matchers, Spy, Mockito, Mock}
import org.junit.runner.RunWith
import org.junit.{Assert, Test}
import com.technophobia.substeps.domain.execution.RunResult.{Failed, Passed}
import com.technophobia.substeps.domain.Feature
import scala.Some
@RunWith(classOf[MockitoJUnitRunner])
// Verifies that Feature.run executes every scenario (and the background, when
// present) regardless of individual scenario failures.
class FeatureWithMultipleScenarioTest {
@Spy
var scenarioOne = BasicScenario(null,Seq(),Set())
@Spy
var scenarioTwo = BasicScenario(null,Seq(),Set())
@Mock
var background : Background = _
// Both scenarios pass -> the feature passes and both scenarios were run.
@Test
def testRun() {
Mockito.doReturn(RunResult.Passed).when(scenarioOne).run()
Mockito.doReturn(RunResult.Passed).when(scenarioTwo).run()
val feature = Feature("A feature", None, List(scenarioOne, scenarioTwo), Set())
val passed = feature.run(TagChecker.fromExclusions(Set()))
Assert.assertEquals(Passed, passed)
Mockito.verify(scenarioOne).run()
Mockito.verify(scenarioTwo).run()
}
// First scenario fails -> the feature reports the failure but still runs the second.
@Test
def testWhenOneFailsTwoIsStillRun() {
val failure = Failed("failure")
Mockito.doReturn(failure).when(scenarioOne).run()
Mockito.doReturn(RunResult.Passed).when(scenarioTwo).run()
val feature = Feature("A feature", None, List(scenarioOne, scenarioTwo), Set())
val passed = feature.run(TagChecker.fromExclusions(Set()))
Assert.assertEquals(failure, passed)
Mockito.verify(scenarioOne).run()
Mockito.verify(scenarioTwo).run()
}
// Second scenario fails -> failure propagates; first scenario was still run.
@Test
def testWhenTwoFailsOneIsStillRun() {
val failure = Failed("failure")
Mockito.doReturn(RunResult.Passed).when(scenarioOne).run()
Mockito.doReturn(failure).when(scenarioTwo).run()
val feature = Feature("A feature", None, List(scenarioOne, scenarioTwo), Set())
val passed = feature.run(TagChecker.fromExclusions(Set()))
Assert.assertEquals(failure, passed)
Mockito.verify(scenarioOne).run()
Mockito.verify(scenarioTwo).run()
}
// With a background present, it is run once per scenario (twice here).
@Test
def backgroundPassesAndScenariosRun() {
Mockito.when(background.run()).thenReturn(Passed)
Mockito.doReturn(RunResult.Passed).when(scenarioOne).run()
Mockito.doReturn(RunResult.Passed).when(scenarioTwo).run()
val feature = Feature("A feature with a background", Some(background), List(scenarioOne, scenarioTwo), Set())
val result = feature.run(TagChecker.fromExclusions(Set()))
Assert.assertEquals(RunResult.Passed, result)
Mockito.verify(scenarioOne).run()
Mockito.verify(scenarioTwo).run()
Mockito.verify(background, Mockito.times(2)).run()
}
}
| G2G3Digital/substeps-scala-core | src/test/scala/com/technophobia/substeps/domain/execution/FeatureWithMultipleScenarioTest.scala | Scala | lgpl-3.0 | 2,613 |
package com.eny.ooo.manager.connection
import javax.inject.{Inject, Named, Singleton}
import com.github.mauricio.async.db.Configuration
import com.github.mauricio.async.db.mysql.MySQLConnection
import com.github.mauricio.async.db.mysql.pool.MySQLConnectionFactory
import com.github.mauricio.async.db.pool.{ConnectionPool, PoolConfiguration}
@Singleton
class DbImpl @Inject() (
@Named("db.user")user:String,
@Named("db.host")host:String,
@Named("db.port")port:Int,
@Named("db.password")password:String,
@Named("db.database")db:String
) extends Db {
val configuration = new Configuration(user, host, port, if(password.equals("")) None else Some(password), Some(db))
override def pool() =
new ConnectionPool[MySQLConnection](new MySQLConnectionFactory(configuration), PoolConfiguration.Default)
}
| nyavro/ooo-master | dbImpl/src/main/scala/com/eny/ooo/manager/connection/DbImpl.scala | Scala | apache-2.0 | 836 |
package io.github.voidcontext.bricksetclient.client
import io.github.voidcontext.bricksetclient.api._
import scala.concurrent.Future
import scala.util.{Failure, Success}
case class InvalidCredentialsError(message: String) extends Exception(message)
// Thin wrapper over the generated scalaxb SOAP bindings for the Brickset v2 API.
// NOTE(review): the Future combinators below need an implicit ExecutionContext;
// presumably supplied by an import not visible here — confirm.
class BricksetClient(val apiKey: String) {
/**
* Soap client
*/
val service = (new BricksetAPIv2Soap12Bindings
with scalaxb.SoapClientsAsync
with scalaxb.DispatchHttpClientsAsync {}
).service
// Default failure used when the API returns an empty/absent result.
val apiError = new Exception("API error")
/**
* Checks the current api key
*/
def checkKey(): Future[CheckKeyResponse] = {
service.checkKey(Some(apiKey))
}
// Fails the future with `err` when the wrapped Option is empty.
def extractOption[T](future: Future[Option[T]], err: Throwable = apiError): Future[T] = {
future map {
case Some(value) => value
case None => throw err
}
}
/**
* Get user hash using the given credentials
*/
// The API signals bad credentials in-band via "ERROR:..." or "INVALIDKEY"
// strings rather than a fault, hence the regex check on the payload.
def login(username: String, password: String): Future[LoginResult] = {
val errorRe = "(ERROR:.*|INVALIDKEY)".r
val loginResultFuture = service.login(Some(apiKey), Some(username), Some(password)) map { response: LoginResponse =>
for {
result <- response.loginResult
} yield result match {
case errorRe(response) => Failure(InvalidCredentialsError(response))
case userHash => Success(userHash)
}
}
extractOption(loginResultFuture)
}
// Generic set search; empty-string parameters are treated by the API as "no filter".
def getSets(userHash: Option[String] = Some(""), query: Option[String] = Some(""), theme: Option[String] = Some(""),
subtheme: Option[String] = Some(""), setNumber: Option[String] = Some(""), year: Option[String] = Some(""),
owned: Option[String] = Some(""), wanted: Option[String] = Some(""), orderBy: Option[String] = Some(""),
pageSize: Option[String] = Some(""), pageNumber: Option[String] = Some(""), userName: Option[String] = Some("")
): Future[Seq[Sets]] = {
val setListFuture = service.getSets(Some(apiKey), userHash, query, theme, subtheme, setNumber, year, owned, wanted, orderBy, pageSize,
pageNumber, userName) map { res =>
for {
aos <- res.getSetsResult
} yield aos.sets.flatten
}
extractOption(setListFuture)
}
/**
* Shortcut for getting the owned sets of the previously logged in user
*/
def getOwnedSets(userHash: String) : Future[Seq[Sets]] = {
getSets(Some(userHash), owned = Some("1"))
}
}
object BricksetClient {
  /** Convenience factory, equivalent to `new BricksetClient(apikey)`. */
  def apply(apikey: String): BricksetClient =
    new BricksetClient(apikey)
}
| voidcontext/scala-brickset-client | src/main/scala/io/github/voidcontext/bricksetclient/client/BricksetClient.scala | Scala | mit | 2,531 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.dynamodb
import com.twitter.storehaus.ConvertedStore
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.model._
import AwsBijections._
object DynamoStringStore {
// Factory wiring a raw DynamoStore (AttributeValue values) into the string-typed view.
def apply(awsAccessKey: String, awsSecretKey: String, tableName: String,
primaryKeyColumn: String, valueColumn: String,
endpoint: Regions = Regions.US_EAST_1): DynamoStringStore =
new DynamoStringStore(DynamoStore(awsAccessKey, awsSecretKey, tableName,
primaryKeyColumn, valueColumn, endpoint))
}
// String-keyed/valued store over DynamoDB: keys pass through unchanged (identity)
// and values are converted AttributeValue <-> String via the implicit bijection.
class DynamoStringStore(underlying: DynamoStore)
extends ConvertedStore[String, String, AttributeValue, String](underlying)(identity)
| twitter/storehaus | storehaus-dynamodb/src/main/scala/com/twitter/storehaus/dynamodb/DynamoStringStore.scala | Scala | apache-2.0 | 1,277 |
package com.challenges
/** LuhnChecker is an object with methods that apply the Luhn algorithm to determine
* whether a given string is a valid credit card number or not.
*
*/
object LuhnChecker {

  /**
   * Verifies a credit card number with the Luhn checksum.
   *
   * Walking from the rightmost digit, every second digit is doubled and the
   * digits of each product are summed; the number is valid when the total is
   * a multiple of ten.
   *
   * @param number the candidate card number, 14 to 16 decimal digits
   * @return true when `number` passes the Luhn check
   * @throws IllegalArgumentException if the length is out of range or the
   *         string contains a non-digit character
   */
  def isValid(number: String): Boolean = {
    require((number.length >= 14) && (number.length <= 16), "CC number must be between 14/16 digits.")
    // FIX: previously non-digit characters were fed straight into the checksum
    // (Char.asDigit also maps letters to hex values), silently producing a
    // meaningless result; reject them explicitly instead.
    require(number.forall(_.isDigit), "CC number must contain only digits.")
    val checksum = number.reverse.map(_.asDigit).zipWithIndex.map {
      case (digit, index) if index % 2 == 1 =>
        // A doubled digit is at most 18, so summing its digits is the same
        // as subtracting 9 whenever the product exceeds 9.
        val doubled = digit * 2
        if (doubled > 9) doubled - 9 else doubled
      case (digit, _) => digit
    }.sum
    checksum % 10 == 0
  }
}
| obryan5598/scala-challenges | src/main/scala/com/challenges/LuhnChecker.scala | Scala | mit | 797 |
import java.io.File
import com.madgag.textmatching.{Glob, TextMatcher}
import scopt.OptionParser
import scalax.file.ImplicitConversions._
import scalax.file.Path
import scalax.file.defaultfs.DefaultPath
object BenchmarkConfig {
// scopt command-line parser; each option copies the parsed value into the config.
val parser = new OptionParser[BenchmarkConfig]("benchmark") {
opt[File]("resources-dir").text("benchmark resources folder - contains jars and repos").action {
(v, c) => c.copy(resourcesDirOption = v)
}
opt[String]("java").text("Java command paths").action {
(v, c) => c.copy(javaCmds = v.split(',').toSeq)
}
opt[String]("versions").text("BFG versions to time - bfg-[version].jar - eg 1.4.0,1.5.0,1.6.0").action {
(v, c) => c.copy(bfgVersions = v.split(",").toSeq)
}
opt[Int]("die-if-longer-than").action {
(v, c) => c.copy(dieIfTaskTakesLongerThan = Some(v))
}
opt[String]("repos").text("Sample repos to test, eg github-gems,jgit,git").action {
(v, c) => c.copy(repoNames = v.split(",").toSeq)
}
opt[String]("commands").valueName("<glob>").text("commands to exercise").action {
(v, c) => c.copy(commands = TextMatcher(v, defaultType = Glob))
}
opt[File]("scratch-dir").text("Temp-dir for job runs - preferably ramdisk, eg tmpfs.").action {
(v, c) => c.copy(scratchDir = v)
}
opt[Unit]("only-bfg") action { (_, c) => c.copy(onlyBfg = true) } text "Don't benchmark git-filter-branch"
}
}
// Immutable benchmark settings with defaults; derived paths are computed lazily below.
case class BenchmarkConfig(resourcesDirOption: Path = Path.fromString(System.getProperty("user.dir")) / "bfg-benchmark" / "resources",
scratchDir: DefaultPath = Path.fromString("/dev/shm/"),
javaCmds: Seq[String] = Seq("java"),
bfgVersions: Seq[String] = Seq.empty,
commands: TextMatcher = Glob("*"),
onlyBfg: Boolean = false,
dieIfTaskTakesLongerThan: Option[Int] = None,
repoNames: Seq[String] = Seq.empty) {
// Absolute resource root plus conventional sub-folders for jars and sample repos.
lazy val resourcesDir = Path.fromString(resourcesDirOption.path).toAbsolute
lazy val jarsDir = resourcesDir / "jars"
lazy val reposDir = resourcesDir / "repos"
// One jar per requested BFG version, named bfg-<version>.jar.
lazy val bfgJars = bfgVersions.map(version => jarsDir / s"bfg-$version.jar")
lazy val repoSpecDirs = repoNames.map(reposDir / _)
}
| digitalquest/bfg-repo-cleaner | bfg-benchmark/src/main/scala/BenchmarkConfig.scala | Scala | gpl-3.0 | 2,329 |
package specmethods
/**
 * A mutable bank account whose balance can never go negative.
 *
 * @param mBalance the opening balance
 */
class BankAccount(private var mBalance: Int) {

  /**
   * Adds `amount` to the balance.
   * @throws IllegalArgumentException if `amount` is negative
   */
  def deposit(amount: Int): Unit = {
    require(amount >= 0, "Can't make negative deposit.")
    // Logging here
    mBalance += amount
  }

  /**
   * Removes `amount` from the balance.
   * @throws IllegalArgumentException if `amount` is negative or exceeds the balance
   */
  def withdraw(amount: Int): Unit = {
    require(amount >= 0, "Can't make negative withdrawal.") // FIX: typo "withdrawl"
    require(amount <= mBalance, "Can't withdraw more money than you have.")
    // Logging here
    mBalance -= amount
  }

  /** The current balance. */
  def balance: Int = mBalance

  /** Sets the balance by routing through deposit/withdraw so their checks (and logging) apply. */
  def balance_=(amount: Int): Unit = {
    if (amount < balance) withdraw(balance - amount)
    else deposit(amount - balance)
  }
}
// Demo entry point exercising BankAccount via the balance setter.
// NOTE(review): the App trait has initialization-order pitfalls; consider an
// explicit `def main(args: Array[String]): Unit` instead.
object NotBankAccount extends App {
val account = new BankAccount(0)
println(account.balance)
// Uses the balance_= setter, which routes through deposit().
account.balance = 100
} | conorheffron/scala-course | src/specmethods/BankAccount.scala | Scala | gpl-3.0 | 698 |
package zhongl.websockit
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
// Exercises the Stub DSL: JSON path matching ($".field"), regex/relational
// filters, filter composition, echo ($) and the no-op response (nil).
class StubSpec extends FunSpec with ShouldMatchers {
describe("Stub DSL") {
it("should create stub with customized rules") {
// Rules are matched against incoming JSON; the first matching rule answers.
val s = new Stub {
($".name" =~ "jason") >> json"2"
($".name" =~ "allen") >> json"${$".seq"}"
}
s.receive("""{"name":"allen", "seq":1}""") should be(Some("1"))
s.receive("""{"name":"jason"}""") should be(Some("2"))
}
it("should complain non-json-object") {
val s = new Stub {
($".name" =~ "jason") >> json"2"
}
// A bare scalar is rejected before any rule is consulted.
(evaluating { s.receive("""1""") } should produce[IllegalArgumentException])
.getMessage should be("""1 is not a json object""")
}
it("should get array element") {
// Paths support positional access into JSON arrays.
new Stub {
($".[0]" =~ 1) >> json"2"
} receive "[1]" should be(Some("2"))
}
it("should support regex") {
new Stub {
($".name" =* """\\w+""") >> json"""{"name":"${$".name"}"}"""
} receive """{"name":"allen"}""" should be(Some("""{"name":"allen"}"""))
}
it("should support composed filters") {
// || combines filters; either side matching triggers the response.
val s = new Stub {
($".name" =~ "allen" || $".age" > 25) >> json"ok"
}
s.receive("""{"name":"allen"}""") should be(Some("ok"))
s.receive("""{"age":30}""") should be(Some("ok"))
}
it("should echo input") {
// `$` as a response echoes the incoming message verbatim.
new Stub {
(() => true) >> $
} receive "hi" should be(Some("hi"))
}
it("should do nothing") {
// `nil` swallows the message; later rules are not consulted.
new Stub {
(() => true) >> nil
(() => true) >> $
} receive "hi" should be(None)
}
it("should miss match instead of complain invalid path") {
// A path absent from the payload simply fails to match.
new Stub {
($".typo" =~ 1) >> json"ok"
} receive """{"type":1}""" should be(None)
}
}
}
| zhongl/websockit | src/test/scala/zhongl/websockit/StubSpec.scala | Scala | apache-2.0 | 1,812 |
// Scala 3 opaque-type example: Probability is a Double at runtime but its
// invariant (value in [0, 1]) is enforced at the API boundary.
object prob {
opaque type Probability = Double
object Probability {
// Safe constructor: None when `n` is outside [0, 1].
def apply(n: Double): Option[Probability] =
if (0.0 <= n && n <= 1.0) Some(n) else None
// Trusted constructor: throws instead of returning None.
def unsafe(p: Double): Probability = {
require(0.0 <= p && p <= 1.0, s"probabilities lie in [0, 1] (got $p)")
p
}
def asDouble(p: Probability): Double = p
val Never: Probability = 0.0
val CoinToss: Probability = 0.5
val Certain: Probability = 1.0
// Probabilities order exactly like their underlying Doubles.
implicit val ordering: Ordering[Probability] =
implicitly[Ordering[Double]]
implicit class ProbabilityOps(p1: Probability) extends AnyVal {
// Complement, conjunction (independence assumed) and inclusion-exclusion union.
def unary_~ : Probability = Certain - p1
def &(p2: Probability): Probability = p1 * p2
def |(p2: Probability): Probability = p1 + p2 - (p1 * p2)
def isImpossible: Boolean = p1 == Never
def isCertain: Boolean = p1 == Certain
import scala.util.Random
// Bernoulli draw: true with probability p1.
def sample(r: Random = Random): Boolean = r.nextDouble <= p1
def toDouble: Double = p1
}
// Demo values evaluated during object initialisation (this is a compiler test file).
val caughtTrain = Probability.unsafe(0.3)
val missedTrain = ~caughtTrain
val caughtCab = Probability.CoinToss
val arrived = caughtTrain | (missedTrain & caughtCab)
println((1 to 5).map(_ => arrived.sample()).toList)
}
| som-snytt/dotty | tests/pos/opaque-propability.scala | Scala | apache-2.0 | 1,250 |
package com.twitter.finatra.http.filters
import com.twitter.finagle.http.{Method => HttpMethod, Request, Response, Status}
import com.twitter.finagle.stats.{Counter, Stat, StatsReceiver}
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finatra.http.contexts.RouteInfo
import com.twitter.finatra.http.filters.StatsFilter.Stats
import com.twitter.finatra.http.response.SimpleResponse
import com.twitter.inject.Logging
import com.twitter.util.{Duration, Future, Memoize, Return, Stopwatch, Throw}
import javax.inject.{Inject, Singleton}
object StatsFilter {
private object Stats {
// Unless stats are per-endpoint, don't track request count/time
// since those are handled by [[com.twitter.finagle.service.StatsFilter]]
// already.
// `statusClass` buckets codes by hundreds, e.g. 404 -> "4XX".
def mk(statsReceiver: StatsReceiver, statusCode: Int, perEndpoint: Boolean): Stats = {
val statusClass = s"${statusCode / 100}XX"
Stats(
requestCount = if (perEndpoint) Some(statsReceiver.counter("requests")) else None,
statusCodeCount = statsReceiver.scope("status").counter(statusCode.toString),
statusClassCount = statsReceiver.scope("status").counter(statusClass),
requestTime = if (perEndpoint) Some(statsReceiver.stat("time")) else None,
statusCodeTime = statsReceiver.scope("time").stat(statusCode.toString),
statusClassTime = statsReceiver.scope("time").stat(statusClass),
responseSize = statsReceiver.stat("response_size"))
}
}
// Pre-resolved counters/stats for one (scope, status code) combination.
private case class Stats(
requestCount: Option[Counter],
statusCodeCount: Counter,
statusClassCount: Counter,
requestTime: Option[Stat],
statusCodeTime: Stat,
statusClassTime: Stat,
responseSize: Stat) {
// Records one completed request: counts, latency (ms) and response size.
def count(duration: Duration, response: Response): Unit = {
requestCount.foreach { _.incr() }
statusCodeCount.incr()
statusClassCount.incr()
val durationMs = duration.inMilliseconds
requestTime.foreach { _.add(durationMs.toFloat) }
statusCodeTime.add(durationMs.toFloat)
statusClassTime.add(durationMs.toFloat)
responseSize.add(response.length.toFloat)
}
}
}
/**
* A drop-in replacement for [[com.twitter.finagle.http.filter.StatsFilter]]
* with per-route stats scoped under `route/<name>/<method>`.
*/
@Singleton
class StatsFilter[R <: Request] @Inject()(
statsReceiver: StatsReceiver)
extends SimpleFilter[R, Response]
with Logging {
// Memoized so counters/stats are resolved once per (route, method, status) triple.
private val perRouteStats = Memoize[(RouteInfo, HttpMethod, Int), Stats] {
case (routeInfo, method, statusCode) =>
// Prefer the route's explicit name; fall back to its sanitized path.
val nameOrPath =
if (routeInfo.name.nonEmpty)
routeInfo.name
else
routeInfo.sanitizedPath
val scopedStatsReceiver =
statsReceiver.
scope("route").
scope(nameOrPath).
scope(method.toString.toUpperCase)
Stats.mk(scopedStatsReceiver, statusCode, perEndpoint = true)
}
// Global (non-route-scoped) stats, memoized per status code.
private val globalStats = Memoize[Int, Stats] { statusCode =>
Stats.mk(statsReceiver, statusCode, perEndpoint = false)
}
/* Public */
// Times the downstream service; a thrown exception is recorded as a 500
// (exceptions are expected to be mapped to responses by ExceptionMappingFilter).
def apply(request: R, service: Service[R, Response]): Future[Response] = {
val elapsed = Stopwatch.start()
service(request) respond {
case Return(response) =>
record(request, response, elapsed())
case Throw(e) =>
error(s"Internal server error - please ensure ${classOf[ExceptionMappingFilter[_]].getName} is installed", e)
record(request, SimpleResponse(Status.InternalServerError), elapsed())
}
}
/* Private */
// Always records globally; records per-route only when RouteInfo is attached to the request.
private def record(request: Request, response: Response, duration: Duration): Unit = {
globalStats(response.statusCode).count(duration, response)
RouteInfo(request) foreach { routeInfo =>
perRouteStats((routeInfo, request.method, response.statusCode)).count(duration, response)
}
}
}
| syamantm/finatra | http/src/main/scala/com/twitter/finatra/http/filters/StatsFilter.scala | Scala | apache-2.0 | 3,827 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference}
import scala.annotation.meta.param
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.util.control.NonFatal
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.exceptions.TestFailedException
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.rdd.{DeterministicLevel, RDD}
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException}
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, ThreadUtils, Utils}
// Test double that makes DAGScheduler event handling synchronous: events are
// processed inline on the posting thread instead of on the event loop thread.
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
extends DAGSchedulerEventProcessLoop(dagScheduler) {
override def post(event: DAGSchedulerEvent): Unit = {
try {
// Forward event to `onReceive` directly to avoid processing event asynchronously.
onReceive(event)
} catch {
case NonFatal(e) => onError(e)
}
}
// Fail fast in tests: stop the scheduler and rethrow instead of logging only.
override def onError(e: Throwable): Unit = {
logError("Error in DAGSchedulerEventLoop: ", e)
dagScheduler.stop()
throw e
}
}
// Variant of MyRDD whose compute() succeeds (returns an empty iterator), so
// checkpointing code paths can run against it in tests.
class MyCheckpointRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null,
indeterminate: Boolean = false)
extends MyRDD(sc, numPartitions, dependencies, locations, tracker, indeterminate) {
// Allow doCheckpoint() on this RDD.
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
Iterator.empty
}
/**
* An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
* preferredLocations (if any) that are passed to them. They are deliberately not executable
* so we can test that DAGScheduler does not try to execute RDDs locally.
*
* Optionally, one can pass in a list of locations to use as preferred locations for each task,
* and a MapOutputTrackerMaster to enable reduce task locality. We pass the tracker separately
* because, in this test suite, it won't be the same as sc.env.mapOutputTracker.
*/
class MyRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null,
indeterminate: Boolean = false)
extends RDD[(Int, Int)](sc, dependencies) with Serializable {
// Deliberately not executable: scheduling tests must never actually run a task.
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
// One trivial partition per index in [0, numPartitions).
override def getPartitions: Array[Partition] = (0 until numPartitions).map(i => new Partition {
override def index: Int = i
}).toArray
// Lets tests simulate RDDs with non-deterministic output ordering.
override protected def getOutputDeterministicLevel = {
if (indeterminate) DeterministicLevel.INDETERMINATE else super.getOutputDeterministicLevel
}
// Explicit `locations` win; otherwise fall back to shuffle-based locality via `tracker`.
override def getPreferredLocations(partition: Partition): Seq[String] = {
if (locations.isDefinedAt(partition.index)) {
locations(partition.index)
} else if (tracker != null && dependencies.size == 1 &&
dependencies(0).isInstanceOf[ShuffleDependency[_, _, _]]) {
// If we have only one shuffle dependency, use the same code path as ShuffledRDD for locality
val dep = dependencies(0).asInstanceOf[ShuffleDependency[_, _, _]]
tracker.getPreferredLocationsForShuffle(dep, partition.index)
} else {
Nil
}
}
override def toString: String = "DAGSchedulerSuiteRDD " + id
}
// A dummy exception type unique to this suite, so tests can match on exactly this failure.
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {
import DAGSchedulerSuite._
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
// Default conf shared by helper components (SecurityManager, trackers); per-test
// SparkContexts are built from the conf passed to init() instead.
val conf = new SparkConf
/** Set of TaskSets the DAGScheduler has requested executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
// Tasks the stub TaskScheduler recorded via notifyPartitionCompletion.
val tasksMarkedAsCompleted = new ArrayBuffer[Task[_]]()
// A stub TaskScheduler: it records submitted TaskSets and cancelled stages instead of
// executing anything. Tests later "complete" tasks by posting CompletionEvents directly.
val taskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start() = {}
override def stop() = {}
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
override def submitTasks(taskSet: TaskSet) = {
// normally done by TaskSetManager
taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
cancelledStages += stageId
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
// Record the single task for this partition from the latest attempt of the stage.
taskSets.filter(_.stageId == stageId).lastOption.foreach { ts =>
val tasks = ts.tasks.filter(_.partitionId == partitionId)
assert(tasks.length == 1)
tasksMarkedAsCompleted += tasks.head
}
}
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
/**
* Listener which records information to verify in unit tests. Each getter in this class
* waits until there are no pending events left to process and then returns an immutable
* snapshot, so a race condition cannot produce an odd, partially-updated result.
*/
class EventInfoRecordingListener extends SparkListener {
  private val _submittedStageInfos = new HashSet[StageInfo]
  private val _successfulStages = new HashSet[Int]
  private val _failedStages = new ArrayBuffer[Int]
  private val _stageByOrderOfExecution = new ArrayBuffer[Int]
  private val _endedTasks = new HashSet[Long]

  override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = {
    _submittedStageInfos += stageSubmitted.stageInfo
  }

  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
    val info = stageCompleted.stageInfo
    _stageByOrderOfExecution += info.stageId
    // A stage with no recorded failure reason completed successfully.
    info.failureReason match {
      case None => _successfulStages += info.stageId
      case Some(_) => _failedStages += info.stageId
    }
  }

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
    _endedTasks += taskEnd.taskInfo.taskId
  }

  // Public getters: drain the listener bus, then return an immutable snapshot.
  def submittedStageInfos: Set[StageInfo] = snapshot(_submittedStageInfos.toSet)

  def successfulStages: Set[Int] = snapshot(_successfulStages.toSet)

  def failedStages: List[Int] = snapshot(_failedStages.toList)

  def stageByOrderOfExecution: List[Int] = snapshot(_stageByOrderOfExecution.toList)

  def endedTasks: Set[Long] = snapshot(_endedTasks.toSet)

  // Waits until no events remain to process, then evaluates the by-name snapshot
  // expression — guaranteeing callers never see a partially-updated view.
  private def snapshot[T](value: => T): T = {
    sc.listenerBus.waitUntilEmpty()
    value
  }
}
// Mutable fixtures, re-created for every test case by init() (called from beforeEach()).
var sparkListener: EventInfoRecordingListener = null
var mapOutputTracker: MapOutputTrackerMaster = null
var broadcastManager: BroadcastManager = null
var securityMgr: SecurityManager = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
* Set of cache locations to return from our mock BlockManagerMaster.
* Keys are (rdd ID, partition ID). Anything not present will return an empty
* list of cache locations silently.
*/
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]
// stub out BlockManagerMaster.getLocations to use our cacheLocations
val blockManagerMaster = new BlockManagerMaster(null, null, conf, true) {
override def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
// For each RDD block id, look up (rddId, splitIndex) in cacheLocations; non-RDD
// block ids and missing entries yield an empty location list.
blockIds.map {
_.asRDDId.map(id => (id.rddId -> id.splitIndex)).flatMap(key => cacheLocations.get(key)).
getOrElse(Seq())
}.toIndexedSeq
}
override def removeExecutor(execId: String): Unit = {
// don't need to propagate to the driver, which we don't have
}
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
// Set by jobListener on job failure; reset to null in init() before each test.
var failure: Exception = _
// Default listener used by submit(); records into the shared `results`/`failure` above.
val jobListener = new JobListener() {
override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
override def jobFailed(exception: Exception) = { failure = exception }
}
/** A simple helper class for creating custom JobListeners */
class SimpleListener extends JobListener {
// Per-instance results/failure, unlike the suite-wide jobListener above.
val results = new HashMap[Int, Any]
var failure: Exception = null
override def taskSucceeded(index: Int, result: Any): Unit = results.put(index, result)
override def jobFailed(exception: Exception): Unit = { failure = exception }
}
override def beforeEach(): Unit = {
super.beforeEach()
init(new SparkConf())
}
// (Re)creates the SparkContext and all scheduler fixtures. Tests that need a special
// conf call afterEach() and then init(conf) themselves.
private def init(testConf: SparkConf): Unit = {
sc = new SparkContext("local[2]", "DAGSchedulerSuite", testConf)
sparkListener = new EventInfoRecordingListener
failure = null
sc.addSparkListener(sparkListener)
// Clear state accumulated by the previous test.
taskSets.clear()
tasksMarkedAsCompleted.clear()
cancelledStages.clear()
cacheLocations.clear()
results.clear()
securityMgr = new SecurityManager(conf)
broadcastManager = new BroadcastManager(true, conf, securityMgr)
mapOutputTracker = new MapOutputTrackerMaster(conf, broadcastManager, true) {
override def sendTracker(message: Any): Unit = {
// no-op, just so we can stop this to avoid leaking threads
}
}
// The scheduler under test, wired to the stub taskScheduler and mock blockManagerMaster.
scheduler = new DAGScheduler(
sc,
taskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterEach(): Unit = {
try {
// Stop helper components so their threads don't leak between tests.
scheduler.stop()
dagEventProcessLoopTester.stop()
mapOutputTracker.stop()
broadcastManager.stop()
} finally {
super.afterEach()
}
}
override def afterAll(): Unit = {
super.afterAll()
}
/**
* Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
* This is a pair RDD type so it can always be used in ShuffleDependencies.
*/
type PairOfIntsRDD = RDD[(Int, Int)]
/**
* Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
* the scheduler not to exit.
*
* After processing the event, submit waiting stages as is done on most iterations of the
* DAGScheduler event loop.
*/
private def runEvent(event: DAGSchedulerEvent): Unit = {
// post() is synchronous here — see DAGSchedulerEventProcessLoopTester.
dagEventProcessLoopTester.post(event)
}
/**
* When we submit dummy Jobs, this is the compute function we supply. Except in a local test
* below, we do not expect this function to ever be executed; instead, we will return results
* directly through CompletionEvents.
*/
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
it.next.asInstanceOf[Tuple2[_, _]]._1
/** Sends a CompletionEvent for each result, paired positionally with the TaskSet's tasks. */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]): Unit = {
  assert(taskSet.tasks.size >= results.size)
  results.zipWithIndex.foreach { case ((reason, value), idx) =>
    // Bounds guard kept for safety, although the assert above already enforces it.
    if (idx < taskSet.tasks.size) {
      runEvent(makeCompletionEvent(taskSet.tasks(idx), reason, value))
    }
  }
}
/**
 * Like `complete`, but attaches a long accumulator (with the given id and initial value 1)
 * to every completion event.
 */
private def completeWithAccumulator(
    accumId: Long,
    taskSet: TaskSet,
    results: Seq[(TaskEndReason, Any)]): Unit = {
  assert(taskSet.tasks.size >= results.size)
  for (((reason, value), idx) <- results.zipWithIndex if idx < taskSet.tasks.size) {
    runEvent(makeCompletionEvent(
      taskSet.tasks(idx),
      reason,
      value,
      Seq(AccumulatorSuite.createLongAccum("", initValue = 1, id = accumId))))
  }
}
/** Submits a job to the scheduler and returns the job id. */
private def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
listener: JobListener = jobListener,
properties: Properties = null): Int = {
// Claim the next job id directly from the scheduler's counter, then post the event.
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, func, partitions, CallSite("", ""), listener, properties))
jobId
}
/** Submits a map stage to the scheduler and returns the job id. */
private def submitMapStage(
shuffleDep: ShuffleDependency[_, _, _],
listener: JobListener = jobListener): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(MapStageSubmitted(jobId, shuffleDep, CallSite("", ""), listener))
jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String): Unit = {
runEvent(TaskSetFailed(taskSet, message, None))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int): Unit = {
runEvent(JobCancelled(jobId, None))
}
test("[SPARK-3353] parent stage should have lower stage id") {
// Runs a real two-stage job on local[2]; stage order is recorded by sparkListener.
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
val stageByOrderOfExecution = sparkListener.stageByOrderOfExecution
assert(stageByOrderOfExecution.length === 2)
// The shuffle map stage (parent) must have been assigned the smaller stage id.
assert(stageByOrderOfExecution(0) < stageByOrderOfExecution(1))
}
/**
* This test ensures that the DAGScheduler builds the stage graph correctly.
*
* Suppose you have the following DAG:
*
* [A] <--(s_A)-- [B] <--(s_B)-- [C] <--(s_C)-- [D]
* \\ /
* <-------------
*
* Here, RDD B has a shuffle dependency on RDD A, and RDD C has shuffle dependency on both
* B and A. The shuffle dependency IDs are numbers in the DAGScheduler, but to make the example
* easier to understand, let's call the shuffled data from A shuffle dependency ID s_A and the
* shuffled data from B shuffle dependency ID s_B.
*
* Note: [] means an RDD, () means a shuffle dependency.
*/
test("[SPARK-13902] Ensure no duplicate stages are created") {
val rddA = new MyRDD(sc, 1, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val s_A = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 1, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val s_B = shuffleDepB.shuffleId
// rddC depends on BOTH s_A and s_B — the shape that could trigger duplicate stages.
val rddC = new MyRDD(sc, 1, List(shuffleDepA, shuffleDepB), tracker = mapOutputTracker)
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val s_C = shuffleDepC.shuffleId
val rddD = new MyRDD(sc, 1, List(shuffleDepC), tracker = mapOutputTracker)
submit(rddD, Array(0))
// Exactly one map stage per shuffle id: no duplicates were created.
assert(scheduler.shuffleIdToMapStage.size === 3)
assert(scheduler.activeJobs.size === 1)
val mapStageA = scheduler.shuffleIdToMapStage(s_A)
val mapStageB = scheduler.shuffleIdToMapStage(s_B)
val mapStageC = scheduler.shuffleIdToMapStage(s_C)
val finalStage = scheduler.activeJobs.head.finalStage
assert(mapStageA.parents.isEmpty)
assert(mapStageB.parents === List(mapStageA))
assert(mapStageC.parents === List(mapStageA, mapStageB))
assert(finalStage.parents === List(mapStageC))
// Complete the map stages bottom-up, then the result stage.
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(3), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("All shuffle files on the slave should be cleaned up when slave lost") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_SERVICE_ENABLED.key, "true")
conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true")
init(conf)
runEvent(ExecutorAdded("exec-hostA1", "hostA"))
runEvent(ExecutorAdded("exec-hostA2", "hostA"))
runEvent(ExecutorAdded("exec-hostB", "hostB"))
// Two back-to-back shuffles: both must be de-registered for hostA on fetch failure.
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3))
val secondShuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// map stage1 completes successfully, with one task on each executor
complete(taskSets(0), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 5)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 6)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 7))
))
// map stage2 completes successfully, with one task on each executor
complete(taskSets(1), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 8)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 9)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 10))
))
// make sure our test setup is correct
val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus1.count(_ != null) === 3)
assert(initialMapStatus1.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus1.map{_.mapId}.toSet === Set(5, 6, 7))
val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus2.count(_ != null) === 3)
assert(initialMapStatus2.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus2.map{_.mapId}.toSet === Set(8, 9, 10))
// reduce stage fails with a fetch failure from one host
complete(taskSets(2), Seq(
(FetchFailed(BlockManagerId("exec-hostA2", "hostA", 12345),
firstShuffleId, 0L, 0, 0, "ignored"),
null)
))
// Here is the main assertion -- make sure that we de-register
// the map outputs for both map stage from both executors on hostA
val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
assert(mapStatus1.count(_ != null) === 1)
// Only the hostB output (partition index 2) survives, for either shuffle.
assert(mapStatus1(2).location.executorId === "exec-hostB")
assert(mapStatus1(2).location.host === "hostB")
val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(mapStatus2.count(_ != null) === 1)
assert(mapStatus2(2).location.executorId === "exec-hostB")
assert(mapStatus2(2).location.host === "hostB")
}
test("zero split job") {
var numResults = 0
var failureReason: Option[Exception] = None
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any): Unit = numResults += 1
override def jobFailed(exception: Exception): Unit = {
failureReason = Some(exception)
}
}
// A job over zero partitions produces no tasks and therefore no results.
val jobId = submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
cancel(jobId)
assert(failureReason.isDefined)
// NOTE(review): trailing space in the expected message appears intentional —
// presumably no cancellation reason is appended; confirm against DAGScheduler's format.
assert(failureReason.get.getMessage() === "Job 0 cancelled ")
}
test("run trivial job") {
// Single-partition job with no dependencies; complete its one task with 42.
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial job w/ dependency") {
// A one-to-one (narrow) dependency should not add any extra stage.
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("equals and hashCode AccumulableInfo") {
val accInfo1 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = true, countFailedValues = false)
val accInfo2 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
val accInfo3 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
// Only `internal` differs between 1 and 2, so 1 != 2 while 2 == 3 (hashCodes agree).
assert(accInfo1 !== accInfo2)
assert(accInfo2 === accInfo3)
assert(accInfo2.hashCode() === accInfo3.hashCode())
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil).cache()
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
// Register fake cache locations for baseRdd partition 0 with the mock BlockManagerMaster.
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
// The child task should prefer the hosts caching its parent's partition.
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("regression test for getCacheLocs") {
val rdd = new MyRDD(sc, 3, Nil).cache()
cacheLocations(rdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
cacheLocations(rdd.id -> 1) =
Seq(makeBlockManagerId("hostB"), makeBlockManagerId("hostC"))
cacheLocations(rdd.id -> 2) =
Seq(makeBlockManagerId("hostC"), makeBlockManagerId("hostD"))
// getCacheLocs must return the per-partition locations in partition order.
val locs = scheduler.getCacheLocs(rdd).map(_.map(_.host))
assert(locs === Seq(Seq("hostA", "hostB"), Seq("hostB", "hostC"), Seq("hostC", "hostD")))
}
/**
* This test ensures that if a particular RDD is cached, RDDs earlier in the dependency chain
* are not computed. It constructs the following chain of dependencies:
* +---+ shuffle +---+ +---+ +---+
* | A |<--------| B |<---| C |<---| D |
* +---+ +---+ +---+ +---+
* Here, B is derived from A by performing a shuffle, C has a one-to-one dependency on B,
* and D similarly has a one-to-one dependency on C. If none of the RDDs were cached, this
* set of RDDs would result in a two stage job: one ShuffleMapStage, and a ResultStage that
* reads the shuffled data from RDD A. This test ensures that if C is cached, the scheduler
* doesn't perform a shuffle, and instead computes the result using a single ResultStage
* that reads C's cached data.
*/
test("getMissingParentStages should consider all ancestor RDDs' cache statuses") {
val rddA = new MyRDD(sc, 1, Nil)
val rddB = new MyRDD(sc, 1, List(new ShuffleDependency(rddA, new HashPartitioner(1))),
tracker = mapOutputTracker)
val rddC = new MyRDD(sc, 1, List(new OneToOneDependency(rddB))).cache()
val rddD = new MyRDD(sc, 1, List(new OneToOneDependency(rddC)))
// C is fully cached, so the shuffle map stage for A should never be submitted.
cacheLocations(rddC.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(rddD, Array(0))
assert(scheduler.runningStages.size === 1)
// Make sure that the scheduler is running the final result stage.
// Because C is cached, the shuffle map stage to compute A does not need to be run.
assert(scheduler.runningStages.head.isInstanceOf[ResultStage])
}
test("avoid exponential blowup when getting preferred locs list") {
// Build up a complex dependency graph with repeated zip operations, without preferred locations
var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
(1 to 30).foreach(_ => rdd = rdd.zip(rdd))
// getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
failAfter(10.seconds) {
val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
// No preferred locations are returned.
assert(preferredLocs.length === 0)
}
}
test("unserializable task") {
// The anonymous field of a non-serializable class makes task serialization fail.
val unserializableRdd = new MyRDD(sc, 1, Nil) {
class UnserializableClass
val unserializable = new UnserializableClass
}
submit(unserializableRdd, Array(0))
assert(failure.getMessage.startsWith(
"Job aborted due to stage failure: Task not serializable:"))
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
// Fail the whole TaskSet; the job should abort with the supplied message.
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {
taskSets += taskSet
}
// All cancellation/kill entry points throw, simulating a backend without kill support.
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
throw new UnsupportedOperationException
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
throw new UnsupportedOperationException
}
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {
throw new UnsupportedOperationException
}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
// Build a separate DAGScheduler + event loop on top of the no-kill backend.
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// Complete both map tasks; the tracker should then know both output locations.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial shuffle with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// the 2nd ResultTask failed
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
// this will get called
// blockManagerMaster.removeExecutor("exec-hostA")
// ask the scheduler to try it again
scheduler.resubmitFailedStages()
// have the 2nd attempt pass
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
// we can see both result blocks now
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
// Tuple fields: (test name suffix, executor-loss event, shuffle service on?, expect file loss?)
private val shuffleFileLossTests = Seq(
("slave lost with shuffle service", SlaveLost("", false), true, false),
("worker lost with shuffle service", SlaveLost("", true), true, true),
("worker lost without shuffle service", SlaveLost("", true), false, true),
("executor failure with shuffle service", ExecutorKilled, true, false),
("executor failure without shuffle service", ExecutorKilled, false, true))
// Generates one test per scenario above.
for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) {
val maybeLost = if (expectFileLoss) {
"lost"
} else {
"not lost"
}
test(s"shuffle files $maybeLost when $eventDescription") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_SERVICE_ENABLED.key, shuffleServiceOn.toString)
init(conf)
assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
runEvent(ExecutorLost("exec-hostA", event))
if (expectFileLoss) {
// The hostA outputs should be gone, so the tracker can no longer serve them.
intercept[MetadataFetchFailedException] {
mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
}
} else {
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
}
}
}
test("SPARK-28967 properties must be cloned before posting to listener bus for 0 partition") {
val properties = new Properties()
val func = (context: TaskContext, it: Iterator[(_)]) => 1
val resultHandler = (taskIndex: Int, result: Int) => {}
// Pre-load with a failure so a missing JobStart event also fails the test.
val assertionError = new AtomicReference[TestFailedException](
new TestFailedException("Listener didn't receive expected JobStart event", 0))
val listener = new SparkListener() {
override def onJobStart(event: SparkListenerJobStart): Unit = {
try {
// Same content...
assert(event.properties.equals(properties), "Expected same content of properties, " +
s"but got properties with different content. props in caller ${properties} /" +
s" props in event ${event.properties}")
// ...but a different instance, proving the properties were cloned.
assert(event.properties.ne(properties), "Expected instance with different identity, " +
"but got same instance.")
assertionError.set(null)
} catch {
case e: TestFailedException => assertionError.set(e)
}
}
}
sc.addSparkListener(listener)
// 0 partition
val testRdd = new MyRDD(sc, 0, Nil)
val waiter = scheduler.submitJob(testRdd, func, Seq.empty, CallSite.empty,
resultHandler, properties)
sc.listenerBus.waitUntilEmpty()
assert(assertionError.get() === null)
}
// Helper function to validate state when creating tests for task failures
/**
 * Asserts that `stageAttempt` belongs to the expected stage and attempt.
 * Uses ScalaTest's `===` for both checks (the original mixed `===` and `==`;
 * `===` reports both operands on failure, consistent with the rest of the suite).
 */
private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet): Unit = {
  assert(stageAttempt.stageId === stageId)
  assert(stageAttempt.stageAttemptId === attempt)
}
// Helper functions to extract commonly used code in Fetch Failure test cases
// Resets the job-end tracking state and installs a fresh EndListener.
private def setupStageAbortTest(sc: SparkContext): Unit = {
sc.listenerBus.addToSharedQueue(new EndListener())
ended = false
jobResult = null
}
// Create a new Listener to confirm that the listenerBus sees the JobEnd message
// when we abort the stage. This message will also be consumed by the EventLoggingListener
// so this will propagate up to the user.
var ended = false
var jobResult : JobResult = null
class EndListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
jobResult = jobEnd.jobResult
ended = true
}
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* successfully.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param numShufflePartitions - The number of partitions in the next stage
*/
private def completeShuffleMapStageSuccessfully(
stageId: Int,
attemptIdx: Int,
numShufflePartitions: Int): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
// Task idx "runs" on hostA, hostB, hostC, ... in order.
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map {
case (task, idx) =>
(Success, makeMapStatus("host" + ('A' + idx).toChar, numShufflePartitions))
}.toSeq)
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* with all FetchFailure.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param shuffleDep - The shuffle dependency of the stage with a fetch failure
*/
private def completeNextStageWithFetchFailure(
stageId: Int,
attemptIdx: Int,
shuffleDep: ShuffleDependency[_, _, _]): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
// Every task reports a fetch failure against hostA for its own reduce partition.
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0L, 0, idx, "ignored"), null)
}.toSeq)
}
/**
* Common code to get the next result stage attempt, confirm it's the one we expect, and
* complete it with a success where we return 42.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param partitionToResult - Maps a partition index to its result value (defaults to 42)
*/
private def completeNextResultStageWithSuccess(
stageId: Int,
attemptIdx: Int,
partitionToResult: Int => Int = _ => 42): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
assert(scheduler.stageIdToStage(stageId).isInstanceOf[ResultStage])
val taskResults = stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(Success, partitionToResult(idx))
}
complete(stageAttempt, taskResults.toSeq)
}
/**
* In this test, we simulate a job where many tasks in the same stage fail. We want to show
* that many fetch failures inside a single stage attempt do not trigger an abort
* on their own, but only when there are enough failing stage attempts.
*/
test("Single stage fetch failure should not abort the stage.") {
setupStageAbortTest(sc)
val parts = 8
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
completeShuffleMapStageSuccessfully(0, 0, numShufflePartitions = parts)
// All 8 reduce tasks fail with FetchFailure in the SAME attempt — should not abort.
completeNextStageWithFetchFailure(1, 0, shuffleDep)
// Resubmit and confirm that now all is well
scheduler.resubmitFailedStages()
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Complete stage 0 and then stage 1 with a "42"
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = parts)
completeNextResultStageWithSuccess(1, 1)
// Confirm job finished successfully
sc.listenerBus.waitUntilEmpty()
assert(ended)
assert(results === (0 until parts).map { idx => idx -> 42 }.toMap)
assertDataStructuresEmpty()
}
/**
 * In this test we simulate a job failure where the first stage completes successfully and
 * the second stage fails due to a fetch failure. Multiple successive fetch failures of a stage
 * trigger an overall job abort to avoid endless retries.
 */
test("Multiple consecutive stage fetch failures should lead to job being aborted.") {
setupStageAbortTest(sc)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Repeat the success-of-stage-0 / fetch-failure-of-stage-1 cycle until the limit is hit.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDep)
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
if (attempt < scheduler.maxConsecutiveStageAttempts - 1) {
// Before the attempt limit is reached the job must still be running.
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
} else {
// Stage should have been aborted and removed from running stages
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty()
assert(ended)
jobResult match {
case JobFailed(reason) =>
assert(reason.getMessage.contains("ResultStage 1 () has failed the maximum"))
case other => fail(s"expected JobFailed, not $other")
}
}
}
}
/**
 * In this test, we create a job with two consecutive shuffles, and simulate 2 failures for each
 * shuffle fetch. In total, the job has had four failures overall but not four failures
 * for a particular stage, and as such should not be aborted.
 */
test("Failures in different stages should not trigger an overall abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// In the first two iterations, Stage 0 succeeds and stage 1 fails. In the next two iterations,
// stage 2 fails.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
if (attempt < scheduler.maxConsecutiveStageAttempts / 2) {
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
} else {
completeShuffleMapStageSuccessfully(1, attempt, numShufflePartitions = 1)
// Fail stage 2
completeNextStageWithFetchFailure(2,
attempt - scheduler.maxConsecutiveStageAttempts / 2, shuffleDepTwo)
}
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
}
// Finally run every stage to completion; since no single stage failed enough times in a row,
// the job must not have been aborted.
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 4, numShufflePartitions = 1)
// Succeed stage2 with a "42"
completeNextResultStageWithSuccess(2, scheduler.maxConsecutiveStageAttempts / 2)
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
/**
 * In this test we demonstrate that only consecutive failures trigger a stage abort. A stage may
 * fail multiple times, succeed, then fail a few more times (because it's run again by downstream
 * dependencies). The total number of failed attempts for one stage will go over the limit,
 * but that doesn't matter, since they have successes in the middle.
 */
test("Non-consecutive stage failures don't trigger abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// First, execute stages 0 and 1, failing stage 1 up to MAX-1 times.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts - 1) {
// Make each task in stage 0 success
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
}
// Rerun stage 0 and 1 to step through the task set
completeShuffleMapStageSuccessfully(0, 3, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 3, numShufflePartitions = 1)
// Fail stage 2 so that stage 1 is resubmitted when we call scheduler.resubmitFailedStages()
completeNextStageWithFetchFailure(2, 0, shuffleDepTwo)
scheduler.resubmitFailedStages()
// Rerun stage 0 to step through the task set
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
// Now again, fail stage 1 (up to MAX_FAILURES) but confirm that this doesn't trigger an abort
// since we succeeded in between.
completeNextStageWithFetchFailure(1, 4, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Next, succeed all and confirm output
// Rerun stage 0 + 1
completeShuffleMapStageSuccessfully(0, 5, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 5, numShufflePartitions = 1)
// Succeed stage 2 and verify results
completeNextResultStageWithSuccess(2, 1)
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty()
assert(ended)
assert(results === Map(0 -> 42))
}
test("trivial shuffle with multiple fetch failures") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(sparkListener.failedStages.contains(1))
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
// (Reported via the same task object; only the failed map index/output differs.)
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1L, 1, 1, "ignored"),
null))
// The SparkListener should not receive redundant failure events.
assert(sparkListener.failedStages.size === 1)
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure") {
// The map stage is a barrier stage: one fetch failure must invalidate ALL of its outputs.
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
// Both map partitions are missing again, not just the one that failed to fetch.
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 1)
sc.listenerBus.waitUntilEmpty()
assertDataStructuresEmpty()
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled") {
// The map stage is a barrier stage: killing one task must invalidate its sibling's output too.
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Only the first map task succeeds, so partition 1 is still missing.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(1)))
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
// The kill discards partition 0's completed output as well.
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 0)
sc.listenerBus.waitUntilEmpty()
assertDataStructuresEmpty()
}
test("Fail the job if a barrier ResultTask failed") {
  val mapRdd = new MyRDD(sc, 2, Nil)
  val dep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
  val shuffleId = dep.shuffleId
  // The result stage is a barrier stage.
  val barrierReduceRdd = new MyRDD(sc, 2, List(dep), tracker = mapOutputTracker)
    .barrier()
    .mapPartitions(iter => iter)
  submit(barrierReduceRdd, Array(0, 1))
  // Finish the shuffle map stage; both outputs land on hostA.
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostA", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
  // Kill the first barrier ResultTask.
  runEvent(makeCompletionEvent(
    taskSets(1).tasks(0),
    TaskKilled("test"),
    null))
  // A failed barrier ResultStage cannot be recovered, so the whole job must abort.
  sc.listenerBus.waitUntilEmpty()
  assert(failure.getMessage.startsWith("Job aborted due to stage failure: Could not recover " +
    "from a failed barrier ResultStage."))
}
/**
 * This tests the case where another FetchFailed comes in while the map stage is getting
 * re-run.
 */
test("late fetch failures don't cause multiple concurrent attempts for the same map stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
// Counts how many attempts of the map stage the listener has seen submitted.
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(sparkListener.failedStages.contains(1))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
null))
// Another ResubmitFailedStages event should not result in another attempt for the map
// stage being run concurrently.
// NOTE: the actual ResubmitFailedStages may get called at any time during this, but it
// shouldn't affect anything -- our calling it just makes *SURE* it gets called between the
// desired event and our check.
runEvent(ResubmitFailedStages)
assert(countSubmittedMapStageAttempts() === 2)
}
/**
 * This tests the case where a late FetchFailed comes in after the map stage has finished getting
 * retried and a new reduce stage starts running.
 */
test("extremely late fetch failures don't cause multiple concurrent attempts for " +
"the same stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Counts how many attempts of the reduce stage (id 1) the listener has seen submitted.
def countSubmittedReduceStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 1)
}
// Counts how many attempts of the map stage (id 0) the listener has seen submitted.
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 0)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// Complete the map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The reduce stage should have been submitted.
assert(countSubmittedReduceStageAttempts() === 1)
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
// Trigger resubmission of the failed map stage and finish the re-started map task.
runEvent(ResubmitFailedStages)
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
// Because the map stage finished, another attempt for the reduce stage should have been
// submitted, resulting in 2 total attempts for each the map and the reduce stage.
assert(countSubmittedMapStageAttempts() === 2)
assert(countSubmittedReduceStageAttempts() === 2)
// A late FetchFailed arrives from the second task in the original reduce stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
null))
// Running ResubmitFailedStages shouldn't result in any more attempts for the map stage, because
// the FetchFailed should have been ignored
runEvent(ResubmitFailedStages)
// The FetchFailed from the original reduce stage should be ignored.
assert(countSubmittedMapStageAttempts() === 2)
}
test("task events always posted in speculation / when stage is killed") {
val baseRdd = new MyRDD(sc, 4, Nil)
val finalRdd = new MyRDD(sc, 4, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0, 1, 2, 3))
// complete two tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(1)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
assert(sparkListener.endedTasks.size === 2)
// finish other 2 tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(2)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(3)))
assert(sparkListener.endedTasks.size === 4)
// verify the stage is done
assert(!scheduler.stageIdToStage.contains(0))
// Stage should be complete. Finish one other Successful task to simulate what can happen
// with a speculative task and make sure the event is sent out
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(5)))
// Even though the stage already finished, the late completion must still be posted.
assert(sparkListener.endedTasks.size === 5)
// make sure non successful tasks also send out event
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), UnknownReason, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(6)))
assert(sparkListener.endedTasks.size === 6)
}
test("ignore late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// pretend we were told hostA went away
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
// Losing an executor must bump the epoch so stale completions can be detected.
assert(newEpoch > oldEpoch)
// now start completing some tasks in the shuffle map stage, under different hosts
// and epochs, and make sure scheduler updates its state correctly
val taskSet = taskSets(0)
val shuffleStage = scheduler.stageIdToStage(taskSet.stageId).asInstanceOf[ShuffleMapStage]
assert(shuffleStage.numAvailableOutputs === 0)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 0)
// should work because it's a non-failed host (so the available map outputs will increase)
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostB", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should work because it's a new epoch, which will increase the number of available map
// outputs, and also finish the stage
taskSet.tasks(1).epoch = newEpoch
runEvent(makeCompletionEvent(
taskSet.tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 2)
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
// finish the next stage normally, which completes the job
complete(taskSets(1), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
test("run shuffle with map stage failure") {
  val mapRdd = new MyRDD(sc, 2, Nil)
  val dep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
  val reducer = new MyRDD(sc, 2, List(dep), tracker = mapOutputTracker)
  submit(reducer, Array(0, 1))
  // Failing the map stage should abort the entire job with that stage's message.
  val stageFailureMessage = "Exception failure in map stage"
  failed(taskSets(0), stageFailureMessage)
  assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
  // Only the map stage failure reaches the listener bus; the reduce stage never started,
  // so it must not be reported as failed.
  assert(sparkListener.failedStages.toSet === Set(0))
  assertDataStructuresEmpty()
}
/**
 * Run two jobs, with a shared dependency. We simulate a fetch failure in the second job, which
 * requires regenerating some outputs of the shared dependency. One key aspect of this test is
 * that the second job actually uses a different stage for the shared dependency (a "skipped"
 * stage).
 */
test("shuffle fetch failure in a reused shuffle dependency") {
// Run the first job successfully, which creates one shuffle dependency
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(0, 0, 2)
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42, 1 -> 42))
assertDataStructuresEmpty()
// submit another job w/ the shared dependency, and have a fetch failure
val reduce2 = new MyRDD(sc, 2, List(shuffleDep))
submit(reduce2, Array(0, 1))
// Note that the stage numbering here is only b/c the shared dependency produces a new, skipped
// stage. If instead it reused the existing stage, then this would be stage 2
completeNextStageWithFetchFailure(3, 0, shuffleDep)
scheduler.resubmitFailedStages()
// the scheduler now creates a new task set to regenerate the missing map output, but this time
// using a different stage, the "skipped" one
// SPARK-9809 -- this stage is submitted without a task for each partition (because some of
// the shuffle map output is still available from stage 0); make sure we've still got internal
// accumulators setup
assert(scheduler.stageIdToStage(2).latestInfo.taskMetrics != null)
completeShuffleMapStageSuccessfully(2, 0, 2)
// Use distinct result values (1234, 1235) to prove the second job's results, not stale 42s.
completeNextResultStageWithSuccess(3, 1, idx => idx + 1234)
assert(results === Map(0 -> 1234, 1 -> 1235))
assertDataStructuresEmpty()
}
/**
 * This test runs a three stage job, with a fetch failure in stage 1, but during the retry, we
 * have completions from both the first & second attempt of stage 1. So all the map output is
 * available before we finish any task set for stage 1. We want to make sure that we don't
 * submit stage 2 until the map output for stage 1 is registered
 */
test("don't submit stage until its dependencies map outputs are registered (SPARK-5259)") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// things start out smoothly, stage 0 completes with no issues
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostA", shuffleMapRdd.partitions.length))
))
// then one executor dies, and a task fails in stage 1
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(null, firstShuffleId, 2L, 2, 0, "Fetch failed"),
null))
// so we resubmit stage 0, which completes happily
scheduler.resubmitFailedStages()
val stage0Resubmit = taskSets(2)
assert(stage0Resubmit.stageId == 0)
assert(stage0Resubmit.stageAttemptId === 1)
// Only the output lost with hostA (partition 2) needs to be regenerated.
val task = stage0Resubmit.tasks(0)
assert(task.partitionId === 2)
runEvent(makeCompletionEvent(
task,
Success,
makeMapStatus("hostC", shuffleMapRdd.partitions.length)))
// now here is where things get tricky : we will now have a task set representing
// the second attempt for stage 1, but we *also* have some tasks for the first attempt for
// stage 1 still going
val stage1Resubmit = taskSets(3)
assert(stage1Resubmit.stageId == 1)
assert(stage1Resubmit.stageAttemptId === 1)
assert(stage1Resubmit.tasks.length === 3)
// we'll have some tasks finish from the first attempt, and some finish from the second attempt,
// so that we actually have all stage outputs, though no attempt has completed all its
// tasks
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(3).tasks(1),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
// late task finish from the first attempt
runEvent(makeCompletionEvent(
taskSets(1).tasks(2),
Success,
makeMapStatus("hostB", reduceRdd.partitions.length)))
// What should happen now is that we submit stage 2. However, we might not see an error
// b/c of DAGScheduler's error handling (it tends to swallow errors and just log them). But
// we can check some conditions.
// Note that the really important thing here is not so much that we submit stage 2 *immediately*
// but that we don't end up with some error from these interleaved completions. It would also
// be OK (though sub-optimal) if stage 2 simply waited until the resubmission of stage 1 had
// all its tasks complete
// check that we have all the map output for stage 0 (it should have been there even before
// the last round of completions from stage 1, but just to double check it hasn't been messed
// up) and also the newly available stage 1
val stageToReduceIdxs = Seq(
0 -> (0 until 3),
1 -> (0 until 1)
)
for {
(stage, reduceIdxs) <- stageToReduceIdxs
reduceIdx <- reduceIdxs
} {
// this would throw an exception if the map status hadn't been registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(stage, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 2 has been submitted
assert(taskSets.size == 5)
val stage2TaskSet = taskSets(4)
assert(stage2TaskSet.stageId == 2)
assert(stage2TaskSet.stageAttemptId == 0)
}
/**
 * We lose an executor after completing some shuffle map tasks on it. Those tasks get
 * resubmitted, and when they finish the job completes normally
 */
test("register map outputs correctly after ExecutorLost and task Resubmitted") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 5, List(firstShuffleDep))
submit(reduceRdd, Array(0))
// complete some of the tasks from the first stage, on one host
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
// now that host goes down
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// so we resubmit those tasks
runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null))
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Resubmitted, null))
// now complete everything on a different host
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))
))
// now we should submit stage 1, and the map output from stage 0 should be registered
// check that we have all the map output for stage 0
(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
// this would throw if the map status for this reduce partition weren't registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 1 has been submitted
assert(taskSets.size == 2)
val stage1TaskSet = taskSets(1)
assert(stage1TaskSet.stageId == 1)
assert(stage1TaskSet.stageAttemptId == 0)
}
/**
 * Makes sure that failures of stage used by multiple jobs are correctly handled.
 *
 * This test creates the following dependency graph:
 *
 * shuffleMapRdd1     shuffleMapRDD2
 *        |     \        |
 *        |      \       |
 *        |       \      |
 *        |        \     |
 *   reduceRdd1      reduceRdd2
 *
 * We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
 * reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
 * also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
 * without shuffleMapRdd1.
 */
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2), tracker = mapOutputTracker)
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
// Records the message of the last job failure; null until jobFailed is invoked.
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any): Unit = {}
override def jobFailed(exception: Exception): Unit = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener = listener1)
submit(reduceRdd2, Array(0, 1), listener = listener2)
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
// Stage 0 (shared) and stage 2 (shuffleMapRdd2's stage) should both be cancelled.
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty()
}
/** Verifies a TaskSet carries the expected "testProperty" value and scheduling priority. */
def checkJobPropertiesAndPriority(taskSet: TaskSet, expected: String, priority: Int): Unit = {
  val props = taskSet.properties
  assert(props != null)
  assert(props.getProperty("testProperty") === expected)
  assert(taskSet.priority === priority)
}
/**
 * Submits two jobs that share the intermediate stages, cancels the first job, and then
 * completes the first task set (stage 0, still carrying job1's properties). Returns the
 * first shuffle dependency so callers can inject fetch failures against it.
 */
def launchJobsThatShareStageAndCancelFirst(): ShuffleDependency[Int, Int, Nothing] = {
val baseRdd = new MyRDD(sc, 1, Nil)
val shuffleDep1 = new ShuffleDependency(baseRdd, new HashPartitioner(1))
val intermediateRdd = new MyRDD(sc, 1, List(shuffleDep1))
val shuffleDep2 = new ShuffleDependency(intermediateRdd, new HashPartitioner(1))
val finalRdd1 = new MyRDD(sc, 1, List(shuffleDep2))
val finalRdd2 = new MyRDD(sc, 1, List(shuffleDep2))
val job1Properties = new Properties()
val job2Properties = new Properties()
job1Properties.setProperty("testProperty", "job1")
job2Properties.setProperty("testProperty", "job2")
// Run jobs 1 & 2, both referencing the same stage, then cancel job1.
// Note that we have to submit job2 before we cancel job1 to have them actually share
// *Stages*, and not just shuffle dependencies, due to skipped stages (at least until
// we address SPARK-10193.)
val jobId1 = submit(finalRdd1, Array(0), properties = job1Properties)
val jobId2 = submit(finalRdd2, Array(0), properties = job2Properties)
assert(scheduler.activeJobs.nonEmpty)
val testProperty1 = scheduler.jobIdToActiveJob(jobId1).properties.getProperty("testProperty")
// remove job1 as an ActiveJob
cancel(jobId1)
// job2 should still be running
assert(scheduler.activeJobs.nonEmpty)
val testProperty2 = scheduler.jobIdToActiveJob(jobId2).properties.getProperty("testProperty")
assert(testProperty1 != testProperty2)
// NB: This next assert isn't necessarily the "desired" behavior; it's just to document
// the current behavior. We've already submitted the TaskSet for stage 0 based on job1, but
// even though we have cancelled that job and are now running it because of job2, we haven't
// updated the TaskSet's properties. Changing the properties to "job2" is likely the more
// correct behavior.
val job1Id = 0 // TaskSet priority for Stages run with "job1" as the ActiveJob
checkJobPropertiesAndPriority(taskSets(0), "job1", job1Id)
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
shuffleDep1
}
/**
 * Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
 * later, active job if they were previously run under a job that is no longer active
 */
test("stage used by two jobs, the first no longer active (SPARK-6880)") {
launchJobsThatShareStageAndCancelFirst()
// The next check is the key for SPARK-6880. For the stage which was shared by both job1 and
// job2 but never had any tasks submitted for job1, the properties of job2 are now used to run
// the stage.
checkJobPropertiesAndPriority(taskSets(1), "job2", 1)
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
// The final (result) stage should also carry non-null properties.
assert(taskSets(2).properties != null)
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
 * Verifies that when a stage shared by two jobs is re-run after fetch failures, the retried
 * attempts carry the properties and priority of the still-active later job rather than the
 * cancelled earlier one.
 */
test("stage used by two jobs, some fetch failures, and the first job no longer active " +
  "(SPARK-6880)") {
  val sharedDep = launchJobsThatShareStageAndCancelFirst()
  // TaskSet priority expected for stages whose ActiveJob is "job2".
  val job2Priority = 1
  // Inject a fetch failure so that stage 0 has to be re-run as attempt 1.
  complete(taskSets(1), Seq(
    (FetchFailed(makeBlockManagerId("hostA"),
      sharedDep.shuffleId, 0L, 0, 0, "ignored"), null)))
  scheduler.resubmitFailedStages()
  // The resubmitted attempt of stage 0 must now carry job2's properties.
  assert(taskSets(2).stageId === 0)
  assert(taskSets(2).stageAttemptId === 1)
  // Drive the two shuffle-map task sets to completion, checking properties before each.
  for (i <- 2 to 3) {
    checkJobPropertiesAndPriority(taskSets(i), "job2", job2Priority)
    complete(taskSets(i), Seq((Success, makeMapStatus("hostA", 1))))
  }
  // The final result stage should also run with job2's properties.
  checkJobPropertiesAndPriority(taskSets(4), "job2", job2Priority)
  complete(taskSets(4), Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assert(scheduler.activeJobs.isEmpty)
  assertDataStructuresEmpty()
}
/**
 * Runs a map stage where one executor is lost mid-stage but a "zombie" success message from a
 * task on that executor still arrives. The stage must be resubmitted so the lost map output is
 * regenerated, and it is only marked finished once the re-run task completes.
 */
test("run trivial shuffle with out-of-band executor failure and retry") {
  val mapRdd = new MyRDD(sc, 2, Nil)
  val dep = new ShuffleDependency(mapRdd, new HashPartitioner(1))
  val shufId = dep.shuffleId
  val resultRdd = new MyRDD(sc, 1, List(dep), tracker = mapOutputTracker)
  submit(resultRdd, Array(0))
  // Lose hostA's executor before the map outputs are registered.
  runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 1)),
    (Success, makeMapStatus("hostB", 1))))
  // The hostA output is invalid, so the DAGScheduler must resubmit the map stage with
  // exactly the one task whose output was lost.
  assert(taskSets.size === 2)
  assert(taskSets(1).tasks.size === 1)
  // The resubmission must be the ShuffleMapStage, not the downstream reduce stage (which
  // shouldn't run until all map tasks have completed on live executors).
  assert(taskSets(1).tasks(0).isInstanceOf[ShuffleMapTask])
  // Re-run the lost task on hostC.
  complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
  assert(mapOutputTracker.getMapSizesByExecutorId(shufId, 0).map(_._1).toSet ===
    HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
  // Only now should the reduce stage be submitted.
  assert(taskSets.size === 3)
  assert(taskSets(2).tasks(0).isInstanceOf[ResultTask[_, _]])
  complete(taskSets(2), Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("recursive shuffle failures") {
  // Two chained shuffle stages feeding a result stage: rddOne --s--> rddTwo --s--> lastRdd.
  val rddOne = new MyRDD(sc, 2, Nil)
  val depOne = new ShuffleDependency(rddOne, new HashPartitioner(2))
  val rddTwo = new MyRDD(sc, 2, List(depOne), tracker = mapOutputTracker)
  val depTwo = new ShuffleDependency(rddTwo, new HashPartitioner(1))
  val lastRdd = new MyRDD(sc, 1, List(depTwo), tracker = mapOutputTracker)
  submit(lastRdd, Array(0))
  // The two shuffle-map stages succeed on their first attempts.
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostB", 2))))
  complete(taskSets(1), Seq(
    (Success, makeMapStatus("hostA", 1)),
    (Success, makeMapStatus("hostC", 1))))
  // The result stage fails to fetch because hostA went down, which invalidates hostA's
  // output in both ancestor shuffle stages.
  complete(taskSets(2), Seq(
    (FetchFailed(makeBlockManagerId("hostA"),
      depTwo.shuffleId, 0L, 0, 0, "ignored"), null)))
  // TODO assert this:
  // blockManagerMaster.removeExecutor("exec-hostA")
  scheduler.resubmitFailedStages()
  // Both ancestor map stages are re-run in order, then the result stage succeeds.
  complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
  complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
  complete(taskSets(5), Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("cached post-shuffle") {
  // Same two-shuffle chain as "recursive shuffle failures", but the intermediate RDDs are
  // cached so a failed stage can be recomputed from cache locations.
  val firstMapRdd = new MyRDD(sc, 2, Nil).cache()
  val firstDep = new ShuffleDependency(firstMapRdd, new HashPartitioner(2))
  val cachedMapRdd = new MyRDD(sc, 2, List(firstDep), tracker = mapOutputTracker).cache()
  val secondDep = new ShuffleDependency(cachedMapRdd, new HashPartitioner(1))
  val lastRdd = new MyRDD(sc, 1, List(secondDep), tracker = mapOutputTracker)
  submit(lastRdd, Array(0))
  // Record cached copies of the intermediate RDD's partitions on hostD and hostC.
  cacheLocations(cachedMapRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
  cacheLocations(cachedMapRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
  // Stages 0 and 1 complete normally.
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostB", 2))))
  complete(taskSets(1), Seq(
    (Success, makeMapStatus("hostA", 1)),
    (Success, makeMapStatus("hostB", 1))))
  // Stage 2 hits a fetch failure because hostA went down.
  complete(taskSets(2), Seq(
    (FetchFailed(makeBlockManagerId("hostA"),
      secondDep.shuffleId, 0L, 0, 0, "ignored"), null)))
  // TODO assert this:
  // blockManagerMaster.removeExecutor("exec-hostA")
  // The scheduler should notice the cached copy of the second shuffle's input and schedule
  // the re-run next to it (hostD).
  scheduler.resubmitFailedStages()
  assertLocations(taskSets(3), Seq(Seq("hostD")))
  // Let hostD recover the lost output, then finish the result stage.
  complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
  complete(taskSets(4), Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
  // An accumulator whose add() always throws, simulating a buggy user accumulator.
  val brokenAcc = new LongAccumulator {
    override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
    override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
  }
  sc.register(brokenAcc)
  // Trigger the failing add() on executors.
  sc.parallelize(1 to 10, 2).foreach { _ => brokenAcc.add(1) }
  // The scheduler must survive; a follow-up job should still run.
  assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("misbehaved accumulator should not impact other accumulators") {
  // An accumulator whose driver-side merge() always throws.
  val faulty = new LongAccumulator {
    override def merge(other: AccumulatorV2[java.lang.Long, java.lang.Long]): Unit = {
      throw new DAGSchedulerSuiteDummyException
    }
  }
  sc.register(faulty, "bad")
  val healthy = sc.longAccumulator("good")
  sc.parallelize(1 to 10, 2).foreach { _ =>
    faulty.add(1)
    healthy.add(1)
  }
  // The faulty accumulator's merge failed, so its driver-side value stays at zero.
  assert(faulty.value == 0L)
  // The healthy accumulator registered alongside it must still be updated.
  assert(healthy.value == 10L)
}
/**
 * The job will be failed on first task throwing an error.
 * Any subsequent task WILL throw a legitimate java.lang.UnsupportedOperationException.
 * If multiple tasks, there exists a race condition between the SparkDriverExecutionExceptions
 * and their differing causes as to which will represent result for job...
 */
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
failAfter(1.minute) { // If DAGScheduler crashes, the following test will hang forever
// Exercise several distinct error classes a result handler might throw on the driver.
for (error <- Seq(
new DAGSchedulerSuiteDummyException,
new AssertionError, // E.g., assert(foo == bar) fails
new NotImplementedError // E.g., call a method with `???` implementation.
)) {
val e = intercept[SparkDriverExecutionException] {
// Number of parallelized partitions implies number of tasks of job
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
// For a robust test assertion, limit number of job tasks to 1; that is,
// if multiple RDD partitions, use id of any one partition, say, first partition id=0
Seq(0),
(part: Int, result: Int) => throw error)
}
// The thrown error must be surfaced as the cause of the job-level failure.
assert(e.getCause eq error)
// Make sure we can still run commands on our SparkContext
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
}
}
test(s"invalid ${SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL} should not crash DAGScheduler") {
// Set the interrupt-on-cancel local property to a non-boolean string; parsing it must
// not take down the scheduler.
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "invalid")
try {
intercept[SparkException] {
sc.parallelize(1 to 1, 1).foreach { _ =>
throw new DAGSchedulerSuiteDummyException
}
}
// Verify the above job didn't crash DAGScheduler by running a simple job
assert(sc.parallelize(1 to 10, 2).count() === 10)
} finally {
// Clear the property so later tests are unaffected.
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
}
test("getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)") {
  // getPartitions runs on the driver during job submission, so the dummy exception
  // surfaces directly rather than wrapped in a SparkException (contrast with the
  // getPreferredLocations test below). The intercepted exception itself is not needed,
  // so it is not bound to a name (the original had an unused `val e1`).
  intercept[DAGSchedulerSuiteDummyException] {
    val rdd = new MyRDD(sc, 2, Nil) {
      override def getPartitions: Array[Partition] = {
        throw new DAGSchedulerSuiteDummyException
      }
    }
    rdd.reduceByKey(_ + _, 1).count()
  }
  // Make sure we can still run commands
  assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)") {
// Unlike getPartitions, a getPreferredLocations failure is wrapped by the scheduler in
// a SparkException whose message carries the original exception's class name.
val e1 = intercept[SparkException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPreferredLocations(split: Partition): Seq[String] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.count()
}
assert(e1.getMessage.contains(classOf[DAGSchedulerSuiteDummyException].getName))
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("accumulator not calculated for resubmitted result stage") {
// just for register
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
// Complete the same (only) task twice, simulating a duplicate/resubmitted completion.
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
// The duplicate completion must not double-count the accumulator update.
assert(accum.value === 1)
assertDataStructuresEmpty()
}
test("accumulator not calculated for resubmitted task in result stage") {
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 2, Nil)
submit(finalRdd, Array(0, 1))
// finish the first task
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
// finish the first task again (simulate a speculative task or a resubmitted task)
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
// The accumulator should only be updated once.
assert(accum.value === 1)
// Finish the remaining partition so the job (and its bookkeeping) can complete.
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Success, 42))
assertDataStructuresEmpty()
}
test("accumulators are updated on exception failures and task killed") {
  val accA = AccumulatorSuite.createLongAccum("ingenieur")
  val accB = AccumulatorSuite.createLongAccum("boulanger")
  val accC = AccumulatorSuite.createLongAccum("agriculteur")
  assert(AccumulatorContext.get(accA.id).isDefined)
  assert(AccumulatorContext.get(accB.id).isDefined)
  assert(AccumulatorContext.get(accC.id).isDefined)
  // Builds an "update" accumulator carrying the metadata of a registered one.
  def updateFor(acc: LongAccumulator, v: Long): LongAccumulator = {
    val upd = new LongAccumulator
    upd.metadata = acc.metadata
    upd.setValue(v)
    upd
  }
  val updA = updateFor(accA, 15)
  val updB = updateFor(accB, 13)
  val updC = updateFor(accC, 18)
  val updatesOnFailure = Seq(updA, updB)
  val infoOnFailure = updatesOnFailure.map(AccumulatorSuite.makeInfo)
  val exceptionFailure = new ExceptionFailure(
    new SparkException("fondue?"),
    infoOnFailure).copy(accums = updatesOnFailure)
  submit(new MyRDD(sc, 1, Nil), Array(0))
  // Accumulators attached to an ExceptionFailure must still reach the driver.
  runEvent(makeCompletionEvent(taskSets.head.tasks.head, exceptionFailure, "result"))
  assert(AccumulatorContext.get(accA.id).get.value === 15L)
  assert(AccumulatorContext.get(accB.id).get.value === 13L)
  // Same for accumulators attached to a TaskKilled reason.
  val updatesOnKill = Seq(updC)
  val infoOnKill = updatesOnKill.map(AccumulatorSuite.makeInfo)
  val taskKilled = new TaskKilled("test", infoOnKill, accums = updatesOnKill)
  runEvent(makeCompletionEvent(taskSets.head.tasks.head, taskKilled, "result"))
  assert(AccumulatorContext.get(accC.id).get.value === 18L)
}
test("reduce tasks should be placed locally with map output") {
  // A single-partition shuffle map stage feeding a single reducer.
  val mapRdd = new MyRDD(sc, 1, Nil)
  val dep = new ShuffleDependency(mapRdd, new HashPartitioner(1))
  val shufId = dep.shuffleId
  val resultRdd = new MyRDD(sc, 1, List(dep), tracker = mapOutputTracker)
  submit(resultRdd, Array(0))
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 1))))
  assert(mapOutputTracker.getMapSizesByExecutorId(shufId, 0).map(_._1).toSet ===
    HashSet(makeBlockManagerId("hostA")))
  // The reducer should be scheduled on the host holding the map output.
  val reducerSet = taskSets(1)
  assertLocations(reducerSet, Seq(Seq("hostA")))
  complete(reducerSet, Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("reduce task locality preferences should only include machines with largest map outputs") {
  val numMapTasks = 4
  // Create a shuffleMapRdd with more partitions. (The original bound the shuffle id to an
  // unused local `shuffleId`; it has been removed.)
  val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  submit(reduceRdd, Array(0))
  // Give map output i a size proportional to i, so hosts 2..4 hold 20%, 30% and 40% of
  // the data respectively.
  val statuses = (1 to numMapTasks).map { i =>
    (Success, makeMapStatus("host" + i, 1, (10 * i).toByte))
  }
  complete(taskSets(0), statuses)
  // Reducer should prefer only the hosts with the largest outputs (all but host1).
  val hosts = (1 to numMapTasks).map(i => "host" + i).reverse.take(numMapTasks - 1)
  val reduceTaskSet = taskSets(1)
  assertLocations(reduceTaskSet, Seq(hosts))
  complete(reduceTaskSet, Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("stages with both narrow and shuffle dependencies use narrow ones for locality") {
  // One parent reached through a shuffle and one through a narrow dependency with a
  // location preference on hostB (the pattern produced by e.g. a join).
  val shuffledParent = new MyRDD(sc, 1, Nil)
  val narrowParent = new MyRDD(sc, 1, Nil, locations = Seq(Seq("hostB")))
  val dep = new ShuffleDependency(shuffledParent, new HashPartitioner(1))
  val narrowDep = new OneToOneDependency(narrowParent)
  val shufId = dep.shuffleId
  val resultRdd = new MyRDD(sc, 1, List(dep, narrowDep), tracker = mapOutputTracker)
  submit(resultRdd, Array(0))
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 1))))
  assert(mapOutputTracker.getMapSizesByExecutorId(shufId, 0).map(_._1).toSet ===
    HashSet(makeBlockManagerId("hostA")))
  // Locality should follow the narrow parent's preference, not the shuffle output's host.
  val reducerSet = taskSets(1)
  assertLocations(reducerSet, Seq(Seq("hostB")))
  complete(reducerSet, Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("Spark exceptions should include call site in stack trace") {
val e = intercept[SparkException] {
sc.parallelize(1 to 10, 2).map { _ => throw new RuntimeException("uh-oh!") }.count()
}
// Does not include message, ONLY stack trace.
val stackTraceString = Utils.exceptionString(e)
// should actually include the RDD operation that invoked the method:
assert(stackTraceString.contains("org.apache.spark.rdd.RDD.count"))
// should include the FunSuite setup:
assert(stackTraceString.contains("org.scalatest.FunSuite"))
}
test("catch errors in event loop") {
// this is a test of our testing framework -- make sure errors in event loop don't get ignored
// just run some bad event that will throw an exception -- we'll give a null TaskEndReason
val rdd1 = new MyRDD(sc, 1, Nil)
submit(rdd1, Array(0))
// A null TaskEndReason makes the event loop throw; `complete` must propagate that
// rather than swallowing it.
intercept[Exception] {
complete(taskSets(0), Seq(
(null, makeMapStatus("hostA", 1))))
}
}
test("simple map stage submission") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  // (The original bound the shuffle id to an unused local `shuffleId`; it has been removed.)
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  // Submit a map stage by itself
  submitMapStage(shuffleDep)
  assert(results.size === 0) // No results yet
  completeShuffleMapStageSuccessfully(0, 0, 1)
  assert(results.size === 1)
  results.clear()
  assertDataStructuresEmpty()
  // Submit a reduce job that depends on this map stage; it should directly do the reduce
  submit(reduceRdd, Array(0))
  completeNextResultStageWithSuccess(2, 0)
  assert(results === Map(0 -> 42))
  results.clear()
  assertDataStructuresEmpty()
  // Check that if we submit the map stage again, no tasks run
  submitMapStage(shuffleDep)
  assert(results.size === 1)
  assertDataStructuresEmpty()
}
test("map stage submission with reduce stage also depending on the data") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  // (The original bound the shuffle id to an unused local `shuffleId`; it has been removed.)
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  // Submit the map stage by itself
  submitMapStage(shuffleDep)
  // Submit a reduce job that depends on this map stage
  submit(reduceRdd, Array(0))
  // Complete tasks for the map stage
  completeShuffleMapStageSuccessfully(0, 0, 1)
  assert(results.size === 1)
  results.clear()
  // Complete tasks for the reduce stage
  completeNextResultStageWithSuccess(1, 0)
  assert(results === Map(0 -> 42))
  results.clear()
  assertDataStructuresEmpty()
  // Check that if we submit the map stage again, no tasks run
  submitMapStage(shuffleDep)
  assert(results.size === 1)
  assertDataStructuresEmpty()
}
test("map stage submission with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage, but where one reduce will fail a fetch
submit(reduceRdd, Array(0, 1))
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
// Ask the scheduler to try it again; TaskSet 2 will rerun the map task that we couldn't fetch
// from, then TaskSet 3 will run the reduce stage
scheduler.resubmitFailedStages()
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
// Run another reduce job without a failure; this should just work
submit(reduceRdd, Array(0, 1))
complete(taskSets(4), Seq(
(Success, 44),
(Success, 45)))
assert(results === Map(0 -> 44, 1 -> 45))
results.clear()
assertDataStructuresEmpty()
// Resubmit the map stage; this should also just work
// (the map outputs are already registered, so the job completes without running tasks).
submitMapStage(shuffleDep)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
}
/**
 * In this test, we have three RDDs with shuffle dependencies, and we submit map stage jobs
 * that are waiting on each one, as well as a reduce job on the last one. We test that all of
 * these jobs complete even if there are some fetch failures in both shuffles.
 */
test("map stage submission with multiple shared stages and failures") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val rdd3 = new MyRDD(sc, 2, List(dep2), tracker = mapOutputTracker)
// One listener per job so each job's completion can be observed independently.
val listener1 = new SimpleListener
val listener2 = new SimpleListener
val listener3 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
submit(rdd3, Array(0, 1), listener = listener3)
// Complete the first stage
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting the second stage, show a fetch failure
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
assert(listener2.results.size === 0) // Second stage listener should not have a result yet
// Stage 0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
assert(listener2.results.size === 0) // Second stage listener should still not have a result
// Stage 1 should now be running as task set 3; make its first task succeed
assert(taskSets(3).stageId === 1)
complete(taskSets(3), Seq(
(Success, makeMapStatus("hostB", rdd2.partitions.length)),
(Success, makeMapStatus("hostD", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep2.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostD")))
assert(listener2.results.size === 1)
// Finally, the reduce job should be running as task set 4; make it see a fetch failure,
// then make it run again and succeed
assert(taskSets(4).stageId === 2)
complete(taskSets(4), Seq(
(Success, 52),
(FetchFailed(makeBlockManagerId("hostD"), dep2.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// TaskSet 5 will rerun stage 1's lost task, then TaskSet 6 will rerun stage 2
assert(taskSets(5).stageId === 1)
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostE", rdd2.partitions.length))))
complete(taskSets(6), Seq(
(Success, 53)))
assert(listener3.results === Map(0 -> 52, 1 -> 53))
assertDataStructuresEmpty()
}
// Verifies that a map-stage job's listener fires from submitMissingTasks when the stage turns
// out to have no missing partitions, rather than only from task-completion handling.
test("Trigger mapstage's job listener in submitMissingTasks") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val listener1 = new SimpleListener
val listener2 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
// Complete the stage0.
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting stage1, trigger a fetch failure.
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// Stage1 listener should not have a result yet
assert(listener2.results.size === 0)
// Speculative task succeeded in stage1.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
Success,
makeMapStatus("hostD", rdd2.partitions.length)))
// stage1 listener still should not have a result, though there's no missing partitions
// in it. Because stage1 has been failed and is not inside `runningStages` at this moment.
assert(listener2.results.size === 0)
// Stage0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
Set(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
// After stage0 is finished, stage1 will be submitted and found there is no missing
// partitions in it. Then listener got triggered.
assert(listener2.results.size === 1)
assertDataStructuresEmpty()
}
/**
 * In this test, we run a map stage where one of the executors fails but we still receive a
 * "zombie" complete message from that executor. We want to make sure the stage is not reported
 * as done until all tasks have completed.
 *
 * Most of the functionality in this test is tested in "run trivial shuffle with out-of-band
 * executor failure and retry". However, that test uses ShuffleMapStages that are followed by
 * a ResultStage, whereas in this test, the ShuffleMapStage is tested in isolation, without a
 * ResultStage after it.
 */
test("map stage submission with executor failure late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 3, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
submitMapStage(shuffleDep)
val oldTaskSet = taskSets(0)
runEvent(makeCompletionEvent(oldTaskSet.tasks(0), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// Pretend host A was lost. This will cause the TaskSetManager to resubmit task 0, because it
// completed on hostA.
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// The executor loss must bump the map output tracker epoch so stale outputs are rejected.
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// Suppose we also get a completed event from task 1 on the same host; this should be ignored
runEvent(makeCompletionEvent(oldTaskSet.tasks(1), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// A completion from another task should work because it's a non-failed host
runEvent(makeCompletionEvent(oldTaskSet.tasks(2), Success, makeMapStatus("hostB", 2)))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers
// the stage complete), but the task that ran on hostA needs to be re-run, so the map stage
// shouldn't be marked as complete, and the DAGScheduler should re-submit the stage.
assert(results.size === 0)
assert(taskSets.size === 2)
// Now complete tasks in the second task set
val newTaskSet = taskSets(1)
// 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA).
assert(newTaskSet.tasks.size === 2)
// Complete task 0 from the original task set (i.e., not the one that's currently active).
// This should still be counted towards the job being complete (but there's still one
// outstanding task).
// NOTE(review): the comment above says "original task set", but the code completes
// newTaskSet.tasks(0) -- confirm which is intended.
runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2)))
assert(results.size === 0)
// Complete the final task, from the currently active task set. There's still one
// running task, task 0 in the currently active stage attempt, but the success of task 0 means
// the DAGScheduler can mark the stage as finished.
runEvent(makeCompletionEvent(newTaskSet.tasks(1), Success, makeMapStatus("hostB", 2)))
assert(results.size === 1) // Map stage job should now finally be complete
assertDataStructuresEmpty()
// Also test that a reduce stage using this shuffled data can immediately run
val reduceRDD = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
results.clear()
submit(reduceRDD, Array(0, 1))
complete(taskSets(2), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
}
/**
 * Checks the DAGScheduler's internal logic for traversing an RDD DAG by making sure that
 * getShuffleDependencies correctly returns the direct shuffle dependencies of a particular
 * RDD. The test creates the following RDD graph (where n denotes a narrow dependency and s
 * denotes a shuffle dependency):
 *
 *   A <------------s---------,
 *                             \\
 *   B <--s-- C <--s-- D <--n------ E
 *
 * Here, the direct shuffle dependency of C is just the shuffle dependency on B. The direct
 * shuffle dependencies of E are the shuffle dependency on A and the shuffle dependency on C.
 */
test("getShuffleDependencies correctly returns only direct shuffle parents") {
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val rddB = new MyRDD(sc, 2, Nil)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val rddC = new MyRDD(sc, 1, List(shuffleDepB))
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val rddD = new MyRDD(sc, 1, List(shuffleDepC))
val narrowDepD = new OneToOneDependency(rddD)
val rddE = new MyRDD(sc, 1, List(shuffleDepA, narrowDepD), tracker = mapOutputTracker)
// Leaf RDDs have no shuffle parents; narrow dependencies are traversed through, so E sees
// D's shuffle parent (C) but not anything further up the chain.
assert(scheduler.getShuffleDependencies(rddA) === Set())
assert(scheduler.getShuffleDependencies(rddB) === Set())
assert(scheduler.getShuffleDependencies(rddC) === Set(shuffleDepB))
assert(scheduler.getShuffleDependencies(rddD) === Set(shuffleDepC))
assert(scheduler.getShuffleDependencies(rddE) === Set(shuffleDepA, shuffleDepC))
}
// Note: the original test name concatenated "...subsequent stages" + "still behave...",
// producing "stagesstill"; the missing space is fixed below.
test("SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stages " +
  "still behave correctly on fetch failures") {
  // Runs a job that always encounters a fetch failure, so should eventually be aborted.
  def runJobWithPersistentFetchFailure: Unit = {
    val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
    val shuffleHandle =
      rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
    rdd1.map {
      case (x, _) if (x == 1) =>
        throw new FetchFailedException(
          BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
      case (x, _) => x
    }.count()
  }

  // Runs a job that encounters a single fetch failure but succeeds on the second attempt.
  // NOTE(review): the map below is not followed by an action, so the transformation is lazy
  // and never actually executed -- confirm whether a trailing action was intended.
  def runJobWithTemporaryFetchFailure: Unit = {
    val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
    val shuffleHandle =
      rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
    rdd1.map {
      case (x, _) if (x == 1) && FailThisAttempt._fail.getAndSet(false) =>
        throw new FetchFailedException(
          BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
    }
  }

  failAfter(10.seconds) {
    val e = intercept[SparkException] {
      runJobWithPersistentFetchFailure
    }
    assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
  }

  // Run a second job that will fail due to a fetch failure.
  // This job will hang without the fix for SPARK-17644.
  failAfter(10.seconds) {
    val e = intercept[SparkException] {
      runJobWithPersistentFetchFailure
    }
    assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
  }

  failAfter(10.seconds) {
    try {
      runJobWithTemporaryFetchFailure
    } catch {
      case e: Throwable => fail("A job with one fetch failure should eventually succeed")
    }
  }
}
// Regression test for SPARK-19263: the DAGScheduler must never have more than one
// active (non-zombie) TaskSet for the same stage, even when tasks from an earlier,
// zombie stage attempt complete after a newer attempt has been submitted.
test("[SPARK-19263] DAGScheduler should not submit multiple active tasksets," +
" even with late completions from earlier stage attempts") {
// Create 3 RDDs with shuffle dependencies on each other: rddA <--- rddB <--- rddC
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(2))
val shuffleIdA = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 2, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(2))
val rddC = new MyRDD(sc, 2, List(shuffleDepB), tracker = mapOutputTracker)
submit(rddC, Array(0, 1))
// Complete both tasks in rddA.
assert(taskSets(0).stageId === 0 && taskSets(0).stageAttemptId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostA", 2))))
// Fetch failed for task(stageId=1, stageAttemptId=0, partitionId=0) running on hostA
// and task(stageId=1, stageAttemptId=0, partitionId=1) is still running.
assert(taskSets(1).stageId === 1 && taskSets(1).stageAttemptId === 0)
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleIdA, 0L, 0, 0,
"Fetch failure of task: stageId=1, stageAttempt=0, partitionId=0"),
result = null))
// Both original tasks in rddA should be marked as failed, because they ran on the
// failed hostA, so both should be resubmitted. Complete them on hostB successfully.
scheduler.resubmitFailedStages()
assert(taskSets(2).stageId === 0 && taskSets(2).stageAttemptId === 1
&& taskSets(2).tasks.size === 2)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostB", 2))))
// Complete task(stageId=1, stageAttemptId=0, partitionId=1) running on failed hostA
// successfully. The success should be ignored because the task started before the
// executor failed, so the output may have been lost.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1), Success, makeMapStatus("hostA", 2)))
// task(stageId=1, stageAttemptId=1, partitionId=1) should be marked completed when
// task(stageId=1, stageAttemptId=0, partitionId=1) finished
// ideally we would verify that but no way to get into task scheduler to verify
// Both tasks in rddB should be resubmitted, because none of them has succeeded truly.
// Complete the task(stageId=1, stageAttemptId=1, partitionId=0) successfully.
// Task(stageId=1, stageAttemptId=1, partitionId=1) of this new active stage attempt
// is still running.
assert(taskSets(3).stageId === 1 && taskSets(3).stageAttemptId === 1
&& taskSets(3).tasks.size === 2)
runEvent(makeCompletionEvent(
taskSets(3).tasks(0), Success, makeMapStatus("hostB", 2)))
// At this point there should be no active task set for stageId=1 and we need
// to resubmit because the output from (stageId=1, stageAttemptId=0, partitionId=1)
// was ignored due to executor failure
assert(taskSets.size === 5)
assert(taskSets(4).stageId === 1 && taskSets(4).stageAttemptId === 2
&& taskSets(4).tasks.size === 1)
// Complete task(stageId=1, stageAttempt=2, partitionId=1) successfully.
runEvent(makeCompletionEvent(
taskSets(4).tasks(0), Success, makeMapStatus("hostB", 2)))
// Now the ResultStage should be submitted, because all of the tasks of rddB have
// completed successfully on alive executors.
assert(taskSets.size === 6 && taskSets(5).tasks(0).isInstanceOf[ResultTask[_, _]])
complete(taskSets(5), Seq(
(Success, 1),
(Success, 1)))
}
// Regression test for SPARK-20342: the TaskInfo delivered in SparkListenerTaskEnd
// events must already contain the task's accumulator updates. The race is timing
// dependent, hence the retry loop below.
test("task end event should have updated accumulators (SPARK-20342)") {
val tasks = 10
// Shared between the driver thread and the listener thread.
val accumId = new AtomicLong()
val foundCount = new AtomicLong()
val listener = new SparkListener() {
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
// Count each task-end event whose TaskInfo carries the accumulator under test.
event.taskInfo.accumulables.find(_.id == accumId.get).foreach { _ =>
foundCount.incrementAndGet()
}
}
}
sc.addSparkListener(listener)
// Try a few times in a loop to make sure. This is not guaranteed to fail when the bug exists,
// but it should at least make the test flaky. If the bug is fixed, this should always pass.
(1 to 10).foreach { i =>
foundCount.set(0L)
val accum = sc.longAccumulator(s"accum$i")
accumId.set(accum.id)
sc.parallelize(1 to tasks, tasks).foreach { _ =>
accum.add(1L)
}
// Drain the listener bus so all task-end events have been delivered before asserting.
sc.listenerBus.waitUntilEmpty()
assert(foundCount.get() === tasks)
}
}
// Two task failures from the *same* attempt of a barrier stage must cause only a
// single stage retry, not one retry per failed task.
test("Barrier task failures from the same stage attempt don't trigger multiple stage retries") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
// Counts how many attempts of the map stage have been submitted so far.
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// The first map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
}
// A task failure arriving from an already-superseded (zombie) attempt of a barrier
// stage must not trigger yet another stage retry.
test("Barrier task failures from a previous stage attempt don't trigger stage retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
// Counts how many attempts of the map stage have been submitted so far.
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// The first map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second map task fails with TaskKilled. This completion belongs to the old
// (now zombie) attempt 0, since a second attempt has already been submitted.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
// The second map task failure doesn't trigger stage retry.
runEvent(ResubmitFailedStages)
assert(countSubmittedMapStageAttempts() === 2)
}
/**
 * Builds a two-shuffle job whose first map RDD is indeterminate, completes both
 * shuffle map stages, then injects a fetch failure into the final stage.
 * Returns the two shuffle ids (shuffleId1, shuffleId2) so callers can assert on
 * the rollback behavior that follows.
 */
private def constructIndeterminateStageFetchFailed(): (Int, Int) = {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleId1 = shuffleDep1.shuffleId
val shuffleMapRdd2 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val shuffleId2 = shuffleDep2.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep2), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
// Finish the first shuffle map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
// Finish the second shuffle map stage.
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostC", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
// The first task of the final stage failed with fetch failure
runEvent(makeCompletionEvent(
taskSets(2).tasks(0),
FetchFailed(makeBlockManagerId("hostC"), shuffleId2, 0L, 0, 0, "ignored"),
null))
(shuffleId1, shuffleId2)
}
// With the old shuffle fetch protocol, an indeterminate map stage cannot be rolled
// back, so the job must be aborted with an explanatory message.
test("SPARK-25341: abort stage while using old fetch protocol") {
// reset the test context with using old fetch protocol
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL.key, "true")
init(conf)
// Construct the scenario of indeterminate stage fetch failed.
constructIndeterminateStageFetchFailed()
// The job should fail because Spark can't rollback the shuffle map stage while
// using old protocol.
assert(failure != null && failure.getMessage.contains(
"Spark can only do this while using the new shuffle block fetching protocol"))
}
// When an indeterminate map stage must be rerun, every stage downstream of it has
// to be rerun in full as well (its previous outputs may no longer be consistent).
test("SPARK-25341: retry all the succeeding stages when the map stage is indeterminate") {
val (shuffleId1, shuffleId2) = constructIndeterminateStageFetchFailed()
// Check status for all failedStages
val failedStages = scheduler.failedStages.toSeq
assert(failedStages.map(_.id) == Seq(1, 2))
// Shuffle blocks of "hostC" is lost, so first task of the `shuffleMapRdd2` needs to retry.
assert(failedStages.collect {
case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId2 => stage
}.head.findMissingPartitions() == Seq(0))
// The result stage is still waiting for its 2 tasks to complete
assert(failedStages.collect {
case stage: ResultStage => stage
}.head.findMissingPartitions() == Seq(0, 1))
scheduler.resubmitFailedStages()
// The first task of the `shuffleMapRdd2` failed with fetch failure
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId1, 0L, 0, 0, "ignored"),
null))
val newFailedStages = scheduler.failedStages.toSeq
assert(newFailedStages.map(_.id) == Seq(0, 1))
scheduler.resubmitFailedStages()
// First shuffle map stage resubmitted and reran all tasks: since the stage is
// indeterminate, both partitions (not just the missing one) are recomputed.
assert(taskSets(4).stageId == 0)
assert(taskSets(4).stageAttemptId == 1)
assert(taskSets(4).tasks.length == 2)
// Finish all stages.
complete(taskSets(4), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostC", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
complete(taskSets(6), Seq((Success, 11), (Success, 12)))
// Job successfully ended.
assert(results === Map(0 -> 11, 1 -> 12))
results.clear()
assertDataStructuresEmpty()
}
// Rollback must cascade through a chain of consecutive indeterminate map stages:
// losing outputs in the middle forces every indeterminate stage in the chain to rerun.
test("SPARK-25341: continuous indeterminate stage roll back") {
// shuffleMapRdd1/2/3 are all indeterminate.
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleId1 = shuffleDep1.shuffleId
val shuffleMapRdd2 = new MyRDD(
sc, 2, List(shuffleDep1), tracker = mapOutputTracker, indeterminate = true)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val shuffleId2 = shuffleDep2.shuffleId
val shuffleMapRdd3 = new MyRDD(
sc, 2, List(shuffleDep2), tracker = mapOutputTracker, indeterminate = true)
val shuffleDep3 = new ShuffleDependency(shuffleMapRdd3, new HashPartitioner(2))
val shuffleId3 = shuffleDep3.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep3), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1), properties = new Properties())
// Finish the first 2 shuffle map stages.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
// Executor lost on hostB, both of stage 0 and 1 should be rerun.
runEvent(makeCompletionEvent(
taskSets(2).tasks(0),
FetchFailed(makeBlockManagerId("hostB"), shuffleId2, 0L, 0, 0, "ignored"),
null))
mapOutputTracker.removeOutputsOnHost("hostB")
assert(scheduler.failedStages.toSeq.map(_.id) == Seq(1, 2))
scheduler.resubmitFailedStages()
// Verifies that the given task set is attempt 1 of `stageId` with a full complement
// of 2 tasks (indeterminate stages rerun all partitions), then completes it.
def checkAndCompleteRetryStage(
taskSetIndex: Int,
stageId: Int,
shuffleId: Int): Unit = {
assert(taskSets(taskSetIndex).stageId == stageId)
assert(taskSets(taskSetIndex).stageAttemptId == 1)
assert(taskSets(taskSetIndex).tasks.length == 2)
complete(taskSets(taskSetIndex), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
}
// Check all indeterminate stage roll back.
checkAndCompleteRetryStage(3, 0, shuffleId1)
checkAndCompleteRetryStage(4, 1, shuffleId2)
checkAndCompleteRetryStage(5, 2, shuffleId3)
// Result stage success, all job ended.
complete(taskSets(6), Seq((Success, 11), (Success, 12)))
assert(results === Map(0 -> 11, 1 -> 12))
results.clear()
assertDataStructuresEmpty()
}
// SPARK-29042: sampling an RDD whose input ordering is UNORDERED must downgrade
// the output determinism level to INDETERMINATE.
test("SPARK-29042: Sampled RDD with unordered input should be indeterminate") {
  val sourceRdd = new MyRDD(sc, 2, Nil, indeterminate = false)
  val dep = new ShuffleDependency(sourceRdd, new HashPartitioner(2))
  val shuffledRdd = new MyRDD(sc, 2, List(dep), tracker = mapOutputTracker)
  // Shuffle output arrives in no particular order.
  assert(shuffledRdd.outputDeterministicLevel == DeterministicLevel.UNORDERED)
  val sampled = shuffledRdd.sample(withReplacement = true, fraction = 0.3, seed = 1000L)
  assert(sampled.outputDeterministicLevel == DeterministicLevel.INDETERMINATE)
}
/**
 * Drives a shuffle + result job over `mapRdd` to the point where the result stage
 * has one finished task and then hits a fetch failure, and asserts that the job is
 * aborted because a partially-committed result stage cannot be rolled back.
 */
private def assertResultStageFailToRollback(mapRdd: MyRDD): Unit = {
val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// Finish the first task of the result stage
runEvent(makeCompletionEvent(
taskSets.last.tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Fail the second task with FetchFailed.
runEvent(makeCompletionEvent(
taskSets.last.tasks(1),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
// The job should fail because Spark can't rollback the result stage.
assert(failure != null && failure.getMessage.contains("Spark cannot rollback"))
}
// An indeterminate map stage feeding a partially-committed result stage must abort
// the job, since the result stage cannot be rolled back.
test("SPARK-23207: cannot rollback a result stage") {
  assertResultStageFailToRollback(new MyRDD(sc, 2, Nil, indeterminate = true))
}
// A *local* checkpoint does not make an indeterminate RDD safe: its data lives on
// executors and can be lost, so the result stage still cannot be rolled back.
test("SPARK-23207: local checkpoint fail to rollback (checkpointed before)") {
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.localCheckpoint()
// Materialize the checkpoint before running the job.
shuffleMapRdd.doCheckpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
// Same as above, but the local checkpoint has only been requested (not yet
// materialized) when the job runs — rollback must still fail.
test("SPARK-23207: local checkpoint fail to rollback (checkpointing now)") {
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.localCheckpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
/**
 * Drives the same scenario as assertResultStageFailToRollback, but asserts the
 * opposite outcome: the job survives, only the failed map partition is retried, and
 * the already-finished result partition is kept (i.e. the result stage is NOT
 * rolled back). Used for reliably-checkpointed (hence effectively deterministic)
 * map RDDs.
 */
private def assertResultStageNotRollbacked(mapRdd: MyRDD): Unit = {
val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// Finish the first task of the result stage
runEvent(makeCompletionEvent(
taskSets.last.tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Fail the second task with FetchFailed.
runEvent(makeCompletionEvent(
taskSets.last.tasks(1),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(failure == null, "job should not fail")
val failedStages = scheduler.failedStages.toSeq
assert(failedStages.length == 2)
// Shuffle blocks of "hostA" is lost, so only the first task of `mapRdd` needs to retry.
assert(failedStages.collect {
case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId => stage
}.head.findMissingPartitions() == Seq(0))
// The first task of result stage remains completed.
assert(failedStages.collect {
case stage: ResultStage => stage
}.head.findMissingPartitions() == Seq(1))
}
// A *reliable* (HDFS-style) checkpoint that has already been materialized makes the
// indeterminate RDD reproducible, so the result stage need not be rolled back.
test("SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)") {
withTempDir { dir =>
sc.setCheckpointDir(dir.getCanonicalPath)
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.checkpoint()
// Materialize the checkpoint before running the job.
shuffleMapRdd.doCheckpoint()
assertResultStageNotRollbacked(shuffleMapRdd)
}
}
// A reliable checkpoint that has merely been *requested* is not materialized yet,
// so the RDD is still indeterminate and rollback must fail.
test("SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)") {
withTempDir { dir =>
sc.setCheckpointDir(dir.getCanonicalPath)
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.checkpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
}
// Regression test for SPARK-27164: countApprox on an empty RDD must still produce a
// job-end event (i.e. the scheduled job completes rather than hanging forever).
test("SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete") {
val latch = new CountDownLatch(1)
val jobListener = new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
latch.countDown()
}
}
sc.addSparkListener(jobListener)
sc.emptyRDD[Int].countApprox(10000).getFinalValue()
// The latch is released by the job-end event; time out if the job never completes.
assert(latch.await(10, TimeUnit.SECONDS))
}
// Late successes arriving from a zombie (superseded) task set must be credited to
// the active attempt of the same stage, so the active attempt does not redo work.
test("Completions in zombie tasksets update status of non-zombie taskset") {
val parts = 4
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
assert(taskSets.length == 1)
// Finish the first task of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// The second task of the shuffle map stage failed with FetchFailed.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleDep.shuffleId, 0L, 0, 0, "ignored"),
null))
scheduler.resubmitFailedStages()
assert(taskSets.length == 2)
// The first partition has completed already, so the new attempt only need to run 3 tasks.
assert(taskSets(1).tasks.length == 3)
// Finish the first task of the second attempt of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Finish the third task of the first attempt of the shuffle map stage.
// taskSets(0) is now a zombie; its completion should mark the partition done.
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
assert(tasksMarkedAsCompleted.length == 1)
assert(tasksMarkedAsCompleted.head.partitionId == 2)
// Finish the fourth task of the first attempt of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
assert(tasksMarkedAsCompleted.length == 2)
assert(tasksMarkedAsCompleted.last.partitionId == 3)
// Now the shuffle map stage is completed, and the next stage is submitted.
assert(taskSets.length == 3)
// Finish
complete(taskSets(2), Seq((Success, 42), (Success, 42), (Success, 42), (Success, 42)))
assertDataStructuresEmpty()
}
/**
 * Assert that the supplied TaskSet has exactly the given hosts as its preferred locations.
 * Note that this checks only the host and not the executor ID.
 */
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]): Unit = {
  assert(hosts.size === taskSet.tasks.size)
  // Pair every task with its expected host list and compare as sets (order-insensitive).
  taskSet.tasks.zip(hosts).foreach { case (task, expectedLocs) =>
    assert(task.preferredLocations.map(_.host).toSet === expectedLocs.toSet)
  }
}
/**
 * Asserts that the scheduler has fully cleaned up after the last job: no active
 * jobs, no pending/failed/running stages, and no leftover bookkeeping entries.
 * Called at the end of tests to catch state leaks between jobs.
 */
private def assertDataStructuresEmpty(): Unit = {
assert(scheduler.activeJobs.isEmpty)
assert(scheduler.failedStages.isEmpty)
assert(scheduler.jobIdToActiveJob.isEmpty)
assert(scheduler.jobIdToStageIds.isEmpty)
assert(scheduler.stageIdToStage.isEmpty)
assert(scheduler.runningStages.isEmpty)
assert(scheduler.shuffleIdToMapStage.isEmpty)
assert(scheduler.waitingStages.isEmpty)
assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
private def createFakeTaskInfo(): TaskInfo = {
  val fakeInfo = new TaskInfo(0, 0, 0, 0L, "", "", TaskLocality.ANY, false)
  fakeInfo.finishTime = 1 // give it a non-zero finish time
  fakeInfo
}
// Same as createFakeTaskInfo, but lets the caller pick the task id.
private def createFakeTaskInfoWithId(taskId: Long): TaskInfo = {
  val fakeInfo = new TaskInfo(taskId, 0, 0, 0L, "", "", TaskLocality.ANY, false)
  fakeInfo.finishTime = 1 // give it a non-zero finish time
  fakeInfo
}
/**
 * Builds a CompletionEvent for `task` with the given end reason and result.
 * The accumulator updates are taken from wherever the end reason carries them
 * (task metrics on Success, the failure object otherwise), with
 * `extraAccumUpdates` appended.
 */
private def makeCompletionEvent(
task: Task[_],
reason: TaskEndReason,
result: Any,
extraAccumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
metricPeaks: Array[Long] = Array.empty,
taskInfo: TaskInfo = createFakeTaskInfo()): CompletionEvent = {
// Only these three reasons carry accumulator updates; all others contribute none.
val accumUpdates = reason match {
case Success => task.metrics.accumulators()
case ef: ExceptionFailure => ef.accums
case tk: TaskKilled => tk.accums
case _ => Seq.empty
}
CompletionEvent(task, reason, result, accumUpdates ++ extraAccumUpdates, metricPeaks, taskInfo)
}
}
/** Shared helpers for constructing map statuses and block-manager ids in tests. */
object DAGSchedulerSuite {
// Builds a MapStatus for `host` reporting `reduces` output partitions, each of
// `sizes` bytes, attributed to `mapTaskId`.
def makeMapStatus(host: String, reduces: Int, sizes: Byte = 2, mapTaskId: Long = -1): MapStatus =
MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(sizes), mapTaskId)
// Deterministic BlockManagerId for a host (executor id derived from the host name).
def makeBlockManagerId(host: String): BlockManagerId =
BlockManagerId("exec-" + host, host, 12345)
}
/**
 * Shared one-shot flag used by tests to make exactly one task attempt fail:
 * readers call `_fail.getAndSet(false)` so only the first attempt sees `true`.
 */
object FailThisAttempt {
  val _fail: AtomicBoolean = new AtomicBoolean(true)
}
| caneGuy/spark | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 132,609 |
package com.github.luzhuomi.regex.deriv
import com.github.luzhuomi.scalazparsec.NonBacktracking._
import com.github.luzhuomi.regex.pderiv.{ RE => PD, Common => PC }
import com.github.luzhuomi.regex.deriv.{ RE => D, Common => C }
import com.github.luzhuomi.regex.pderiv.ExtPattern._
import com.github.luzhuomi.regex.pderiv.IntPattern._
import com.github.luzhuomi.regex.pderiv.Parser._
import com.github.luzhuomi.regex.pderiv.Translate._
/** Bridges the pderiv regex AST and greediness flags into the deriv AST. */
object Parse {
  /** Structurally converts a pderiv regular expression into its deriv counterpart. */
  def coerce(r: PD.RE): D.RE = r match {
    case PD.Phi                 => D.Phi
    case PD.Empty               => D.Eps
    case PD.L(c)                => D.L(c)
    case PD.Seq(r1, r2)         => D.Seq(coerce(r1), coerce(r2))
    case PD.Choice(r1, r2, gf)  => D.Choice(List(coerce(r1), coerce(r2)), coerce(gf))
    case PD.Star(inner, gf)     => D.Star(coerce(inner), coerce(gf))
    case PD.Any                 => D.Any
    case PD.Not(cs)             => D.Not(cs.toSet)
  }

  /** Converts a pderiv greediness flag into the deriv representation. */
  def coerce(gf: PC.GFlag): C.GFlag = gf match {
    case PC.Greedy    => C.Greedy
    case PC.NotGreedy => C.NotGreedy
  }

  /**
   * Parses a regex string with the pderiv external-pattern parser and, if the
   * entire input was consumed, translates the result into a deriv RE.
   * Returns None when parsing fails or leaves unconsumed input.
   */
  def parse(regex: String): Option[D.RE] = parseEPat(regex) match {
    case Consumed(Some((extPat, Nil))) => Some(coerce(strip(translate(extPat))))
    case _                             => None // compilation fail
  }
}
| luzhuomi/scala-deriv | src/main/scala/com/github/luzhuomi/regex/deriv/Parse.scala | Scala | apache-2.0 | 1,248 |
package org.workcraft.gui
import javax.swing.JFileChooser
import org.workcraft.services.ModelServiceProvider
import org.workcraft.services.GlobalServiceProvider
import org.workcraft.services.GlobalServiceManager
import org.workcraft.services.ExporterService
import javax.swing.JOptionPane
import java.awt.Window
import javax.swing.filechooser.FileFilter
import java.io.File
import org.workcraft.services.DefaultFormatService
import org.workcraft.services.Format
import org.workcraft.services.ExportJob
import java.io.FileOutputStream
import org.workcraft.scala.effects.IO
import org.workcraft.scala.effects.IO._
import scalaz.Scalaz._
import org.workcraft.services.ExportError
/**
 * Dialog helpers for saving/exporting a model to disk.
 * All user interaction (file chooser, message boxes) is wrapped in scalaz-style IO
 * so the caller decides when the side effects run.
 */
object SaveDialog {
// Save }
// Save as... } use default format
//
// Export... } choose format
// Splits a list of Eithers into (all lefts, all rights), preserving order.
def partitionEither[A, B](list: List[Either[A, B]]) =
list.foldRight((List[A](), List[B]()))((item, lists) => item match {
case Left(left) => (left :: lists._1, lists._2)
case Right(right) => (lists._1, right :: lists._2)
})
// Shows a save-file chooser restricted to `format`'s extension. Appends the
// extension if missing, and re-prompts (recursively) when the user declines to
// overwrite an existing file. Returns None if the dialog is cancelled.
def chooseFile(currentFile: Option[File], parentWindow: Window, format: Format): IO[Option[File]] = ioPure.pure {
val fc = new JFileChooser()
fc.setDialogType(JFileChooser.SAVE_DIALOG)
fc.setFileFilter(new FileFilter {
def accept(file: File) = file.isDirectory || file.getName.endsWith(format.extension)
def getDescription = format.description + "(" + format.extension + ")"
})
fc.setAcceptAllFileFilterUsed(false)
// Start browsing from the directory of the current file, if known.
currentFile.foreach(f => fc.setCurrentDirectory(f.getParentFile))
def choose: Option[File] = if (fc.showSaveDialog(parentWindow) == JFileChooser.APPROVE_OPTION) {
var path = fc.getSelectedFile().getPath()
if (!path.endsWith(format.extension))
path += format.extension
val f = new File(path)
if (!f.exists())
Some(f)
else if (JOptionPane.showConfirmDialog(parentWindow, "The file \"" + f.getName() + "\" already exists. Do you want to overwrite it?", "Confirm",
JOptionPane.YES_NO_OPTION) == JOptionPane.YES_OPTION)
Some(f)
else
choose
} else
None
choose
}
// Prompts for a destination file and, if one is chosen, returns the deferred export
// action for it (None = user cancelled).
def export(parentWindow: Window, model: ModelServiceProvider, format: Format, exporter: ExportJob): IO[Option[IO[Option[ExportError]]]] = chooseFile (None, parentWindow, format).map(_.map(exporter.job(_)))
// "Save as" using the model's default format. Returns the chosen file together
// with the deferred export action, or None when the model has no default format,
// no applicable exporter exists, or the user cancels. Error dialogs are shown
// for the first two cases.
def saveAs(parentWindow: Window, model: ModelServiceProvider, globalServices: GlobalServiceManager): IO[Option[(File, IO[Option[ExportError]])]] = model.implementation(DefaultFormatService) match {
case None => ioPure.pure {
JOptionPane.showMessageDialog(parentWindow, "Current model does not define a default file format.\nTry using export and choosing a specific format instead.", "Error", JOptionPane.ERROR_MESSAGE)
None
}
case Some(format) => {
// Keep only exporters that target the default format, then split them into
// those that reject this model (with a reason) and those that can export it.
val exporters = globalServices.implementations(ExporterService).filter(_.targetFormat == format)
val (unapplicable, applicable) = partitionEither(exporters.map(_.export(model)))
if (applicable.isEmpty) {
val explanation = if (exporters.isEmpty)
"Because no export plug-ins are available for this format."
else
"Because:\n" + unapplicable.map("- " + _.toString).reduceRight(_ + "\n" + _)
ioPure.pure {
JOptionPane.showMessageDialog(parentWindow,
"Workcraft was unable to save this model in its default format:\n" + format.description + " (" + format.extension + ")\n" + explanation, "Error", JOptionPane.ERROR_MESSAGE)
None
}
} else
// TODO: handle more than one exporter
chooseFile(None, parentWindow, format).map(_.map( f => (f, applicable.head.job(f))))
}
}
}
/*
String path;
try {
File destination = new File(path);
Workspace ws = framework.getWorkspace();
final Path<String> wsFrom = we.getWorkspacePath();
Path<String> wsTo = ws.getWorkspacePath(destination);
if(wsTo == null)
wsTo = ws.tempMountExternalFile(destination);
ws.moved(wsFrom, wsTo);
if (we.getModelEntry() != null)
framework.save(we.getModelEntry(), we.getFile().getPath());
else
throw new RuntimeException ("Cannot save workspace entry - it does not have an associated Workcraft model.");
lastSavePath = fc.getCurrentDirectory().getPath();
} catch (SerialisationException e) {
e.printStackTrace();
JOptionPane.showMessageDialog(this, e.getMessage(), "Model export failed", JOptionPane.ERROR_MESSAGE);
} catch (IOException e) {
throw new RuntimeException(e);
}
}*/
| mechkg/workcraft | Gui/src/main/scala/org/workcraft/gui/SaveDialog.scala | Scala | gpl-3.0 | 4,603 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import akka.actor.ActorSystem
import spray.http.StatusCodes._
import spray.httpx.SprayJsonSupport._
import spray.json.DeserializationException
import spray.routing.Directive.pimpApply
import spray.routing.RequestContext
import whisk.common.TransactionId
import whisk.core.database.DocumentConflictException
import whisk.core.database.NoDocumentException
import whisk.core.entitlement._
import whisk.core.entity._
import whisk.core.entity.types.EntityStore
import whisk.http.ErrorResponse.terminate
import whisk.http.Messages._
/** A trait implementing the rules API */
trait WhiskRulesApi extends WhiskCollectionAPI with ReferencedEntities {
services: WhiskServices =>
// This API serves the RULES collection.
protected override val collection = Collection(Collection.RULES)
/** An actor system for timed based futures. */
protected implicit val actorSystem: ActorSystem
/** Database service to CRUD rules. */
protected val entityStore: EntityStore
/** Path segment under which the Rules REST API is mounted. */
protected val rulesPath = "rules"
/**
 * Creates or updates rule if it already exists. The PUT content is deserialized into a WhiskRulePut
 * which is a subset of WhiskRule (it eschews the namespace, entity name and status since the former
 * are derived from the authenticated user and the URI and the status is managed automatically).
 * The WhiskRulePut is merged with the existing WhiskRule in the datastore, overriding old values
 * with new values that are defined. Any values not defined in the PUT content are replaced with
 * old values.
 *
 * The rule will not update if the status of the entity in the datastore is not INACTIVE. It rejects
 * such requests with Conflict.
 *
 * The create/update is also guarded by a predicate that confirm the trigger and action are valid.
 * Otherwise rejects the request with Bad Request and an appropriate message. It is true that the
 * trigger/action may be deleted after creation but at the very least confirming dependences here
 * prevents use errors where a rule is created with an invalid trigger/action which then fails
 * testing (fire a trigger and expect an action activation to occur).
 *
 * Responses are one of (Code, Message)
 * - 200 WhiskRule as JSON
 * - 400 Bad Request
 * - 409 Conflict
 * - 500 Internal Server Error
 */
override def create(user: Identity, entityName: FullyQualifiedEntityName)(implicit transid: TransactionId) = {
parameter('overwrite ? false) { overwrite =>
entity(as[WhiskRulePut]) { content =>
// Resolve relative trigger/action names against the rule's namespace.
val request = content.resolve(entityName.namespace)
// The subject must be entitled to READ the referenced trigger and action
// before the rule may be created or updated.
onComplete(entitlementProvider.check(user, Privilege.READ, referencedEntities(request))) {
case Success(_) =>
putEntity(WhiskRule, entityStore, entityName.toDocId, overwrite,
update(request) _, () => { create(request, entityName) },
postProcess = Some { rule: WhiskRule =>
// Newly created/updated rules are reported as ACTIVE.
completeAsRuleResponse(rule, Status.ACTIVE)
})
case Failure(f) =>
handleEntitlementFailure(f)
}
}
}
}
/**
 * Toggles rule status from enabled -> disabled and vice versa. The action are not confirmed
 * to still exist. This is deferred to trigger activation which will fail to post activations
 * for non-existent actions.
 *
 * Responses are one of (Code, Message)
 * - 200 OK rule in desired state
 * - 202 Accepted rule state change accepted
 * - 404 Not Found
 * - 409 Conflict
 * - 500 Internal Server Error
 */
override def activate(user: Identity, entityName: FullyQualifiedEntityName, env: Option[Parameters])(implicit transid: TransactionId) = {
extractStatusRequest { requestedState =>
val docid = entityName.toDocId
getEntity(WhiskRule, entityStore, docid, Some {
rule: WhiskRule =>
val ruleName = rule.fullyQualifiedName(false)
// The rule's current status is derived from the trigger record that the
// rule is attached to, not from the rule document itself.
val changeStatus = getTrigger(rule.trigger) map { trigger =>
getStatus(trigger, ruleName)
} flatMap { oldStatus =>
if (requestedState != oldStatus) {
logging.info(this, s"[POST] rule state change initiated: ${oldStatus} -> $requestedState")
Future successful requestedState
} else {
logging.info(this, s"[POST] rule state will not be changed, the requested state is the same as the old state: ${oldStatus} -> $requestedState")
Future failed { IgnoredRuleActivation(requestedState == oldStatus) }
}
} flatMap {
case (newStatus) =>
logging.info(this, s"[POST] attempting to set rule state to: ${newStatus}")
// Re-fetch the trigger and swap in the updated rule link with the new status.
WhiskTrigger.get(entityStore, rule.trigger.toDocId) flatMap { trigger =>
val newTrigger = trigger.removeRule(ruleName)
val triggerLink = ReducedRule(rule.action, newStatus)
WhiskTrigger.put(entityStore, newTrigger.addRule(ruleName, triggerLink))
}
}
onComplete(changeStatus) {
case Success(response) =>
complete(OK)
case Failure(t) => t match {
case _: DocumentConflictException =>
logging.info(this, s"[POST] rule update conflict")
terminate(Conflict, conflictMessage)
case IgnoredRuleActivation(ok) =>
// Requested state equals current state: report OK without writing.
logging.info(this, s"[POST] rule update ignored")
if (ok) complete(OK) else terminate(Conflict)
case _: NoDocumentException =>
logging.info(this, s"[POST] the trigger attached to the rule doesn't exist")
terminate(NotFound, "Only rules with existing triggers can be activated")
case _: DeserializationException =>
logging.error(this, s"[POST] rule update failed: ${t.getMessage}")
terminate(InternalServerError, corruptedEntity)
case _: Throwable =>
logging.error(this, s"[POST] rule update failed: ${t.getMessage}")
terminate(InternalServerError)
}
}
})
}
}
/**
 * Deletes rule iff rule is inactive.
 *
 * Responses are one of (Code, Message)
 * - 200 WhiskRule as JSON
 * - 404 Not Found
 * - 409 Conflict
 * - 500 Internal Server Error
 */
override def remove(user: Identity, entityName: FullyQualifiedEntityName)(implicit transid: TransactionId) = {
// NOTE(review): the "iff inactive" precondition stated above is not checked in this
// visible code path; presumably deleteEntity enforces it — confirm.
deleteEntity(WhiskRule, entityStore, entityName.toDocId, (r: WhiskRule) => {
val ruleName = FullyQualifiedEntityName(r.namespace, r.name)
// Before deleting the rule, detach it from its trigger (if the trigger still exists).
getTrigger(r.trigger) map { trigger =>
(getStatus(trigger, ruleName), trigger)
} flatMap {
case (status, triggerOpt) =>
triggerOpt map { trigger =>
WhiskTrigger.put(entityStore, trigger.removeRule(ruleName)) map { _ => {} }
} getOrElse Future.successful({})
}
}, postProcess = Some { rule: WhiskRule =>
// Deleted rules are reported as INACTIVE.
completeAsRuleResponse(rule, Status.INACTIVE)
})
}
/**
 * Gets rule. The rule name is prefixed with the namespace to create the primary index key.
 * The returned rule is augmented with its current status, which is read from the
 * trigger record the rule is attached to.
 *
 * Responses are one of (Code, Message)
 * - 200 WhiskRule has JSON
 * - 404 Not Found
 * - 500 Internal Server Error
 */
override def fetch(user: Identity, entityName: FullyQualifiedEntityName, env: Option[Parameters])(implicit transid: TransactionId) = {
getEntity(WhiskRule, entityStore, entityName.toDocId, Some { rule: WhiskRule =>
val getRuleWithStatus = getTrigger(rule.trigger) map { trigger =>
getStatus(trigger, entityName)
} map { status =>
rule.withStatus(status)
}
onComplete(getRuleWithStatus) {
case Success(r) => complete(OK, r)
case Failure(t) => terminate(InternalServerError)
}
})
}
/**
 * Gets all rules in namespace.
 *
 * Responses are one of (Code, Message)
 * - 200 [] or [WhiskRule as JSON]
 * - 500 Internal Server Error
 */
override def list(user: Identity, namespace: EntityPath, excludePrivate: Boolean)(implicit transid: TransactionId) = {
// for consistency, all the collections should support the same list API
// but because supporting docs on actions is difficult, the API does not
// offer an option to fetch entities with full docs yet; see comment in
// Actions API for more.
val docs = false
parameter('skip ? 0, 'limit ? collection.listLimit, 'count ? false) {
(skip, limit, count) =>
listEntities {
WhiskRule.listCollectionInNamespace(entityStore, namespace, skip, limit, docs) map {
list =>
// With docs == false (always, see above) only the summary branch is taken.
val rules = if (docs) {
list.right.get map { WhiskRule.serdes.write(_) }
} else list.left.get
FilterEntityList.filter(rules, excludePrivate)
}
}
}
}
/**
 * Creates a WhiskRule from PUT content, generating default values where necessary,
 * and links the new rule into its trigger's record (with status ACTIVE).
 *
 * @param content the PUT payload; must reference both a trigger and an action
 * @param ruleName fully qualified name of the new rule
 * @return the new rule once the trigger link is persisted; fails with
 *         RejectRequest(BadRequest) when trigger/action are missing or do not exist
 */
private def create(content: WhiskRulePut, ruleName: FullyQualifiedEntityName)(implicit transid: TransactionId): Future[WhiskRule] = {
  (content.trigger, content.action) match {
    case (Some(triggerName), Some(actionName)) =>
      checkTriggerAndActionExist(triggerName, actionName) recoverWith {
        case t => Future.failed(RejectRequest(BadRequest, t))
      } flatMap {
        case (trigger, _) =>
          val rule = WhiskRule(
            ruleName.path,
            ruleName.name,
            triggerName,
            actionName,
            content.version getOrElse SemVer(),
            content.publish getOrElse false,
            content.annotations getOrElse Parameters())
          // Record the rule on the trigger so activations can find it.
          val triggerLink = ReducedRule(actionName, Status.ACTIVE)
          logging.info(this, s"about to put ${trigger.addRule(ruleName, triggerLink)}")
          WhiskTrigger.put(entityStore, trigger.addRule(ruleName, triggerLink)) map { _ => rule }
      }
    case _ =>
      Future.failed(RejectRequest(BadRequest, "rule requires a valid trigger and a valid action"))
  }
}
/** Updates a WhiskTrigger from PUT content, merging old trigger where necessary. */
private def update(content: WhiskRulePut)(rule: WhiskRule)(implicit transid: TransactionId): Future[WhiskRule] = {
  val ruleName = FullyQualifiedEntityName(rule.namespace, rule.name)
  val oldTriggerName = rule.trigger
  getTrigger(oldTriggerName) flatMap { oldTriggerOpt =>
    // Fall back to the stored trigger/action references when the PUT omits them.
    val newTriggerEntity = content.trigger getOrElse rule.trigger
    val newTriggerName = newTriggerEntity
    val actionEntity = content.action getOrElse rule.action
    val actionName = actionEntity
    checkTriggerAndActionExist(newTriggerName, actionName) recoverWith {
      case t => Future.failed(RejectRequest(BadRequest, t))
    } flatMap {
      case (newTrigger, newAction) =>
        // Merge PUT content over the stored rule; keep the doc revision so the
        // datastore update targets the same document version.
        val r = WhiskRule(
          rule.namespace,
          rule.name,
          newTriggerEntity,
          actionEntity,
          content.version getOrElse rule.version.upPatch,
          content.publish getOrElse rule.publish,
          content.annotations getOrElse rule.annotations).
          revision[WhiskRule](rule.docinfo.rev)
        // Deletes reference from the old trigger iff it is different from the new one
        val deleteOldLink = for {
          isDifferentTrigger <- content.trigger.filter(_ => newTriggerName != oldTriggerName)
          oldTrigger <- oldTriggerOpt
        } yield {
          WhiskTrigger.put(entityStore, oldTrigger.removeRule(ruleName))
        }
        // The rule is (re)attached to the new trigger in INACTIVE state.
        val triggerLink = ReducedRule(actionName, Status.INACTIVE)
        val update = WhiskTrigger.put(entityStore, newTrigger.addRule(ruleName, triggerLink))
        // Unlink (if any) and relink run concurrently; the merged rule is returned
        // only when both writes succeed.
        Future.sequence(Seq(deleteOldLink.getOrElse(Future.successful(true)), update)).map(_ => r)
    }
  }
}
/**
 * Gets a WhiskTrigger for the given fully qualified name. Gracefully falls back
 * to None when the trigger does not exist or its stored record fails to deserialize.
 *
 * @param t fully qualified name of the trigger to fetch
 * @return the WhiskTrigger iff found, else None
 */
private def getTrigger(t: FullyQualifiedEntityName)(implicit transid: TransactionId): Future[Option[WhiskTrigger]] = {
  WhiskTrigger
    .get(entityStore, t.toDocId)
    .map(Some(_))
    .recover {
      case _: NoDocumentException            => None
      case DeserializationException(_, _, _) => None
    }
}
/**
 * Resolves a rule's Status from the trigger it is attached to. Defaults to INACTIVE
 * when the trigger is absent, carries no rules, or does not reference this rule.
 *
 * @param triggerOpt the trigger record, if one was found
 * @param ruleName fully qualified name of the rule being looked up
 * @return the status recorded on the trigger, or Status.INACTIVE
 */
private def getStatus(triggerOpt: Option[WhiskTrigger], ruleName: FullyQualifiedEntityName)(implicit transid: TransactionId): Status = {
  triggerOpt
    .flatMap(_.rules)
    .flatMap(_.get(ruleName))
    .map(_.status)
    .getOrElse(Status.INACTIVE)
}
/**
 * Completes an HTTP request with a WhiskRule enriched with the computed Status.
 *
 * @param rule the rule to send
 * @param status the status to include in the response (defaults to INACTIVE)
 */
private def completeAsRuleResponse(rule: WhiskRule, status: Status = Status.INACTIVE): RequestContext => Unit = {
  val payload = rule.withStatus(status)
  complete(OK, payload)
}
/**
 * Checks if trigger and action are valid documents (that is, they exist) in the datastore.
 *
 * @param trigger the trigger id
 * @param action the action id
 * @return future that completes with references trigger and action if they exist
 */
private def checkTriggerAndActionExist(trigger: FullyQualifiedEntityName, action: FullyQualifiedEntityName)(
  implicit transid: TransactionId): Future[(WhiskTrigger, WhiskAction)] = {
  for {
    // Rewrap lookup failures with messages that name the missing/corrupted entity
    // so the caller can surface them directly.
    triggerExists <- WhiskTrigger.get(entityStore, trigger.toDocId) recoverWith {
      case _: NoDocumentException => Future.failed {
        new NoDocumentException(s"trigger ${trigger.qualifiedNameWithLeadingSlash} does not exist")
      }
      case _: DeserializationException => Future.failed {
        new DeserializationException(s"trigger ${trigger.qualifiedNameWithLeadingSlash} is corrupted")
      }
    }
    // Actions may be package-qualified; resolve the package binding before fetching.
    actionExists <- WhiskAction.resolveAction(entityStore, action) flatMap {
      resolvedName => WhiskAction.get(entityStore, resolvedName.toDocId)
    } recoverWith {
      case _: NoDocumentException => Future.failed {
        new NoDocumentException(s"action ${action.qualifiedNameWithLeadingSlash} does not exist")
      }
      case _: DeserializationException => Future.failed {
        new DeserializationException(s"action ${action.qualifiedNameWithLeadingSlash} is corrupted")
      }
    }
  } yield (triggerExists, actionExists)
}
/** Extracts status request subject to allowed values. */
private def extractStatusRequest = {
  // The restricted serdes limits which Status values deserialize successfully,
  // rejecting anything outside the allowed set at unmarshalling time.
  implicit val statusSerdes = Status.serdesRestricted
  entity(as[Status])
}
}
private case class IgnoredRuleActivation(noop: Boolean) extends Throwable
| xin-cai/openwhisk | core/controller/src/main/scala/whisk/core/controller/Rules.scala | Scala | apache-2.0 | 17,907 |
/*
* Copyright (C) 2015 47 Degrees, LLC http://47deg.com [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.fortysevendeg.android.functionalview.ui.main
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
trait MainBusinessLogic {
  import MainBusinessLogic._

  /**
   * Simulates a slow backend call: after a one-second delay, returns five
   * unselected items (indexes 1..5) for every category.
   */
  def fetchAsyncData: Future[Seq[Item]] = Future {
    Thread.sleep(1000)
    for {
      category <- categories
      index <- 1 to 5
    } yield Item(category, index, selected = false)
  }
}
object MainBusinessLogic {
  // Category names used to build the demo data set.
  val animals = "animals"
  val city = "city"
  val food = "food"
  val people = "people"
  val sports = "sports"
  val technics = "technics"
  val nature = "nature"
  // NOTE(review): 'nature' is declared above but not listed here — confirm
  // whether its omission from 'categories' is intentional.
  val categories = Seq(animals, city, food, people, sports, technics)
}
| 47deg/functional-views-android | src/main/scala/com/fortysevendeg/android/functionalview/ui/main/MainBusinessLogic.scala | Scala | apache-2.0 | 1,312 |
package com.etsy.cascading
import com.twitter.scalding._
import org.specs._
// Job under test: traces its raw input to the "subsample" tap while mapping
// each (x, y) pair to their sum.
class InstrumentedMapJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("subsample"))
    .mapTo(('x, 'y) -> 'z){ x : (Int, Int) => x._1 + x._2 }
    .write(Tsv("output"))
}

// Verifies that a map-only flow writes both the mapped results and an
// untouched traced copy of the input.
class InstrumentedMapTest extends Specification with TupleConversions {
  import Dsl._
  "Instrumented flow with only mappers" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedMapJob")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("1","3"), ("2","9")))
        .sink[(Int)](Tsv("output")) { outBuf =>
          // Each output value is the sum of one input pair.
          val unordered = outBuf.toSet
          unordered.size must be_==(3)
          unordered((1)) must be_==(true)
          unordered((4)) must be_==(true)
          unordered((11)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("subsample")) { outBuf =>
          // The trace sink must mirror the original input tuples.
          val unordered = outBuf.toSet
          unordered.size must be_==(3)
          unordered((0,1)) must be_==(true)
          unordered((1,3)) must be_==(true)
          unordered((2,9)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
// Job under test: traces the input to "foo/input", then sums y per x and
// keeps only groups with x < 2.
class InstrumentedGroupByJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("foo/input")).groupBy('x){ _.sum('y -> 'y) }
    .filter('x) { x : Int => x < 2 }
    .map('y -> 'y){ y : Double => y.toInt }
    .project('x, 'y)
    .write(Tsv("output"))
}

// Verifies tracing across an aggregating groupBy: the trace sink should contain
// only the input tuples that survive into the final output groups.
class InstrumentedGroupByTest extends Specification with TupleConversions {
  import Dsl._
  "Instrumented flow with aggregation" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedGroupByJob")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("0","3"), ("1","9"), ("1", "1"), ("2", "5"), ("2", "3"), ("3", "3")))
        .sink[(Int,Int)](Tsv("output")) { outBuf =>
          // Sums for the surviving groups x = 0 and x = 1.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((0,4)) must be_==(true)
          unordered((1,10)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("foo/input")) { outBuf =>
          // Traced inputs: only tuples belonging to the surviving groups.
          val unordered = outBuf.toSet
          unordered.size must be_==(4)
          unordered((0,1)) must be_==(true)
          unordered((0,3)) must be_==(true)
          unordered((1,1)) must be_==(true)
          unordered((1,9)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
// Job under test: a groupBy that performs no aggregation (only sets reducers),
// followed by a filter — exercises tracing through a no-op Every-less group.
class InstrumentedGroupByNopJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("foo/input"))
    .groupBy('x){ _.reducers(1) }
    .filter('x) { x : Int => x < 2 }
    .write(Tsv("output"))
}

// Verifies tracing through a grouping step without any aggregator ("every").
class InstrumentedGroupByNopTest extends Specification with TupleConversions {
  import Dsl._
  "instrumented flow with grouping and no every" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedGroupByNopJob")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("0","3"), ("1","9"), ("1", "1"), ("2", "5"), ("2", "3"), ("3", "3")))
        .sink[(Int,Int)](Tsv("output")) { outBuf =>
          // With no aggregation, all tuples with x < 2 pass through unchanged.
          val unordered = outBuf.toSet
          unordered.size must be_==(4)
          unordered((0,1)) must be_==(true)
          unordered((0,3)) must be_==(true)
          unordered((1,1)) must be_==(true)
          unordered((1,9)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("foo/input")) { outBuf =>
          // Trace sink contains the same surviving input tuples.
          val unordered = outBuf.toSet
          unordered.size must be_==(4)
          unordered((0,1)) must be_==(true)
          unordered((0,3)) must be_==(true)
          unordered((1,1)) must be_==(true)
          unordered((1,9)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
// Job under test: aggregates via foldLeft (rather than a built-in aggregator)
// inside the groupBy, then filters to x < 2.
class InstrumentedGroupByFoldJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("foo/input"))
    .groupBy('x){ _.foldLeft[Double,Int]('y -> 'y)(0.0){ (a : Double, b : Int) => a + b } }
    .filter('x) { x : Int => x < 2 }
    .map('y -> 'y){ y : Double => y.toInt }
    .write(Tsv("output"))
}

// Verifies tracing through a fold-based grouping step.
class InstrumentedGroupByFoldTest extends Specification with TupleConversions {
  import Dsl._
  "instrumented flow with grouping and no aggregation" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedGroupByFoldJob")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("0","3"), ("1","9"), ("1", "1"), ("2", "5"), ("2", "3"), ("3", "3")))
        .sink[(Int,Int)](Tsv("output")) { outBuf =>
          // Folded sums for the surviving groups x = 0 and x = 1.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((0,4)) must be_==(true)
          unordered((1,10)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("foo/input")) { outBuf =>
          // Traced inputs belonging to the surviving groups.
          val unordered = outBuf.toSet
          unordered.size must be_==(4)
          unordered((0,1)) must be_==(true)
          unordered((0,3)) must be_==(true)
          unordered((1,1)) must be_==(true)
          unordered((1,9)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
// Job under test: traces both sides of a reduce-side join (joinWithSmaller).
class InstrumentedJoinJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("foo/input"))
    .joinWithSmaller('x -> 'x, trace(Tsv("input2", ('x, 'z)), Tsv("bar/input2")))
    .project('x, 'y, 'z)
    .write(Tsv("output"))
}

// Verifies that tracing a coGroup captures only the tuples from each side that
// actually participate in the join ("write_sources" enables source tracing).
class InstrumentedJoinTest extends Specification with TupleConversions {
  import Dsl._
  "instrumented coGroup" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedJoinJob")
        .arg("write_sources", "true")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("1","3"), ("2","9"), ("10", "0")))
        .source(Tsv("input2", ('x, 'z)), List(("5","1"), ("1","4"), ("2","7")))
        .sink[(Int,Int,Int)](Tsv("output")) { outBuf =>
          // Only keys 1 and 2 appear on both sides.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,3,4)) must be_==(true)
          unordered((2,9,7)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("foo/input")) { outBuf =>
          // Left-side trace: tuples that joined.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,3)) must be_==(true)
          unordered((2,9)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("bar/input2")) { outBuf =>
          // Right-side trace: tuples that joined.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,4)) must be_==(true)
          unordered((2,7)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
// Job under test: traces both sides of a map-side (hash) join via joinWithTiny.
class InstrumentedJoinTinyJob(args : Args) extends TracingJob(args) {
  trace(Tsv("input", ('x, 'y)), Tsv("foo/input"))
    .joinWithTiny('x -> 'x, trace(Tsv("input2", ('x, 'z)), Tsv("bar/input2")))
    .project('x, 'y, 'z)
    .write(Tsv("output"))
}

// Same expectations as the coGroup test, but exercising the hash-join path.
class InstrumentedJoinTinyTest extends Specification with TupleConversions {
  import Dsl._
  "instrumented hashjoin" should {
    //Set up the job:
    "work" in {
      JobTest("com.etsy.cascading.InstrumentedJoinTinyJob")
        .arg("write_sources", "true")
        .source(Tsv("input", ('x,'y)), List(("0","1"), ("1","3"), ("2","9"), ("10", "0")))
        .source(Tsv("input2", ('x, 'z)), List(("5","1"), ("1","4"), ("2","7")))
        .sink[(Int,Int,Int)](Tsv("output")) { outBuf =>
          // Only keys 1 and 2 appear on both sides.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,3,4)) must be_==(true)
          unordered((2,9,7)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("foo/input")) { outBuf =>
          // Left-side trace: tuples that joined.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,3)) must be_==(true)
          unordered((2,9)) must be_==(true)
        }
        .sink[(Int,Int)](Tsv("bar/input2")) { outBuf =>
          // Right-side trace: tuples that joined.
          val unordered = outBuf.toSet
          unordered.size must be_==(2)
          unordered((1,4)) must be_==(true)
          unordered((2,7)) must be_==(true)
        }
        .runHadoop
        .finish
    }
  }
}
| rjhall/cascading-trace | src/test/scala/com/etsy/cascading/InputTracingTest.scala | Scala | mit | 7,876 |
package dpla.ingestion3.enrichments
import dpla.ingestion3.enrichments.normalizations.StringNormalizationUtils._
import dpla.ingestion3.enrichments.normalizations.FilterList
import org.scalatest.{BeforeAndAfter, FlatSpec}
/**
 * Tests for the string-normalization extension methods provided by
 * StringNormalizationUtils: geocoordinate cleanup, sentence casing, delimiter
 * splitting, HTML stripping, punctuation/whitespace cleanup, character limiting,
 * block/allow-list filtering, and bracket/period/quote stripping.
 */
class StringNormalizationUtilsTest extends FlatSpec with BeforeAndAfter {
  // Helper objects

  /** File-format terms that the block filter should strip out of values. */
  object BlockList extends FilterList {
    override val termList: Set[String] = Set(
      "jpeg",
      "jpeg/2000",
      "tiff",
      "bitmap image",
      "application+pdf"
    )
  }

  /** Media-type terms that the allow filter should let through unchanged. */
  object AllowList extends FilterList {
    override val termList: Set[String] = Set(
      "moving image",
      "film",
      "audio",
      "image"
    )
  }

  // Tests
  "cleanupGeocoordinates" should "strip out N and W" in {
    val originalValue = "35.58343N, 83.50822W"
    val enrichedValue = originalValue.cleanupGeocoordinates
    val expectedValue = "35.58343, 83.50822"
    assert(enrichedValue === expectedValue)
  }
  it should "erase when W and N in wrong order" in {
    // Latitude tagged W / longitude tagged N is nonsensical, so the value is dropped.
    val originalValue = "35.58343W, 83.50822N"
    val enrichedValue = originalValue.cleanupGeocoordinates
    val expectedValue = ""
    assert(enrichedValue === expectedValue)
  }
  it should "strip out N and W when alone" in {
    val originalValue = "N, W"
    val enrichedValue = originalValue.cleanupGeocoordinates
    val expectedValue = ""
    assert(enrichedValue === expectedValue)
  }
  it should "pass through coordinates without cardinal directions" in {
    val originalValue = "35.58343, 83.50822"
    val enrichedValue = originalValue.cleanupGeocoordinates
    val expectedValue = "35.58343, 83.50822"
    assert(enrichedValue === expectedValue)
  }
  it should "not passthrough craziness" in {
    // Non-coordinate text is erased entirely.
    val originalValue = "pork chop sandwiches"
    val enrichedValue = originalValue.cleanupGeocoordinates
    val expectedValue = ""
    assert(enrichedValue === expectedValue)
  }

  "convertToSentenceCase" should "capitalize the first character in each sentence" in {
    val originalValue = "this is a sentence about Moomins. this is another about Snorks."
    val enrichedValue = originalValue.convertToSentenceCase
    val expectedValue = "This is a sentence about Moomins. This is another about Snorks."
    assert(enrichedValue === expectedValue)
  }

  "splitAtDelimiter" should "split a string around semi-colon" in {
    val originalValue = "subject-one; subject-two; subject-three"
    val enrichedValue = originalValue.splitAtDelimiter(";")
    val expectedValue = Array("subject-one", "subject-two", "subject-three")
    assert(enrichedValue === expectedValue)
  }
  it should "drop empty values" in {
    val originalValue = "subject-one; ; subject-three"
    val enrichedValue = originalValue.splitAtDelimiter(";")
    val expectedValue = Array("subject-one", "subject-three")
    assert(enrichedValue === expectedValue)
  }
  it should "split a string around comma." in {
    // Only the requested delimiter splits; the semi-colon stays embedded.
    val originalValue = "subject-one, subject-two; subject-three"
    val enrichedValue = originalValue.splitAtDelimiter(",")
    val expectedValue = Array("subject-one", "subject-two; subject-three")
    assert(enrichedValue === expectedValue)
  }

  // FIX(review): subject name previously misspelled as "stripHMTL".
  "stripHTML" should "remove html from a string" in {
    val expectedValue = "foo bar baz buzz"
    val originalValue = f"<p>$expectedValue%s</p>"
    val enrichedValue = originalValue.stripHTML
    assert(enrichedValue === expectedValue)
  }
  it should "remove unbalanced and invalid html from a given string" in {
    val expectedValue = "foo bar baz buzz"
    val originalValue = f"<p>$expectedValue%s</i><html>"
    val enrichedValue = originalValue.stripHTML
    assert(enrichedValue === expectedValue)
  }
  it should "not modify strings that do not contain html markup" in {
    val expectedValue = "foo bar baz buzz"
    val originalValue = expectedValue
    val enrichedValue = originalValue.stripHTML
    assert(enrichedValue === expectedValue)
  }
  it should "not emit HTML entities" in {
    // NOTE(review): original and expected are identical here, so this only checks
    // that a bare '>' passes through; the input may once have contained an HTML
    // entity (e.g. "&gt;") that was decoded by a transformation of this file —
    // verify against upstream history.
    val expectedValue = "foo bar baz > buzz"
    val originalValue = expectedValue
    val enrichedValue = originalValue.stripHTML
    assert(enrichedValue === expectedValue)
  }
  it should "not turn html entities into html" in {
    // NOTE(review): the input contains a literal <p> tag, not an entity, despite
    // the test name — possibly "&lt;p&gt;" decoded by a transformation of this
    // file; verify against upstream history.
    val originalValue = "foo bar baz <p> buzz"
    val expectedValue = "foo bar baz buzz"
    val enrichedValue = originalValue.stripHTML
    assert(enrichedValue === expectedValue)
  }

  "cleanupLeadingPunctuation" should "strip leading punctuation from a string" in {
    val originalValue = ": ;; -- It's @@ OK --- "
    val enrichedValue = originalValue.cleanupLeadingPunctuation
    val expectedValue = "It's @@ OK --- "
    assert(enrichedValue === expectedValue)
  }
  it should "remove whitespace" in {
    val originalValue = " A good string "
    val enrichedValue = originalValue.cleanupLeadingPunctuation
    val expectedValue = "A good string "
    assert(enrichedValue === expectedValue)
  }
  it should "remove tabs" in {
    // NOTE(review): these literals use doubled backslashes ("\\t"), i.e. literal
    // backslash-t text rather than tab characters — possibly an escaping artifact;
    // confirm against the implementation's intent.
    val originalValue = "\\t\\t\\tA \\tgood string "
    val enrichedValue = originalValue.cleanupLeadingPunctuation
    val expectedValue = "A \\tgood string "
    assert(enrichedValue === expectedValue)
  }
  it should "remove new line characters" in {
    val originalValue = "\\n\\n\\r\\nA good string "
    val enrichedValue = originalValue.cleanupLeadingPunctuation
    val expectedValue = "A good string "
    assert(enrichedValue === expectedValue)
  }
  it should "do nothing if there is no punctuation" in {
    val originalValue = "A good string "
    val enrichedValue = originalValue.cleanupLeadingPunctuation
    val expectedValue = "A good string "
    assert(enrichedValue === expectedValue)
  }

  "cleanupEndingPunctuation" should "strip punctuation following the last letter or digit character" in {
    val originalValue = ".. It's OK ;; .. ,, // \\n"
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = ".. It's OK.."
    assert(enrichedValue === expectedValue)
  }
  it should "not remove .) from Synagogues -- Washington (D.C.)" in {
    val originalValue = "Synagogues -- Washington (D.C.)"
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = "Synagogues -- Washington (D.C.)"
    assert(enrichedValue === expectedValue)
  }
  it should "remove whitespace" in {
    val originalValue = "A good string "
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = "A good string"
    assert(enrichedValue === expectedValue)
  }
  it should "remove tabs" in {
    val originalValue = "A \\tgood string\\t\\t\\t"
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = "A \\tgood string"
    assert(enrichedValue === expectedValue)
  }
  it should "remove new line characters" in {
    val originalValue = "A good string\\n\\n\\r\\n"
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = "A good string"
    assert(enrichedValue === expectedValue)
  }
  it should "do nothing if there is no ending punctuation" in {
    val originalValue = "A good string"
    val enrichedValue = originalValue.cleanupEndingPunctuation
    val expectedValue = "A good string"
    assert(enrichedValue === expectedValue)
  }

  "limitCharacters" should "limit the number of characters in long strings" in {
    val longString = "Now is the time for all good people to come to the aid of the party."
    val enrichedValue = longString.limitCharacters(10)
    assert(enrichedValue.size === 10)
  }
  it should "not limit strings shorter or equal to the limit" in {
    val shortString = "Now is the time"
    val enrichedValue = shortString.limitCharacters(shortString.length)
    assert(enrichedValue.size === shortString.length)
  }

  "reduceWhitespace" should "reduce two whitespaces to one whitespace" in {
    val originalValue = "foo  bar"
    val enrichedValue = originalValue.reduceWhitespace
    assert(enrichedValue === "foo bar")
  }
  it should "reduce five whitespaces to one whitespace" in {
    val originalValue = "foo     bar"
    val enrichedValue = originalValue.reduceWhitespace
    assert(enrichedValue === "foo bar")
  }
  it should "reduce multiple occurrences duplicate whitespace to single whitespace" in {
    val originalValue = "foo  bar  choo"
    val enrichedValue = originalValue.reduceWhitespace
    assert(enrichedValue === "foo bar choo")
  }
  // FIX(review): description previously read "reduce remove leading and trailing white space".
  it should "remove leading and trailing white space" in {
    val originalValue = " foo bar choo "
    val enrichedValue = originalValue.reduceWhitespace
    assert(enrichedValue === "foo bar choo")
  }

  "capitalizeFirstChar" should "not capitalize the b in '3 blind mice'" in {
    // The first alphabetic character is only capitalized when it begins the value.
    val originalValue = "3 blind mice"
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === "3 blind mice")
  }
  it should "capitalize the t in 'three blind mice'" in {
    val originalValue = "three blind mice"
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === "Three blind mice")
  }
  it should "capitalize the v in '...vacationland...'" in {
    val originalValue = "...vacationland..."
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === "...Vacationland...")
  }
  it should "capitalize the t in ' telephone'" in {
    val originalValue = " telephone"
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === " Telephone")
  }
  it should "not capitalize anything in a string with alphanumeric characters" in {
    val originalValue = "...@..|}"
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === "...@..|}")
  }
  it should "not capitalize anything in an empty string" in {
    val originalValue = ""
    val enrichedValue = originalValue.capitalizeFirstChar
    assert(enrichedValue === "")
  }

  "applyBlockFilter" should "remove a block term" in {
    val originalValue = "jpeg"
    val enrichedValue = originalValue.applyBlockFilter(BlockList.termList)
    assert(enrichedValue === "")
  }
  it should "remove a block term if surrounded by extra white space" in {
    val originalValue = " jpeg "
    val enrichedValue = originalValue.applyBlockFilter(BlockList.termList)
    assert(enrichedValue === "")
  }
  it should "remove a blocked term from a string" in {
    val originalValue = "jpeg photo"
    val enrichedValue = originalValue.applyBlockFilter(BlockList.termList)
    assert(enrichedValue === "photo")
  }
  it should "return the original string if it does not contain a blocked term" in {
    val originalValue = "photo"
    val enrichedValue = originalValue.applyBlockFilter(BlockList.termList)
    assert(enrichedValue === "photo")
  }

  "applyAllowFilter" should "return the original string if it matches the allow list" in {
    val originalValue = "moving image"
    val enrichedValue = originalValue.applyAllowFilter(AllowList.termList)
    assert(enrichedValue === "moving image")
  }
  it should "not match if the string contains an allowed term" in {
    // Allow matching is term-based, so a superstring of an allowed term survives.
    val originalValue = "film 8mm"
    val enrichedValue = originalValue.applyAllowFilter(AllowList.termList)
    assert(enrichedValue === "film 8mm")
  }
  it should "return an empty string if the original string is not on the allow list" in {
    val originalValue = "dvd"
    val enrichedValue = originalValue.applyAllowFilter(AllowList.termList)
    assert(enrichedValue === "")
  }
  it should "match and remove extraneous white space (' moving image ' returns 'moving image')" in {
    val originalValue = " moving image "
    val enrichedValue = originalValue.applyAllowFilter(AllowList.termList)
    assert(enrichedValue === "moving image")
  }

  "stripBrackets" should "remove leading and trailing ( )" in {
    val originalValue = "(hello)"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "hello")
  }
  it should "remove [ ] from [Discharge of Four Army Reserve Soldiers]" in {
    val originalValue = "[Discharge of Four Army Reserve Soldiers]"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "Discharge of Four Army Reserve Soldiers")
  }
  it should "remove leading and trailing [ ]" in {
    val originalValue = "[hello]"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "hello")
  }
  it should "remove leading and trailing { }" in {
    val originalValue = "{hello}"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "hello")
  }
  it should "ignore whitespace and remove leading and trailing { } " in {
    val originalValue = " \\t{hello} \\n"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "hello")
  }
  it should "leave interior brackets alone" in {
    val originalValue = "Hello ()[]{} Goodbye"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "Hello ()[]{} Goodbye")
  }
  it should "remove surrounding brackets and interior brackets alone" in {
    val originalValue = "( {Hello ()[]{} Goodbye)"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "{Hello ()[]{} Goodbye")
  }
  it should "do nothing with unmatched brackets" in {
    val originalValue = "(Hello"
    val enrichedValue = originalValue.stripBrackets
    assert(enrichedValue === "(Hello")
  }

  "stripEndingPeriod" should "remove a single trailing period" in {
    val originalValue = "Hello."
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "Hello"
    assert(enrichedValue === expectedValue)
  }
  it should "not remove ellipsis" in {
    val originalValue = "Hello..."
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "Hello..."
    assert(enrichedValue === expectedValue)
  }
  it should "not remove leading or interior periods" in {
    val originalValue = "H.e.l.l.o."
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "H.e.l.l.o"
    assert(enrichedValue === expectedValue)
  }
  it should "return the original value if only given a single period (e.g. '.')" in {
    val originalValue = "."
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "."
    assert(enrichedValue === expectedValue)
  }
  it should "remove a trailing period if it followed by whitespace" in {
    val originalValue = "Hello. "
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "Hello"
    assert(enrichedValue === expectedValue)
  }
  it should "not remove a period followed by a closing paren" in {
    val originalValue = "Synagogues -- Washington (D.C.)"
    val enrichedValue = originalValue.stripEndingPeriod
    val expectedValue = "Synagogues -- Washington (D.C.)"
    assert(enrichedValue === expectedValue)
  }

  "stripDblQuotes" should "remove all double quotes" in {
    val originalValue = """ "Hello John" """
    assert(originalValue.stripDblQuotes == " Hello John ")
  }
}
| dpla/ingestion3 | src/test/scala/dpla/ingestion3/enrichments/StringNormalizationUtilsTest.scala | Scala | mit | 14,960 |
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.service
import no.ndla.learningpathapi.LearningpathApiProperties.DefaultLanguage
import java.util.Date
import java.util.concurrent.Executors
import no.ndla.learningpathapi.integration.{SearchApiClient, TaxonomyApiClient}
import no.ndla.learningpathapi.model.api.{config, _}
import no.ndla.learningpathapi.model.api.config.UpdateConfigValue
import no.ndla.learningpathapi.model.domain
import no.ndla.learningpathapi.model.domain.config.{ConfigKey, ConfigMeta}
import no.ndla.learningpathapi.model.domain.{LearningPathStatus, UserInfo, LearningPath => _, LearningStep => _, _}
import no.ndla.learningpathapi.repository.{ConfigRepository, LearningPathRepositoryComponent}
import no.ndla.learningpathapi.service.search.SearchIndexService
import no.ndla.learningpathapi.validation.{LearningPathValidator, LearningStepValidator}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
trait UpdateService {
this: LearningPathRepositoryComponent
with ReadService
with ConfigRepository
with ConverterService
with SearchIndexService
with Clock
with LearningStepValidator
with LearningPathValidator
with TaxonomyApiClient
with SearchApiClient =>
val updateService: UpdateService
class UpdateService {
/**
 * Re-syncs taxonomy resources for a learning path and returns it as an API model.
 *
 * @param pathId id of the learning path to sync
 * @param createResourceIfMissing whether a missing taxonomy resource should be created
 * @param language language of the returned learning path
 * @param fallback whether to fall back to other languages if 'language' is absent
 * @param userInfo the caller; must have writer rights
 * @return the learning path as API model, or a Failure (access denied, not found, taxonomy error)
 */
def updateTaxonomyForLearningPath(
    pathId: Long,
    createResourceIfMissing: Boolean,
    language: String,
    fallback: Boolean,
    userInfo: UserInfo
): Try[LearningPathV2] = {
  writeOrAccessDenied(userInfo.isWriter) {
    readService.withIdAndAccessGranted(pathId, userInfo) match {
      case Failure(ex) => Failure(ex)
      case Success(lp) =>
        taxononyApiClient
          .updateTaxonomyForLearningPath(lp, createResourceIfMissing)
          .flatMap(l => converterService.asApiLearningpathV2(l, language, fallback, userInfo))
    }
  }
}
// Inserts a learning path straight into the repository, bypassing validation and
// search/taxonomy indexing — intended for restoring dumps.
// NOTE(review): public method without an explicit return type; the result is
// whatever learningPathRepository.insert returns.
def insertDump(dump: domain.LearningPath) = {
  learningPathRepository.insert(dump)
}
/**
 * Runs `w` only when the owner is still permitted to write (i.e. no active write
 * restriction blocks them); otherwise fails with an AccessDeniedException.
 */
private[service] def writeDuringWriteRestrictionOrAccessDenied[T](owner: UserInfo)(w: => Try[T]): Try[T] =
  writeOrAccessDenied(readService.canWriteNow(owner),
    "You do not have write access while write restriction is active.")(w)
/**
 * Gate for write operations: evaluates the by-name action only when permitted.
 *
 * @param willExecute whether the caller is allowed to perform the action
 * @param reason message carried by the AccessDeniedException when not allowed
 * @param w the action to run; evaluated lazily, and only on permission
 */
private[service] def writeOrAccessDenied[T](
    willExecute: Boolean,
    reason: String = "You do not have permission to perform this action.")(w: => Try[T]): Try[T] =
  if (!willExecute) Failure(AccessDeniedException(reason)) else w
/**
 * Creates a copy of an existing learning path owned by `owner`. The source path
 * must be public or owned by the caller.
 *
 * @param id id of the learning path to copy
 * @param newLearningPath overrides to apply to the copy
 * @param owner the caller, becomes owner of the copy
 * @return the inserted copy as API model, or a Failure (not found, access denied, validation)
 */
def newFromExistingV2(id: Long, newLearningPath: NewCopyLearningPathV2, owner: UserInfo): Try[LearningPathV2] =
  writeDuringWriteRestrictionOrAccessDenied(owner) {
    learningPathRepository.withId(id).map(_.isOwnerOrPublic(owner)) match {
      case None => Failure(NotFoundException("Could not find learningpath to copy."))
      case Some(Failure(ex)) => Failure(ex)
      case Some(Success(existing)) =>
        val toInsert = converterService.newFromExistingLearningPath(existing, newLearningPath, owner)
        // Imported sources may carry fields with language=unknown; allow them in copies.
        learningPathValidator.validate(toInsert, allowUnknownLanguage = true)
        converterService.asApiLearningpathV2(learningPathRepository.insert(toInsert),
          newLearningPath.language,
          fallback = true,
          owner)
    }
  }
/**
 * Validates and persists a brand-new learning path for `owner`.
 *
 * @param newLearningPath the API payload for the new path
 * @param owner the caller, becomes the owner of the path
 * @return the stored path as API model, or a Failure (write restriction, validation)
 */
def addLearningPathV2(newLearningPath: NewLearningPathV2, owner: UserInfo): Try[LearningPathV2] =
  writeDuringWriteRestrictionOrAccessDenied(owner) {
    val toInsert = converterService.newLearningPath(newLearningPath, owner)
    learningPathValidator.validate(toInsert)
    val inserted = learningPathRepository.insert(toInsert)
    converterService.asApiLearningpathV2(inserted, newLearningPath.language, fallback = true, owner)
  }
/**
 * Merges the PUT payload over an existing learning path, persists it, and
 * re-syncs search index and taxonomy.
 *
 * @param id id of the learning path to update
 * @param learningPathToUpdate the partial update payload
 * @param owner the caller; must be allowed to edit the path
 * @return the updated path as API model, or a Failure (not found, access denied, validation)
 */
def updateLearningPathV2(id: Long,
                         learningPathToUpdate: UpdatedLearningPathV2,
                         owner: UserInfo): Try[LearningPathV2] = writeDuringWriteRestrictionOrAccessDenied(owner) {
  learningPathValidator.validate(learningPathToUpdate)
  withId(id).flatMap(_.canEditLearningpath(owner)) match {
    case Failure(ex) => Failure(ex)
    case Success(existing) =>
      val toUpdate = converterService.mergeLearningPaths(existing, learningPathToUpdate, owner)

      // Imported learningpaths may contain fields with language=unknown.
      // We should still be able to update it, but not add new fields with language=unknown.
      learningPathValidator.validate(toUpdate, allowUnknownLanguage = true)

      val updatedLearningPath = learningPathRepository.update(toUpdate)

      updateSearchAndTaxonomy(updatedLearningPath).flatMap(
        _ =>
          converterService.asApiLearningpathV2(
            updatedLearningPath,
            learningPathToUpdate.language,
            fallback = true,
            owner
          ))
  }
}
/**
  * Re-indexes the learningpath in the local search index and pushes it to the external
  * search API when published; unpublished paths instead have isBasedOn references to
  * them cleared. Finally the taxonomy entry is refreshed from the indexed document.
  */
private def updateSearchAndTaxonomy(learningPath: domain.LearningPath) = {
  val indexResult = searchIndexService.indexDocument(learningPath)

  if (learningPath.isPublished) searchApiClient.indexLearningPathDocument(learningPath)
  else deleteIsBasedOnReference(learningPath)

  indexResult.flatMap(indexed => taxononyApiClient.updateTaxonomyForLearningPath(indexed, false))
}
/**
  * Transitions a learningpath (including soft-deleted ones) to `status`.
  * Publishing additionally runs validateForPublishing. Only admins may attach a
  * status message; for everyone else the existing message is kept.
  */
def updateLearningPathStatusV2(learningPathId: Long,
                               status: LearningPathStatus.Value,
                               owner: UserInfo,
                               language: String,
                               message: Option[String] = None): Try[LearningPathV2] =
  writeDuringWriteRestrictionOrAccessDenied(owner) {
    withId(learningPathId, includeDeleted = true)
      .flatMap(_.canSetStatus(status, owner))
      .flatMap { existing =>
        val validatedLearningPath =
          if (status == domain.LearningPathStatus.PUBLISHED) existing.validateForPublishing()
          else Success(existing)

        validatedLearningPath.flatMap { valid =>
          val messageToStore = message match {
            case Some(msg) if owner.isAdmin => Some(domain.Message(msg, owner.userId, clock.now()))
            case _                          => valid.message
          }
          val stored = learningPathRepository.update(
            valid.copy(message = messageToStore, status = status, lastUpdated = clock.now()))
          updateSearchAndTaxonomy(stored).flatMap(_ =>
            converterService.asApiLearningpathV2(stored, language, fallback = true, owner))
        }
      }
  }
/**
  * Clears the isBasedOn reference on every learningpath derived from `updatedLearningPath`,
  * stamping each with a fresh lastUpdated timestamp.
  */
private[service] def deleteIsBasedOnReference(updatedLearningPath: domain.LearningPath): Unit = {
  val derivedPaths = learningPathRepository.learningPathsWithIsBasedOn(updatedLearningPath.id.get)
  derivedPaths.foreach { derived =>
    learningPathRepository.update(
      derived.copy(
        lastUpdated = clock.now(),
        isBasedOn = None
      )
    )
  }
}
/**
  * Appends a new learningstep to the learningpath, inserting the step and updating the
  * parent path in one transaction, then refreshes search/taxonomy. The whole operation
  * is retried on optimistic-lock conflicts.
  */
def addLearningStepV2(learningPathId: Long,
                      newLearningStep: NewLearningStepV2,
                      owner: UserInfo): Try[LearningStepV2] = writeDuringWriteRestrictionOrAccessDenied(owner) {
  optimisticLockRetries(10) {
    withId(learningPathId).flatMap(_.canEditLearningpath(owner)).flatMap { learningPath =>
      converterService
        .asDomainLearningStep(newLearningStep, learningPath)
        .flatMap(step => learningStepValidator.validate(step))
        .flatMap { newStep =>
          // Insert the step and update its parent path atomically.
          val (insertedStep, updatedPath) = inTransaction { implicit session =>
            val inserted = learningPathRepository.insertLearningStep(newStep)
            val pathWithStep = converterService.insertLearningStep(learningPath, inserted, owner)
            (inserted, learningPathRepository.update(pathWithStep))
          }
          updateSearchAndTaxonomy(updatedPath).flatMap(_ =>
            converterService.asApiLearningStepV2(insertedStep,
                                                 updatedPath,
                                                 newLearningStep.language,
                                                 fallback = true,
                                                 owner))
        }
    }
  }
}
/**
  * Merges the submitted changes into an existing learningstep, persists the step and its
  * parent path in one transaction, and refreshes search/taxonomy.
  *
  * Fix: the previous version called
  * `learningStepValidator.validate(toUpdate, allowUnknownLanguage = true)` a second time
  * after the for-comprehension had already validated, with the result discarded — that
  * redundant call is removed.
  */
def updateLearningStepV2(learningPathId: Long,
                         learningStepId: Long,
                         learningStepToUpdate: UpdatedLearningStepV2,
                         owner: UserInfo): Try[LearningStepV2] = writeDuringWriteRestrictionOrAccessDenied(owner) {
  withId(learningPathId).flatMap(_.canEditLearningpath(owner)) match {
    case Failure(ex) => Failure(ex)
    case Success(learningPath) =>
      learningPathRepository.learningStepWithId(learningPathId, learningStepId) match {
        case None =>
          Failure(NotFoundException(
            s"Could not find learningstep with id '$learningStepId' to update with learningpath id '$learningPathId'."))
        case Some(existing) =>
          val validated = for {
            toUpdate <- converterService.mergeLearningSteps(existing, learningStepToUpdate)
            // Imported steps may contain language=unknown fields; updating them must remain possible.
            validated <- learningStepValidator.validate(toUpdate, allowUnknownLanguage = true)
          } yield validated

          validated match {
            case Failure(ex) => Failure(ex)
            case Success(toUpdate) =>
              // Persist the step and the path holding it atomically.
              val (updatedStep, updatedPath) = inTransaction { implicit session =>
                val step = learningPathRepository.updateLearningStep(toUpdate)
                val pathToUpdate = converterService.insertLearningStep(learningPath, step, owner)
                (step, learningPathRepository.update(pathToUpdate))
              }
              updateSearchAndTaxonomy(updatedPath).flatMap(_ =>
                converterService.asApiLearningStepV2(updatedStep,
                                                     updatedPath,
                                                     learningStepToUpdate.language,
                                                     fallback = true,
                                                     owner))
          }
      }
  }
}
/**
 * Changes the status of a single learningstep and renumbers the surrounding steps so
 * seqNo values stay contiguous: deleting a step shifts later steps down by one,
 * re-activating it shifts them up by one. Step, path and renumbered siblings are
 * persisted in one transaction, then search/taxonomy is refreshed.
 */
def updateLearningStepStatusV2(learningPathId: Long,
learningStepId: Long,
newStatus: StepStatus.Value,
owner: UserInfo): Try[LearningStepV2] =
writeDuringWriteRestrictionOrAccessDenied(owner) {
withId(learningPathId).flatMap(_.canEditLearningpath(owner)) match {
case Failure(ex) => Failure(ex)
case Success(learningPath) =>
learningPathRepository.learningStepWithId(learningPathId, learningStepId) match {
case None =>
Failure(
NotFoundException(
s"Learningstep with id $learningStepId for learningpath with id $learningPathId not found"))
case Some(learningStep) =>
val stepToUpdate = learningStep.copy(status = newStatus)
// Steps at or after this step's position, excluding the step itself.
val stepsToChangeSeqNoOn = learningPathRepository
.learningStepsFor(learningPathId)
.filter(step => step.seqNo >= stepToUpdate.seqNo && step.id != stepToUpdate.id)
// NOTE(review): no catch-all case — assumes StepStatus has only DELETED and ACTIVE;
// any additional status value would raise a MatchError here. Confirm against StepStatus.
val stepsWithChangedSeqNo = stepToUpdate.status match {
case StepStatus.DELETED =>
stepsToChangeSeqNoOn.map(step => step.copy(seqNo = step.seqNo - 1))
case StepStatus.ACTIVE =>
stepsToChangeSeqNoOn.map(step => step.copy(seqNo = step.seqNo + 1))
}
val (updatedPath, updatedStep) = inTransaction { implicit session =>
val updatedStep = learningPathRepository.updateLearningStep(stepToUpdate)
// Replace the in-memory steps whose seqNo changed with their renumbered versions.
val newLearningSteps = learningPath.learningsteps
.getOrElse(Seq.empty)
.filterNot(
step =>
stepsWithChangedSeqNo
.map(_.id)
.contains(step.id)) ++ stepsWithChangedSeqNo
val lp = converterService.insertLearningSteps(learningPath, newLearningSteps, owner)
val updatedPath = learningPathRepository.update(lp)
// Persist the renumbered siblings individually inside the same transaction.
stepsWithChangedSeqNo.foreach(learningPathRepository.updateLearningStep)
(updatedPath, updatedStep)
}
updateSearchAndTaxonomy(updatedPath).flatMap(
_ =>
converterService.asApiLearningStepV2(
updatedStep,
updatedPath,
DefaultLanguage,
fallback = true,
owner
))
}
}
}
/** Validates and stores a configuration value. Admin-only; non-admins get an access-denied failure. */
def updateConfig(configKey: ConfigKey.Value,
                 value: UpdateConfigValue,
                 userInfo: UserInfo): Try[config.ConfigMeta] =
  writeOrAccessDenied(userInfo.isAdmin, "Only administrators can edit configuration.") {
    val candidate = ConfigMeta(configKey, value.value, new Date(), userInfo.userId)
    candidate.validate.flatMap { validated =>
      configRepository.updateConfigParam(validated).map(converterService.asApiConfig)
    }
  }
/**
  * Moves a learningstep to a new position. The steps between the old and new seqNo are
  * shifted one slot in the opposite direction of the move so positions stay contiguous.
  * Retried on optimistic-lock conflicts.
  *
  * Fix: the None branch contained a stray `None` expression before the Failure (dead
  * code whose value was discarded) — it has been removed.
  */
def updateSeqNo(learningPathId: Long, learningStepId: Long, seqNo: Int, owner: UserInfo): Try[LearningStepSeqNo] =
  writeDuringWriteRestrictionOrAccessDenied(owner) {
    optimisticLockRetries(10) {
      withId(learningPathId).flatMap(_.canEditLearningpath(owner)) match {
        case Failure(ex) => Failure(ex)
        case Success(learningPath) =>
          learningPathRepository.learningStepWithId(learningPathId, learningStepId) match {
            case None =>
              Failure(
                NotFoundException(
                  s"LearningStep with id $learningStepId in learningPath $learningPathId not found"))
            case Some(learningStep) =>
              // NOTE(review): the result of validateSeqNo is discarded, so it presumably
              // signals an invalid seqNo by throwing — confirm against its implementation.
              learningPath.validateSeqNo(seqNo)

              val from = learningStep.seqNo
              val to = seqNo
              val toUpdate = learningPath.learningsteps
                .getOrElse(Seq.empty)
                .filter(step => rangeToUpdate(from, to).contains(step.seqNo))

              // Steps in the affected range move one slot opposite to the moved step.
              def addOrSubtract(seqNo: Int): Int = if (from > to) seqNo + 1 else seqNo - 1

              inTransaction { implicit session =>
                learningPathRepository.updateLearningStep(learningStep.copy(seqNo = seqNo))
                toUpdate.foreach(step => {
                  learningPathRepository.updateLearningStep(step.copy(seqNo = addOrSubtract(step.seqNo)))
                })
              }
              Success(LearningStepSeqNo(seqNo))
          }
      }
    }
  }
/**
  * seqNo positions affected when a step moves from `from` to `to`:
  * moving earlier affects [to, from); moving later affects (from, to].
  */
def rangeToUpdate(from: Int, to: Int): Range =
  if (to < from) to until from
  else (from + 1) to to
/**
  * Looks up a learningpath by id (optionally including soft-deleted paths),
  * turning a missing path into a NotFoundException failure.
  */
private def withId(learningPathId: Long, includeDeleted: Boolean = false): Try[domain.LearningPath] = {
  val maybePath =
    if (includeDeleted) learningPathRepository.withIdIncludingDeleted(learningPathId)
    else learningPathRepository.withId(learningPathId)

  maybePath
    .map(Success(_))
    .getOrElse(Failure(NotFoundException(s"Could not find learningpath with id '$learningPathId'.")))
}
/**
  * Evaluates `fn`, retrying up to `n` attempts in total when an OptimisticLockException
  * is thrown; the last attempt's exception propagates. All other throwables propagate
  * immediately.
  *
  * Fix: removed the redundant `case t: Throwable => throw t` clause — not catching an
  * exception is behaviorally identical to catching and rethrowing the same instance.
  */
def optimisticLockRetries[T](n: Int)(fn: => T): T = {
  try {
    fn
  } catch {
    case ole: OptimisticLockException =>
      if (n > 1) optimisticLockRetries(n - 1)(fn) else throw ole
  }
}
}
}
| NDLANO/learningpath-api | src/main/scala/no/ndla/learningpathapi/service/UpdateService.scala | Scala | gpl-3.0 | 17,093 |
package com.github.andr83.parsek.pipe
import java.security.KeyPairGenerator
import javax.crypto.Cipher
import com.github.andr83.parsek._
import com.typesafe.config.ConfigFactory
import org.apache.commons.lang.RandomStringUtils
import org.scalatest.{FlatSpec, Inside, Matchers}
import scala.collection.JavaConversions._
/**
* @author Andrei Tupitcyn
*/
/**
 * Round-trip test for DecryptRsaPipe: encrypt a random payload with a freshly
 * generated RSA key pair, then verify the pipe decrypts the configured field
 * back to the original plaintext.
 */
class DecryptRsaSpec extends FlatSpec with Matchers with Inside {
implicit val context = new PipeContext()
// at first necessary convert RSA private key:
// openssl pkcs8 -topk8 -inform PEM -outform DER -in private.pem -out private.der -nocrypt
"The content " should " be a string encoded with RSA" in {
// 40 random alphabetic characters — small enough for a single 1024-bit RSA block.
val rawBody = RandomStringUtils.random(40, true, false)
//Generating RSA private key
val keyPairGenerator = KeyPairGenerator.getInstance("RSA")
keyPairGenerator.initialize(1024)
val keyPair = keyPairGenerator.genKeyPair()
//Encrypt body with RSA
// NOTE(review): NoPadding means raw RSA; the decrypted block is zero-padded to the
// modulus size, so the equality check below presumably relies on the pipe (or asStr)
// stripping that padding — confirm against DecryptRsaPipe.
val rsaCipher = Cipher.getInstance("RSA/ECB/NoPadding")
rsaCipher.init(Cipher.ENCRYPT_MODE, keyPair.getPublic)
val body = rsaCipher.doFinal(rawBody.asBytes).asStr
// Pipe config: PKCS#8-encoded private key, cipher algorithm, and the field to decrypt.
val config = ConfigFactory.parseMap(Map(
"privateKey" -> keyPair.getPrivate.getEncoded.asStr,
"algorithm" -> "RSA/ECB/NoPadding",
"field" -> "body"
))
val decryptor = DecryptRsaPipe(config)
val result: Option[PMap] = decryptor.run(PMap(
"body" -> PString(body)
)).map(_.asInstanceOf[PMap])
assert(result.nonEmpty)
// The "body" field must now hold the original plaintext.
inside(result) {
case Some(PMap(map)) =>
map should contain key "body"
val body = map.get("body").get.asInstanceOf[PString].value
assert(body == rawBody)
}
}
}
| andr83/parsek | core/src/test/scala/com/github/andr83/parsek/pipe/DecryptRsaSpec.scala | Scala | mit | 1,684 |
package repository
import java.util.UUID
import models.{Task, User}
import org.specs2.specification.Scope
import play.api.test.PlaySpecification
import reactivemongo.api.indexes.IndexType
import reactivemongo.bson.BSONObjectID
import utils.StartedFakeApplication
/**
 * Integration tests for MongoTaskRepository, exercising CRUD, batch deletion of
 * done tasks, and the expected index. Each test uses a freshly generated user so
 * tests do not see each other's data; created documents are cleaned up at the end
 * of each test (the cleanup deletes double as assertions).
 */
class TaskRepositoryISpec extends PlaySpecification with StartedFakeApplication {
def randomString = UUID.randomUUID().toString
// Fresh user per test — isolates the data each test observes via findAll.
def randomUser = User(BSONObjectID.generate, randomString, None)
"User task repository" should {
"list tasks" in new TaskRepositoryTestCase {
// Given
implicit val user = randomUser
val first = Task(Some(BSONObjectID.generate), "first")
val second = Task(Some(BSONObjectID.generate), "second")
// And
val created2 = await(taskRepository.create(second))
val created1 = await(taskRepository.create(first))
// When
val list = await(taskRepository.findAll)
// Then
list must contain(created1, created2).inOrder.atMost
// Cleanup
await(taskRepository.delete(created1.id.get.stringify)) must be equalTo true
await(taskRepository.delete(created2.id.get.stringify)) must be equalTo true
}
"retrieve and delete a saved task" in new TaskRepositoryTestCase {
// Given
implicit val user = randomUser
val text = s"text-${UUID.randomUUID()}"
// When
val created = await(taskRepository.create(Task(None, text)))
// Then
// create returns the persisted task, so its id is populated here.
val id = created.id.get.stringify
val found = await(taskRepository.find(id))
found.get must be equalTo created
// Cleanup
// Second delete must report false: the document is already gone.
await(taskRepository.delete(id)) must be equalTo true
await(taskRepository.find(id)) must be equalTo None
await(taskRepository.delete(id)) must be equalTo false
}
"update a task" in new TaskRepositoryTestCase {
// Given
implicit val user = randomUser
val text = s"text-${UUID.randomUUID()}"
val created = await(taskRepository.create(Task(None, text)))
created.done must beFalse
// When
val update = created.copy(done = true)
await(taskRepository.update(update))
// Then
val id = created.id.get.stringify
val reRead = await(taskRepository.find(id))
reRead.get.done must beTrue
// Cleanup
await(taskRepository.delete(id)) must be equalTo true
}
"batch delete tasks" in new TaskRepositoryTestCase {
// Given
implicit val user = randomUser
val doneTask = Task(Some(BSONObjectID.generate), "done", done = true)
val notDoneTask = Task(Some(BSONObjectID.generate), "not-done", done = false)
// And
val createdDoneTask = await(taskRepository.create(doneTask))
val createdNotDoneTask = await(taskRepository.create(notDoneTask))
// And
val created = await(taskRepository.findAll)
created must contain(createdDoneTask, createdNotDoneTask)
// When
// deleteDone must remove only tasks flagged done.
await(taskRepository.deleteDone)
// Then
val remaining = await(taskRepository.findAll)
remaining must contain(createdNotDoneTask).atMostOnce
remaining must not contain createdDoneTask
// Cleanup
await(taskRepository.delete(createdNotDoneTask.id.get.stringify)) must be equalTo true
}
"have an index on user" in new TaskRepositoryTestCase {
// Given
val repo = taskRepository
// When
val indexes = await(repo.indexes())
// Then
// An ascending index on "user" supports the per-user queries above.
indexes.filter(_.key == Seq("user" -> IndexType.Ascending)) must not be empty
}
}
// Shared fixture: the repository under test.
trait TaskRepositoryTestCase extends Scope {
val taskRepository = MongoTaskRepository
}
}
| timothygordon32/reactive-todolist | it/repository/TaskRepositoryISpec.scala | Scala | mit | 3,623 |
/*
* Copyright (c) 2015 Alexandros Pappas p_alx hotmail com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package gr.gnostix.freeswitch.actors
import akka.actor.{Props, ActorRef, Actor, ActorLogging}
import gr.gnostix.freeswitch.actors.ActorsProtocol._
import gr.gnostix.freeswitch.actors.ServletProtocol.{ApiReplyError, ApiReplyData, ApiReply}
import scala.collection.SortedMap
/**
* Created by rebel on 10/10/15.
*/
object DialCodesActor {
  /** Props factory for [[DialCodesActor]], preloaded with the given named dial-code lists. */
  def props(dialCodes: Map[String, SortedMap[String, String]]): Props =
    Props(new DialCodesActor(dialCodes))
}
/**
 * Actor managing named lists of dial codes (filename -> sorted map of code -> country).
 * State is held functionally: every mutation rebuilds the map and swaps behaviour with
 * `context become idle(...)`.
 */
class DialCodesActor(dialCodes: Map[String, SortedMap[String, String]]) extends Actor with ActorLogging {
def idle(dialCodes: Map[String, SortedMap[String, String]]): Receive = {
// Register (or replace) a dial-code list under its filename. No reply is sent.
case x@AddDialCodeList(filename, dialCodesS) =>
val newDialCodes = dialCodes + (x.fileName -> x.dialCodes)
//sender ! ApiReply(200, "DialCodes added successfully")
context become idle(newDialCodes)
// Longest-prefix match of `number` against the dial codes; replies with
// NumberDialCodeCountry carrying the matched code and country (or Nones).
// NOTE(review): `dialCodes.last` picks one entry of an unordered Map, so which
// list is consulted is not well-defined once several lists are loaded — confirm intent.
case GetNumberDialCode(number) =>
val dialCodeCountry = dialCodes.last._2.par.filter(d => number.startsWith(d._1))
.toList.sortBy(_._1.length).lastOption
dialCodeCountry match {
case Some(dt) => sender ! NumberDialCodeCountry(number, Some(dt._1), Some(dt._2))
case None => sender ! NumberDialCodeCountry(number, None, None)
}
// Remove a list by filename; the final remaining list is treated as the default
// and cannot be removed.
case x@DelDialCodeList(fileName) =>
dialCodes.size match {
case 1 => sender ! ApiReply(400, "We cannot remove the default list of DialCodes ")
case _ =>
val newMap = dialCodes.filterNot(_._1 == fileName)
context become idle(newMap)
sender ! ApiReply(200, s"DialCodes with filename $fileName, removed successfully")
}
// Reply with Some(list) when the filename is known, otherwise None.
case x@GetDialCodeList(fileName) =>
val dialC = dialCodes.get(fileName)
//log info s"-------> $dialC"
dialC match {
case Some(map) => sender ! dialC
case None => sender ! None
}
// Reply with a summary (filename and entry count) for every loaded list.
case x@GetAllDialCodeList =>
sender ! dialCodes.map(d => AllDialCodes(d._1, d._2.size))
}
def receive: Receive =
idle(dialCodes)
}
| gnostix/freeswitch-monitoring | src/main/scala/gr/gnostix/freeswitch/actors/DialCodesActor.scala | Scala | apache-2.0 | 2,687 |
/*
* Happy Melly Teller
* Copyright (C) 2013, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional terms, you may contact
* by email Sergey Kotlov, [email protected] or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package controllers
import Forms._
import models.{ Person, Activity, Organisation }
import play.api.mvc._
import securesocial.core.{ SecuredRequest, SecureSocial }
import play.api.data._
import play.api.data.Forms._
import play.api.i18n.Messages
import org.joda.time.DateTime
import models.UserRole.Role._
import models.OrganisationCategory
import securesocial.core.SecuredRequest
import scala.Some
import play.api.data.format.Formatter
/**
 * CRUD controller for organisations.
 *
 * Fix: the `⇒` arrows throughout this object had been corrupted into the letter `β`
 * (an encoding artifact), which does not compile; they are restored here as `=>`.
 */
object Organisations extends Controller with Security {

  /**
   * Formatter used to define a form mapping for the `OrganisationCategory` enumeration.
   */
  implicit def categoryFormat: Formatter[OrganisationCategory.Value] = new Formatter[OrganisationCategory.Value] {

    /** Parses the submitted value into an enumeration member, or an "error.invalid" form error. */
    def bind(key: String, data: Map[String, String]) = {
      try {
        data.get(key).map(OrganisationCategory.withName(_)).toRight(Seq.empty)
      } catch {
        // OrganisationCategory.withName throws NoSuchElementException for unknown names.
        case e: NoSuchElementException => Left(Seq(FormError(key, "error.invalid")))
      }
    }

    def unbind(key: String, value: OrganisationCategory.Value) = Map(key -> value.toString)
  }

  val categoryMapping = of[OrganisationCategory.Value]

  /**
   * HTML form mapping for creating and editing.
   * Audit fields (created/updated timestamps and user names) are filled server-side.
   */
  def organisationForm(implicit request: SecuredRequest[_]) = Form(mapping(
    "id" -> ignored(Option.empty[Long]),
    "name" -> nonEmptyText,
    "street1" -> optional(text),
    "street2" -> optional(text),
    "city" -> optional(text),
    "province" -> optional(text),
    "postCode" -> optional(text),
    "country" -> nonEmptyText,
    "vatNumber" -> optional(text),
    "registrationNumber" -> optional(text),
    "category" -> optional(categoryMapping),
    "webSite" -> optional(webUrl),
    "blog" -> optional(webUrl),
    "active" -> ignored(true),
    "created" -> ignored(DateTime.now()),
    "createdBy" -> ignored(request.user.fullName),
    "updated" -> ignored(DateTime.now()),
    "updatedBy" -> ignored(request.user.fullName))(Organisation.apply)(Organisation.unapply))

  /**
   * Form target for toggling whether an organisation is active.
   */
  def activation(id: Long) = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      Organisation.find(id).map { organisation =>
        Form("active" -> boolean).bindFromRequest.fold(
          form => {
            BadRequest("invalid form data")
          },
          active => {
            Organisation.activate(id, active)
            val activity = Activity.insert(request.user.fullName, if (active) Activity.Predicate.Activated else Activity.Predicate.Deactivated, organisation.name)
            Redirect(routes.Organisations.details(id)).flashing("success" -> activity.toString)
          })
      } getOrElse {
        Redirect(routes.Organisations.index).flashing("error" -> Messages("error.notFound", Messages("models.Organisation")))
      }
  }

  /**
   * Create page.
   */
  def add = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      Ok(views.html.organisation.form(request.user, None, organisationForm))
  }

  /**
   * Create form submits to this action.
   */
  def create = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      organisationForm.bindFromRequest.fold(
        formWithErrors =>
          BadRequest(views.html.organisation.form(request.user, None, formWithErrors)),
        organisation => {
          val org = organisation.save
          val activity = Activity.insert(request.user.fullName, Activity.Predicate.Created, organisation.name)
          Redirect(routes.Organisations.index()).flashing("success" -> activity.toString)
        })
  }

  /**
   * Deletes an organisation.
   * @param id Organisation ID
   */
  def delete(id: Long) = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      Organisation.find(id).map {
        organisation =>
          Organisation.delete(id)
          val activity = Activity.insert(request.user.fullName, Activity.Predicate.Deleted, organisation.name)
          Redirect(routes.Organisations.index).flashing("success" -> activity.toString)
      }.getOrElse(NotFound)
  }

  /**
   * Details page.
   * @param id Organisation ID
   */
  def details(id: Long) = SecuredRestrictedAction(Viewer) { implicit request =>
    implicit handler =>
      Organisation.find(id).map {
        organisation =>
          val members = organisation.members
          // People who can still be added as members (not already in the organisation).
          val otherPeople = Person.findActive.filterNot(person => members.contains(person))
          Ok(views.html.organisation.details(request.user, organisation, members, otherPeople))
      } getOrElse {
        //TODO return 404
        Redirect(routes.Organisations.index).flashing("error" -> Messages("error.notFound", Messages("models.Organisation")))
      }
  }

  /**
   * Edit page.
   * @param id Organisation ID
   */
  def edit(id: Long) = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      Organisation.find(id).map {
        organisation =>
          Ok(views.html.organisation.form(request.user, Some(id), organisationForm.fill(organisation)))
      }.getOrElse(NotFound)
  }

  /**
   * List page.
   */
  def index = SecuredRestrictedAction(Viewer) { implicit request =>
    implicit handler =>
      val organisations = Organisation.findAll
      Ok(views.html.organisation.index(request.user, organisations))
  }

  /**
   * Edit form submits to this action.
   * @param id Organisation ID
   */
  def update(id: Long) = SecuredRestrictedAction(Editor) { implicit request =>
    implicit handler =>
      organisationForm.bindFromRequest.fold(
        formWithErrors =>
          BadRequest(views.html.organisation.form(request.user, Some(id), formWithErrors)),
        organisation => {
          organisation.copy(id = Some(id)).save
          val activity = Activity.insert(request.user.fullName, Activity.Predicate.Updated, organisation.name)
          Redirect(routes.Organisations.details(id)).flashing("success" -> activity.toString)
        })
  }
}
| cvogt/teller | app/controllers/Organisations.scala | Scala | gpl-3.0 | 7,112 |
/*
* Copyright 2011 Delving B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package processors
import util.{ OrganizationConfigurationHandler, Logging }
import models.dos.{ Task }
import play.api.Play
import play.api.Play.current
import models.OrganizationConfiguration
/**
*
* @author Manuel Bernhardt <[email protected]>
*/
/**
 * Base trait for file processors: an abstract processing hook plus small helpers
 * for recognising image files and resolving the organisation's file store.
 *
 * @author Manuel Bernhardt <[email protected]>
 */
trait Processor extends Logging {

  /**
   * Does its thing given a path and optional parameters. The path may or may not exist on the file system.
   */
  def process(task: Task, processorParams: Map[String, AnyRef] = Map.empty[String, AnyRef])(implicit configuration: OrganizationConfiguration)

  /** True when the name carries a supported image extension (jpg/jpeg/tif/tiff, case-insensitive). */
  def isImage(name: String) = {
    // A usable extension requires a dot that is not the leading character (hidden files).
    val hasUsableExtension = name.contains(".") && !name.startsWith(".")
    hasUsableExtension && (name.split("\\.").last.toLowerCase match {
      case "jpg" | "tif" | "tiff" | "jpeg" => true
      case _ => false
    })
  }

  /** Renders a task's parameters as comma-separated "key:value" pairs. */
  def parameterList(task: Task) = task.params.map { case (key, value) => s"$key:$value" }.mkString(", ")

  /** Image name without extension; names whose only dot is the leading one are returned unchanged. */
  def getImageName(name: String) =
    if (name.indexOf(".") > 0) name.substring(0, name.lastIndexOf(".")) else name

  /** Resolves the file store for the given organisation. */
  protected def getStore(orgId: String) = {
    import controllers.dos.fileStore
    fileStore(OrganizationConfigurationHandler.getByOrgId(orgId))
  }
}
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without surfacing deeper insights.