code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright (C) 2014 Szu-Hsien Lee ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.a30corner.twculture.server
import java.io.{File, Reader}
import scala.io.Source
import spray.json._
import scala.Predef._
import scala.Some
import scala.collection.{SeqView, Iterable, AbstractSeq}
/** An event category from the category-listing endpoint: code plus display name. */
case class Category(categoryCode: String, categoryName: String)
/**
 * An event record exactly as delivered by the open-data feed: empty strings
 * denote missing values and multi-valued organisational units arrive as lists.
 * Cleaned and normalised into [[Info]] by `Purify.convert`.
 */
case class RawInfo(UID: String,
                   category: String,
                   title: String,
                   comment: String,
                   descriptionFilterHtml: String,
                   startDate: String,
                   endDate: String,
                   imageUrl: String,
                   masterUnit: List[String],
                   otherUnit: List[String],
                   showUnit: String,
                   subUnit: List[String],
                   supportUnit: List[String],
                   sourceWebName: String,
                   sourceWebPromote: String,
                   webSales: String,
                   discountInfo: String,
                   showInfo: Seq[ShowInfo])
/**
 * Cleaned event record produced from [[RawInfo]]: missing values become
 * Options, unit lists are joined into single strings, and `locations` holds
 * the concatenated one-character city abbreviations of all show venues.
 */
case class Info(UID: String,
                category: String,
                title: String,
                comment: Option[String],
                description: Option[String],
                startDate: String,
                endDate: Option[String],
                imageUrl: Option[String],
                iconImage: Option[String],
                masterUnit: String,
                otherUnit: Option[String],
                showUnit: Option[String],
                subUnit: Option[String],
                supportUnit: Option[String],
                sourceWebName: String,
                sourceWebPromote: Option[String],
                webSales: Option[String],
                discountInfo: Option[String],
                locations: String,
                showinfo: Seq[ShowInfo]
               )
/**
 * One show occurrence of an event: optional coordinates, venue address and
 * name, whether tickets are sold (`onSales`), an optional price note, and the
 * show time(s).
 */
case class ShowInfo(
  lat: Option[Double],
  lon: Option[Double],
  location: String,
  locationName: String,
  onSales: Boolean,
  price: Option[String],
  time: String
)
/**
 * Structural twin of [[ShowInfo]] with identical fields.
 * NOTE(review): not referenced anywhere in this file — confirm external use
 * before removing.
 */
case class RawShowInfo(
  lat: Option[Double],
  lon: Option[Double],
  location: String,
  locationName: String,
  onSales: Boolean,
  price: Option[String],
  time: String
)
/**
 * spray-json protocol for the open-data payloads.
 *
 * Product formats are generated for Category/RawInfo/Info; [[ShowInfo]] has a
 * hand-written format because its wire form uses different field names
 * ("latitude"/"longitude" instead of `lat`/`lon`) and encodes every value as
 * a string.
 */
object InfoJsonProtocol extends DefaultJsonProtocol {
  import Purify._
  implicit val f0 = jsonFormat2(Category)
  implicit object ShowJsonFormat extends RootJsonFormat[ShowInfo] {
    // Missing lat/lon/price are written as empty strings; onSales becomes "Y"/"N".
    def write(p: ShowInfo) = JsObject(
      "latitude" -> JsString(p.lat.getOrElse("").toString),
      "longitude" -> JsString(p.lon.getOrElse("").toString),
      "location" -> JsString(p.location),
      "locationName" -> JsString(p.locationName),
      "onSales" -> JsString(if (p.onSales) "Y" else "N"),
      "price" -> JsString(p.price.getOrElse("")),
      "time" -> JsString(p.time)
    )
    // Inverse of `write`: empty strings map back to None via `opt`.
    def read(value: JsValue) = {
      value.asJsObject.getFields("latitude", "longitude", "location", "locationName", "onSales", "price", "time") match {
        case Seq(JsString(lat), JsString(lon), JsString(loc), JsString(lname), JsString(sale), JsString(price), JsString(time)) =>
          new ShowInfo(opt(lat, toDouble), opt(lon, toDouble), loc, lname, sale == "Y", opt(price), time)
        case _ => throw new DeserializationException("ShowInfo expected")
      }
    }
  }
  implicit val f1 = jsonFormat18(RawInfo)
  implicit val f2 = jsonFormat20(Info)
}
/**
 * Client for the cloud.culture.tw open-data endpoints: fetches the category
 * list and the per-category event feed, converting raw records to [[Info]].
 */
object OpenData {
  import InfoJsonProtocol._
  import Purify._

  val host = "http://cloud.culture.tw"
  // NOTE(review): `jsonpath` and `efile` are not used in this file — confirm
  // external usage before removing.
  val jsonpath = s"$host/frontsite/inquiry/queryAction.do"
  val categories = s"$host/frontsite/trans/SearchShowAction.do?method=doFindAllTypeJ"
  val efile = new File("extra.json")

  /** URL of the event feed for the given category code. */
  def openData(code: String) = s"$host/frontsite/trans/SearchShowAction.do?method=doFindTypeJ&category=$code"

  /** Opens a buffered reader over the URL's content; the caller is responsible for closing it. */
  def fetch(url: String): Reader = {
    println(s"fetch=> $url")
    Source.fromURL(url).bufferedReader()
  }

  /** Reads the whole response body into a string. NOTE(review): the Source is never closed. */
  def fetchHtml(url: String): String =
    Source.fromURL(url).mkString

  /** Downloads and parses the category list. */
  def getCategories: Seq[Category] =
    fetchHtml(categories).asJson.convertTo[Seq[Category]]

  /** Downloads the raw events for `code` and converts each into a cleaned [[Info]]. */
  def getInfo(code: String, iconMap: Map[String, Data]): Seq[Info] =
    fetchHtml(openData(code)).asJson.convertTo[Seq[RawInfo]] map (r => convert(r, iconMap))
}
/**
 * Helpers that turn raw open-data records into cleaned [[Info]] values:
 * empty-string-to-Option conversion, organisational-unit name cleanup, and
 * mapping of addresses / city ids to single-character city abbreviations.
 */
private object Purify {

  /**
   * Builds a cleaned [[Info]] from a raw record, optionally enriched with the
   * extra data (icon/image URLs and a show list carrying city ids) looked up
   * by the record's UID.
   *
   * NOTE(review): two-parameter methods are never applied as implicit
   * conversions, so the `implicit` keyword has no effect here; the method is
   * presumably only called explicitly — confirm before dropping the modifier.
   */
  implicit def convert(raw: RawInfo, extraMap: Map[String, Data]): Info = {
    // icon image is large, but in our json icon is for small.
    // Prefer the raw image URL, then the extra data's icon, then its image.
    val image = opt(raw.imageUrl) match {
      case d@Some(_) => d
      case None => extraMap.get(raw.UID) match {
        case Some(data) => data.iconImageUrl match {
          case img@Some(_) => img
          case None => opt(data.imageUrl)
        }
        case None => opt(raw.imageUrl)
      }
    }
    // Concatenation of the distinct, sorted city abbreviations for all shows.
    var locations = extraMap.get(raw.UID) match {
      case Some(x) => x.showInfoList.map(e =>
        cityMap.getOrElse(e.cityId, location2Area(e.location))).
        distinct.sorted.foldRight("")(_ + _)
      case None => extractLocation(raw.showInfo)
    }
    // Just a workaround: we don't have a cityId and can't parse the area from
    // the address, so fall back to guessing from the master unit's name.
    if (locations.isEmpty && !raw.masterUnit.isEmpty) {
      locations = location2Area(raw.masterUnit.mkString)
    }
    new Info(
      UID = raw.UID,
      category = raw.category,
      title = raw.title,
      comment = opt(raw.comment),
      description = opt(raw.descriptionFilterHtml),
      startDate = raw.startDate,
      endDate = opt(raw.endDate),
      imageUrl = image,
      // For the icon, prefer the extra data's image URL over its icon URL.
      iconImage = extraMap.get(raw.UID) match {
        case Some(data) => opt(data.imageUrl).orElse(data.iconImageUrl)
        case None => image
      },
      masterUnit = raw.masterUnit.mkString("; "),
      otherUnit = opt(purifyUnit(raw.otherUnit.mkString("; "))),
      showUnit = opt(purifyUnit(raw.showUnit)),
      subUnit = opt(purifyUnit(raw.subUnit.mkString("; "))),
      supportUnit = opt(purifyUnit(raw.supportUnit.mkString("; "))),
      sourceWebName = raw.sourceWebName,
      sourceWebPromote = opt(raw.sourceWebPromote),
      webSales = opt(raw.webSales),
      discountInfo = opt(raw.discountInfo),
      // Order the abbreviation characters by the display order in cityOrderMap.
      locations = locations.sortBy(cityOrderMap),
      showinfo = reduceShowInfo(raw.showInfo)
    )
  }

  /**
   * Merges show entries that share (location, locationName, price) into one
   * entry whose times are joined with ';'.
   */
  def reduceShowInfo(infos: Seq[ShowInfo]): Seq[ShowInfo] = {
    //TODO: performance....
    infos.groupBy(s => (s.location, s.locationName, s.price)).map {
      case (_, v) => v.reduce((a, b) => a.copy(time = a.time + ";" + b.time))
    }.toSeq
  }

  /** Strips the redundant "/中華民國" (Republic of China) suffix from unit names. */
  def purifyUnit(s: String): String = {
    s.replace("/中華民國", "")
  }

  /** Renders a date range, collapsing it to a single date when begin == end. */
  def mergeDate(begin: String, end: Option[String]): String =
    end match {
      case Some(x) if x != begin => s"$begin ~ $x"
      case _ => begin
    }

  // Open-data cityId -> one-character city abbreviation.
  val cityMap = Map(
    1 -> "北", // "臺北市" (Taipei City)
    2 -> "基", // "基隆市" (Keelung City)
    3 -> "新", // "新北市" (New Taipei City)
    4 -> "宜", // "宜蘭縣" (Yilan County)
    5 -> "桃", // "桃園縣" (Taoyuan County)
    6 -> "竹", // "新竹市" (Hsinchu City)
    7 -> "竹", // "新竹縣" (Hsinchu County)
    8 -> "苗", // "苗栗縣" (Miaoli County)
    10 -> "中", // "臺中市" (Taichung City)
    11 -> "彰", // "彰化縣" (Changhua County)
    12 -> "投", // "南投縣" (Nantou County)
    13 -> "雲", // "雲林縣" (Yunlin County)
    14 -> "嘉", // "嘉義縣" (Chiayi County)
    15 -> "嘉", // "嘉義市" (Chiayi City)
    16 -> "南", // "臺南市" (Tainan City)
    18 -> "高", // "高雄市" (Kaohsiung City)
    20 -> "屏", // "屏東縣" (Pingtung County)
    21 -> "澎", // "澎湖縣" (Penghu County)
    22 -> "花", // "花蓮縣" (Hualien County)
    23 -> "東", // "台東縣" (Taitung County)
    24 -> "金", // "金門縣" (Kinmen County)
    25 -> "馬") // "連江縣" (Lienchiang County / Matsu)

  // Display order of the abbreviation characters.
  val cityOrderMap = Map(
    '新' -> 1,
    '北' -> 2,
    '基' -> 3,
    '桃' -> 4,
    '竹' -> 5,
    '苗' -> 6,
    '中' -> 7,
    '彰' -> 8,
    '投' -> 9,
    '雲' -> 10,
    '嘉' -> 11,
    '南' -> 12,
    '高' -> 13,
    '屏' -> 14,
    '宜' -> 15,
    '花' -> 16,
    '東' -> 17,
    '澎' -> 18,
    '金' -> 19,
    '馬' -> 20)

  // Finds the first recognisable city/county name fragment in an address.
  // The negative lookahead keeps a bare "北市" from matching at a position
  // where a longer name ("新北", "台北", ...) should win instead.
  val pattern = """((?:台|臺)(?:北|中|南|東)|(?!新|台|臺|竹)北市|新北|新竹|彰化|屏東|澎湖|嘉義|雲林|南投|苗栗|桃園|高雄|基隆|宜蘭|花蓮)""".r

  // Matched name fragment -> one-character abbreviation. Every alternative of
  // `pattern` must have an entry here, or `location2Area` would throw.
  val shortmap = Map[String, String](
    "台南" -> "南", "臺南" -> "南",
    "台北" -> "北", "臺北" -> "北", "北市" -> "北",
    "台中" -> "中", "臺中" -> "中",
    "台東" -> "東", "臺東" -> "東",
    "新北" -> "新",
    "新竹" -> "竹",
    "彰化" -> "彰",
    "屏東" -> "屏",
    "澎湖" -> "澎",
    "嘉義" -> "嘉",
    "雲林" -> "雲",
    "南投" -> "投",
    "苗栗" -> "苗",
    "桃園" -> "桃",
    "高雄" -> "高",
    "基隆" -> "基",
    "宜蘭" -> "宜",
    "花蓮" -> "花"
  )

  /** Concatenates the distinct, sorted abbreviations of all show locations. */
  def extractLocation(ss: Seq[ShowInfo]): String =
    ss.map(_.location).map(location2Area).distinct.sorted.foldRight("")(_ + _)

  /** Maps an address to a city abbreviation, or "" when no city is recognised. */
  def location2Area(loc: String): String =
    pattern.findFirstMatchIn(loc) match {
      case Some(m) => shortmap(m.group(0))
      case None => ""
    }

  /** Option variant of [[location2Area]]; absent locations map to "". */
  def location2Area(loc: Option[String]): String = loc match {
    case Some(x) => location2Area(x)
    case None => ""
  }

  /** Treats null and empty strings as absent. */
  def opt(s: String): Option[String] =
    if (s == null || s == "") None else Some(s)

  /** Identity overload so call sites can pass either raw strings or Options. */
  def opt[T](s: Option[T]): Option[T] = s

  def toDouble(s: String): Double = s.toDouble

  /** Applies `f` to non-null, non-empty strings; absent otherwise. */
  def opt[T](s: String, f: String => T): Option[T] =
    if (s == null || s == "") None else Some(f(s))
}
| misgod/twculture | server/src/main/scala/com/a30corner/twculture/server/OpenData.scala | Scala | apache-2.0 | 10,256 |
/*
* Copyright 2019 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package laws
import cats.laws._
import org.http4s.util.Renderer
/** Laws that any `HttpCodec[A]` instance must satisfy. */
trait HttpCodecLaws[A] {
  implicit def C: HttpCodec[A]

  /** Rendering a value and parsing it back must yield the original value. */
  def httpCodecRoundTrip(a: A): IsEq[ParseResult[A]] =
    C.parse(Renderer.renderString(a)) <-> Right(a)
}
object HttpCodecLaws {
  /** Summons the implicit codec for `A` and packages it as a law set. */
  def apply[A](implicit httpCodecA: HttpCodec[A]): HttpCodecLaws[A] =
    new HttpCodecLaws[A] {
      val C = httpCodecA
    }
}
| rossabaker/http4s | laws/src/main/scala/org/http4s/laws/HttpCodecLaws.scala | Scala | apache-2.0 | 999 |
/*
* Copyright 2017-2020 Aleksey Fomkin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package korolev.effect
import java.util.{Timer, TimerTask}
import korolev.effect.Effect.Promise
import korolev.effect.syntax._
import scala.collection.concurrent.TrieMap
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
/**
 * Timer-backed scheduler lifted into an effect type `F`.
 *
 * All scheduling through one instance shares a single `java.util.Timer`, and
 * therefore one background thread.
 */
final class Scheduler[F[_]: Effect] {

  import Scheduler._

  private val timer = new Timer()

  /**
   * Returns a stream that emits one unit `delay` after each `pull()`, until
   * `cancel()` is called.
   *
   * NOTE(review): `cancel()` invokes `cb` without a null check, so cancelling
   * a stream whose `pull()` was never awaited would NPE — confirm callers
   * always pull before cancelling.
   */
  def schedule(delay: FiniteDuration): F[Stream[F, Unit]] = Effect[F].delay {
    new Stream[F, Unit] {
      var canceled = false
      var cb: Either[Throwable, Option[Unit]] => Unit = _
      var task: TimerTask = _
      def pull(): F[Option[Unit]] = Effect[F].promise { cb =>
        if (canceled) cb(Right(None)) else {
          this.cb = cb
          this.task = new TimerTask { def run(): Unit = cb(Right(Some(()))) }
          timer.schedule(task, delay.toMillis)
        }
      }
      def cancel(): F[Unit] = Effect[F].delay {
        if (task != null) {
          canceled = true
          task.cancel()
          task = null
          // Complete the pending pull with end-of-stream.
          cb(Right(None))
        }
      }
    }
  }

  /** Completes `delay` after the returned effect is run; one-shot analogue of `schedule`. */
  def sleep(delay: FiniteDuration): F[Unit] = Effect[F].promise { cb =>
    val task: TimerTask = new TimerTask { def run(): Unit = cb(Right(())) }
    timer.schedule(task, delay.toMillis)
  }

  /** Effect-wrapped version of [[unsafeScheduleOnce]]. */
  def scheduleOnce[T](delay: FiniteDuration)(job: => F[T]): F[JobHandler[F, T]] =
    Effect[F].delay(unsafeScheduleOnce(delay)(job))

  /**
   * Schedules `job` to start after `delay` (side effect happens immediately),
   * returning a handle to await the result or cancel the not-yet-started task.
   *
   * NOTE(review): there is a window between `result` reading `completed` and
   * assigning `promise` in which a finishing job could complete without
   * notifying the caller — confirm whether this race matters in practice.
   */
  def unsafeScheduleOnce[T](delay: FiniteDuration)(job: => F[T]): JobHandler[F, T] =
    new JobHandler[F, T] {
      // Set exactly once by whichever of {job completion, result subscription} is first.
      @volatile private var completed: Either[Throwable, T] = _
      @volatile private var promise: Promise[T] = _
      private val task = new TimerTask {
        def run(): Unit = {
          job.runAsync { errorOrResult =>
            if (promise != null) promise(errorOrResult)
            else completed = errorOrResult
          }
        }
      }
      def result: F[T] = Effect[F].promise { cb =>
        if (completed != null) cb(completed)
        else promise = cb
      }
      def cancel(): F[Unit] = Effect[F].delay(unsafeCancel())
      def unsafeCancel(): Unit = {
        task.cancel()
        ()
      }
      timer.schedule(task, delay.toMillis)
    }
}
object Scheduler {

  // One Scheduler (hence one Timer thread) per Effect instance. Values are
  // stored with the type parameter erased to `List`; the casts are safe only
  // because each cached scheduler is keyed by the Effect it was created for.
  private val cache = TrieMap.empty[AnyRef, Scheduler[List]]

  implicit def schedulerF[F[_]: Effect]: Scheduler[F] = cache
    .getOrElseUpdate(Effect[F], new Scheduler[F].asInstanceOf[Scheduler[List]])
    .asInstanceOf[Scheduler[F]]

  /** Summons the implicit scheduler for `F`. */
  def apply[F[_]: Scheduler]: Scheduler[F] =
    implicitly[Scheduler[F]]

  /** Handle for a job scheduled with `scheduleOnce`: await its result or cancel it. */
  trait JobHandler[F[_], T] {
    def unsafeCancel(): Unit
    def cancel(): F[Unit]
    def result: F[T]
  }
}
| fomkin/korolev | modules/effect/src/main/scala/korolev/effect/Scheduler.scala | Scala | apache-2.0 | 3,224 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.jsenv.test.kit
import scala.annotation.tailrec
import scala.concurrent.Promise
import scala.concurrent.duration.Deadline
import scala.util.Try
import java.nio.ByteBuffer
import java.nio.channels.{Channels, ReadableByteChannel}
import java.io.InputStream
import java.util.concurrent._
/**
 * Deadline-aware reader over a JS environment's output stream.
 *
 * The stream is handed over asynchronously via [[onInputStream]]; reads run
 * on a single-threaded executor so they can be timed out and cancelled. Run
 * completion is tracked (via [[onRunComplete]]) so read failures can report
 * the run's failure as their cause.
 */
private[kit] final class IOReader {

  private val executor = Executors.newSingleThreadExecutor()

  // All three fields are guarded by `synchronized` on this.
  private[this] var _closed = false
  private[this] var _channel: ReadableByteChannel = _
  private[this] val run = Promise[Unit]()

  /**
   * Reads exactly `len` bytes, throwing [[TimeoutException]] if the channel
   * does not arrive or the bytes cannot be read before `deadline`.
   */
  def read(len: Int, deadline: Deadline): ByteBuffer = {
    val chan = try {
      waitOnChannel(deadline)
    } catch {
      case t: TimeoutException =>
        // Re-throw with a message identifying which phase timed out.
        throw new TimeoutException("timed out waiting on run to call onOutputStream")
    }

    val task = executor.submit(
      new Callable[ByteBuffer] {
        def call(): ByteBuffer = readLoop(chan, ByteBuffer.allocate(len))
      }
    )

    try {
      task.get(millisLeft(deadline), TimeUnit.MILLISECONDS)
    } catch {
      case e: ExecutionException =>
        // Unwrap the failure thrown inside readLoop.
        throw e.getCause()
      case e: CancellationException =>
        throw new AssertionError("unexpected exception while running read task", e)
      case e: InterruptedException =>
        throw new AssertionError("unexpected exception while running read task", e)
      case e: TimeoutException =>
        task.cancel(true)
        throw new TimeoutException("timed out reading from stream")
    }
  }

  /** Callback for the JS env; must be called at most once. */
  def onInputStream(in: InputStream): Unit = synchronized {
    require(_channel == null, "onInputStream called twice")

    if (_closed) {
      in.close()
    } else {
      _channel = Channels.newChannel(in)
      notifyAll()
    }
  }

  /** Records the run result and wakes any thread waiting on completion. */
  def onRunComplete(t: Try[Unit]): Unit = synchronized {
    run.complete(t)
    notifyAll()
  }

  def close(): Unit = synchronized {
    if (_channel != null)
      _channel.close()
    _closed = true
  }

  /** Blocks until the channel arrives; fails if the run completes without providing one. */
  private def waitOnChannel(deadline: Deadline) = synchronized {
    while (_channel == null && !run.isCompleted)
      wait(millisLeft(deadline))

    if (_channel == null) {
      throw new AssertionError(
        "run completed and did not call onOutputStream", runFailureCause())
    }

    _channel
  }

  // Only meaningful once the run has completed; null when the run succeeded.
  private def runFailureCause() = {
    require(run.isCompleted)
    run.future.value.get.failed.getOrElse(null)
  }

  @tailrec
  private def readLoop(chan: ReadableByteChannel, buf: ByteBuffer): buf.type = {
    if (chan.read(buf) == -1) {
      // If we have reached the end of the stream, we wait for completion of the
      // run so we can report a potential failure as a cause.
      synchronized {
        while (!run.isCompleted)
          wait()
      }
      throw new AssertionError("reached end of stream", runFailureCause())
    } else if (buf.hasRemaining()) {
      readLoop(chan, buf)
    } else {
      buf.flip()
      buf
    }
  }

  /** Milliseconds until `deadline`; throws [[TimeoutException]] if already past. */
  private def millisLeft(deadline: Deadline): Long = {
    val millis = deadline.timeLeft.toMillis
    if (millis <= 0) {
      throw new TimeoutException
    }
    millis
  }
}
| nicolasstucki/scala-js | js-envs-test-kit/src/main/scala/org/scalajs/jsenv/test/kit/IOReader.scala | Scala | apache-2.0 | 3,352 |
import play.api._
import com.mongodb.casbah.Imports._
import utils.Config
/**
 * Play global settings: on startup, ensures a TTL index exists on the
 * `verifiers` collection so verifier documents expire automatically.
 */
object Global extends GlobalSettings {
  // Override onStart to create the TTL index on the verifiers collection
  override def onStart(app: Application) {
    // MongoDB TTL index: documents are removed 300 s (5 min) after their
    // "created-at" timestamp.
    val key = MongoDBObject("created-at" -> 1)
    val option = MongoDBObject("expireAfterSeconds" -> 300)
    global.db("verifiers").createIndex(key, option)
  }
}
// global object to store app globals
package object global {
  // App utility globals
  val config = Config
  // Shared Casbah client for the local "whereu@" database.
  val db = MongoClient("localhost", 27017)("whereu@")

  // GCM-related constants
  val GCM_RETRIES = 5
  val GCM_TYPE_REQUEST = "AT_REQUEST"
  val GCM_TYPE_RESPONSE = "AT_RESPONSE"

  // JSON argument globals
  val OS_IOS = "IOS"
  val OS_ANDROID = "ANDROID"

  // Miscellaneous globals
  // Earth radius; presumably kilometres given the value — confirm at call sites.
  val EARTH_RADIUS = 6371
}
| whereuat/whereuat-server | app/Global.scala | Scala | apache-2.0 | 824 |
package io.eels.component.jdbc
import java.sql.Connection
import com.sksamuel.exts.Logging
import com.sksamuel.exts.jdbc.ResultSetIterator
import io.eels.Row
import io.eels.schema.StructType
/**
 * Writes rows to a JDBC table in batches and can create the target table
 * from the eel schema if it does not already exist.
 *
 * A single connection is obtained eagerly from `connFn` and reused; when
 * `autoCommit` is false, each batch (or DDL statement) is committed
 * explicitly and rolled back on failure.
 */
class JdbcInserter(val connFn: () => Connection,
                   val table: String,
                   val schema: StructType,
                   val autoCommit: Boolean,
                   val dialect: JdbcDialect) extends Logging {

  logger.debug("Connecting to JDBC to insert.. ..")
  val conn = connFn()
  conn.setAutoCommit(autoCommit)
  logger.debug(s"Connected successfully; autoCommit=$autoCommit")

  /**
   * Inserts all rows of `batch` with one prepared statement via JDBC
   * batching. Parameters are bound positionally (JDBC indices are 1-based).
   * On failure, rolls back (when not auto-committing) and rethrows.
   */
  def insertBatch(batch: Seq[Row]): Unit = {
    val stmt = conn.prepareStatement(dialect.insertQuery(schema, table))
    try {
      batch.foreach { row =>
        row.values.zipWithIndex.foreach { case (value, k) =>
          stmt.setObject(k + 1, value)
        }
        stmt.addBatch()
      }
      val result = stmt.executeBatch()
      if (!autoCommit) conn.commit()
    } catch {
      case t: Throwable =>
        logger.error("Batch failure", t)
        if (!autoCommit)
          conn.rollback()
        throw t
    } finally {
      stmt.close()
    }
  }

  /**
   * Creates the table (using the dialect's CREATE statement) if it is not
   * already present, comparing names case-insensitively against the
   * database metadata.
   */
  def ensureTableCreated(): Unit = {
    logger.info(s"Ensuring table [$table] is created")

    def tableExists(): Boolean = {
      logger.debug(s"Fetching list of tables to detect if $table exists")
      // Column index 3 of the JDBC metadata result row is TABLE_NAME.
      val tables = ResultSetIterator.strings(conn.getMetaData.getTables(null, null, null, Array("TABLE"))).toList
      val tableNames = tables.map(x => x(3).toLowerCase)
      val exists = tableNames.contains(table.toLowerCase())
      logger.debug(s"${tables.size} tables found; $table exists == $exists")
      exists
    }

    if (!tableExists()) {
      val sql = dialect.create(schema, table)
      logger.info(s"Creating table $table [$sql]")
      val stmt = conn.createStatement()
      try {
        stmt.executeUpdate(sql)
        if (!autoCommit) conn.commit()
      } catch {
        case t: Throwable =>
          logger.error("Batch failure", t)
          if (!autoCommit)
            conn.rollback()
          throw t
      } finally {
        stmt.close()
      }
    }
  }
} | stheppi/eel | eel-components/src/main/scala/io/eels/component/jdbc/JdbcInserter.scala | Scala | apache-2.0 | 2,171 |
package zeroformatter
import dog._
import scalaz.std.anyVal._
// Root of the test union; `Union[Int]` tags each case with an integer key.
sealed trait TestADT extends Union[Int] with Product with Serializable
// Union case with key 1: a single int field at wire index 0.
@ZeroFormattable
final case class Test0(@Index(0) a: Int) extends TestADT {
  override val key = 1
}
// Union case with key 2: two int fields at wire indices 0 and 1.
@ZeroFormattable
final case class Test1(@Index(0) b: Int, @Index(1) c: Int) extends TestADT {
  override val key = 2
}
/**
 * Round-trip serialization tests for the `TestADT` union: each serialize test
 * pins the exact ZeroFormatter byte output, and each deserialize test feeds
 * those bytes back and expects the original value.
 */
object UnionFormatterTest extends Base {

  val `serialize Union[Int, Test0]` = TestCase {
    val value: TestADT = Test0(1)
    // Little-endian 32-bit words; the first is the total byte size (0x18 = 24)
    // and the second is the union key (1). NOTE(review): remaining words read
    // as member offsets and field values — confirm against the ZeroFormatter spec.
    val bytes =
      Array(
        0x18, 0x00, 0x00, 0x00,
        0x01, 0x00, 0x00, 0x00,
        0x10, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x14, 0x00, 0x00, 0x00,
        0x01, 0x00, 0x00, 0x00
      ).map(_.toByte)
    for {
      _ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
    } yield (value, bytes)
  }

  // Reuses the serialize test's (value, bytes) pair and checks the inverse direction.
  val `deserialize Union[Int, Test0]` = TestCase {
    for {
      values <- `serialize Union[Int, Test0]`
      _ <- assert.equal(values._1, ZeroFormatter.deserialize[TestADT](values._2)).lift
    } yield ()
  }

  val `serialize Union[Int, Test1]` = TestCase {
    val value: TestADT = Test1(2, 3)
    // Same layout for the two-field case: total size 0x20 = 32, key 2.
    val bytes =
      Array(
        0x20, 0x00, 0x00, 0x00,
        0x02, 0x00, 0x00, 0x00,
        0x18, 0x00, 0x00, 0x00,
        0x01, 0x00, 0x00, 0x00,
        0x18, 0x00, 0x00, 0x00,
        0x1c, 0x00, 0x00, 0x00,
        0x02, 0x00, 0x00, 0x00,
        0x03, 0x00, 0x00, 0x00
      ).map(_.toByte)
    for {
      _ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
    } yield (value, bytes)
  }

  val `deserialize Union[Int, Test1]` = TestCase {
    for {
      values <- `serialize Union[Int, Test1]`
      _ <- assert.equal(values._1, ZeroFormatter.deserialize[TestADT](values._2)).lift
    } yield ()
  }
}
| pocketberserker/scala-zero-formatter | zero-formatter/src/test/scala/zeroformatter/UnionFormatterTest.scala | Scala | mit | 1,766 |
package name.abhijitsarkar.akka
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.{ FlatSpec, Matchers }
import scala.concurrent.{ Await, Future }
/**
 * Verifies that `PartialGraph.g` emits the greatest of three values.
 *
 * @author Abhijit Sarkar
 */
class PartialGraphSpec extends FlatSpec with Matchers {
  "partial graph" should "pick the greatest of 3 values" in {
    import PartialGraph._
    // Materialize the graph with a dedicated actor system.
    implicit val system = ActorSystem("partial")
    implicit val materializer = ActorMaterializer()
    val max: Future[Int] = g.run
    import scala.concurrent.duration._
    // Await is acceptable here: test code with a short, explicit upper bound.
    Await.result(max, 300.millis) should equal(3)
  }
}
| asarkar/akka | akka-streams-learning/random-examples/src/test/scala/name/abhijitsarkar/akka/PartialGraphSpec.scala | Scala | gpl-3.0 | 598 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.spark.sql._
import org.apache.log4j._
import scala.util.Random
import org.apache.spark.api.java.JavaRDD
/**
 * Benchmark matching a small fixed set of key strings against a large
 * generated string collection with four implementations: plain Spark RDDs,
 * the DIQL "diablo" loop DSL (`v`), DIQL's SQL generator (`s`), and Spark SQL.
 *
 * args(0): number of benchmark repetitions; args(1): number of strings to generate.
 */
object StringMatch {
  def main ( args: Array[String] ) {
    val repeats = args(0).toInt
    val length = args(1).toLong
    val conf = new SparkConf().setAppName("StringMatch")
    val sc = new SparkContext(conf)
    val spark = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._
    conf.set("spark.logConf","false")
    conf.set("spark.eventLog.enabled","false")
    LogManager.getRootLogger().setLevel(Level.WARN)
    val rand = new Random()
    val max: Long = 100000 // 1000 different strings
    // Indexed RDD of `length` random strings of the form "x" + zero-padded number.
    val W = sc.parallelize(1L to length/100)
      .flatMap{ i => (1 to 100).map{ j => "x%03d".format(Math.abs(rand.nextInt())%max) } }
      .zipWithIndex.map{ case (line,i) => (i, line)}
      .cache()
    // The three needles, both broadcast (for the RDD variant) and collected
    // to a local array (for the diablo variant).
    val keys = List("x100", "x200", "x300")
    val K = keys.zipWithIndex.map{ case (line,i) => (i.toLong, line)}
    val Kbr = sc.broadcast(K)
    val K2 = sc.parallelize(K).collect()
    val Wds = W.toDS()
    val Kds = K.toDS()
    Wds.createOrReplaceTempView("Wds")
    Kds.createOrReplaceTempView("Kds")
    val w = Wds.count()
    val k = Kds.count()
    val size = sizeof(1L, "abcd") // 4 chars + length
    println("*** %d %.2f GB".format(length,length.toDouble*size/(1024.0*1024.0*1024.0)))

    // Runs each implementation once, printing the match count and wall time.
    // Failures are caught and printed so the remaining variants still run.
    def test () {
      var t: Long = System.currentTimeMillis()
      try {
        // Baseline: filter the RDD against the broadcast key list.
        val R = W.map{case (k,v) => v}.filter(v => Kbr.value.map{case (k,v) => v}.contains(v))
        println(R.count)
        println("**** SparkRDD run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try {
        // DIQL diablo loop DSL (the string is parsed by the `v` macro).
        v(sc,"""
          var C: vector[String] = vector();
          for i = 0, w-1 do {
            for j = 0, k-1 do
              if (W[i] == K2[j])
                C[j] := K2[j];
          };
          println(C.count);
        """)
        println("**** Diablo run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try{
        // Same loop program routed through DIQL's SQL generator (`s` macro).
        s(sc,"""
          var C: vector[String] = vector();
          for i = 0, w-1 do {
            for j = 0, k-1 do
              if (Wds[i] == Kds[j])
                C[j] := Kds[j];
          };
          println(C.count);
        """)
        println("**** SQLGen run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try{
        // Hand-written Spark SQL with a broadcast-join hint.
        var R = spark.sql("SELECT /*+ BROADCAST(Kds) */ Kds._2 FROM Wds JOIN Kds ON Wds._2 == Kds._2");
        R.createOrReplaceTempView("R")
        println(R.count())
        println("**** SparkSQL run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
    }

    for ( i <- 1 to repeats )
      test()
    sc.stop()
  }
}
| fegaras/DIQL | benchmarks/sqlgen/StringMatch.scala | Scala | apache-2.0 | 3,069 |
package mesosphere.marathon.api.v2.json
import java.lang.{ Double => JDouble, Integer => JInt }
import java.util.concurrent.TimeUnit.SECONDS
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.core._
import com.fasterxml.jackson.databind.Module.SetupContext
import com.fasterxml.jackson.databind._
import com.fasterxml.jackson.databind.deser.Deserializers
import com.fasterxml.jackson.databind.ser.Serializers
import org.apache.mesos.{ Protos => mesos }
import mesosphere.marathon.Protos.{ Constraint, MarathonTask }
import mesosphere.marathon.api.v2._
import mesosphere.marathon.api.validation.FieldConstraints._
import mesosphere.marathon.health.HealthCheck
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state.{ Container, PathId, Timestamp, UpgradeStrategy }
import scala.collection.immutable.Seq
import scala.concurrent.duration.FiniteDuration
/**
 * Jackson module registering Marathon's custom (de)serializers: protobuf-backed
 * types (Constraint, MarathonTask, TaskID), small value types (Timestamp,
 * PathId, FiniteDuration) and the special AppUpdate JSON handling.
 */
class MarathonModule extends Module {

  import mesosphere.marathon.api.v2.json.MarathonModule._

  // Classes this module handles; used for dispatch in setupModule.
  private val constraintClass = classOf[Constraint]
  private val marathonTaskClass = classOf[MarathonTask]
  private val enrichedTaskClass = classOf[EnrichedTask]
  private val timestampClass = classOf[Timestamp]
  private val finiteDurationClass = classOf[FiniteDuration]
  private val appUpdateClass = classOf[AppUpdate]
  private val groupIdClass = classOf[PathId]
  private val taskIdClass = classOf[mesos.TaskID]

  def getModuleName: String = "MarathonModule"

  def version(): Version = new Version(0, 0, 1, null, null, null)

  /** Registers one serializer/deserializer pair per handled class. */
  def setupModule(context: SetupContext) {
    context.addSerializers(new Serializers.Base {
      override def findSerializer(config: SerializationConfig, javaType: JavaType,
                                  beanDesc: BeanDescription): JsonSerializer[_] = {
        def matches(clazz: Class[_]): Boolean = clazz isAssignableFrom javaType.getRawClass
        if (matches(constraintClass)) ConstraintSerializer
        else if (matches(marathonTaskClass)) MarathonTaskSerializer
        else if (matches(enrichedTaskClass)) EnrichedTaskSerializer
        else if (matches(timestampClass)) TimestampSerializer
        else if (matches(finiteDurationClass)) FiniteDurationSerializer
        else if (matches(groupIdClass)) PathIdSerializer
        else if (matches(taskIdClass)) TaskIdSerializer
        else null // fall through to Jackson's default handling
      }
    })

    context.addDeserializers(new Deserializers.Base {
      override def findBeanDeserializer(javaType: JavaType, config: DeserializationConfig,
                                        beanDesc: BeanDescription): JsonDeserializer[_] = {
        def matches(clazz: Class[_]): Boolean = clazz isAssignableFrom javaType.getRawClass
        if (matches(constraintClass)) ConstraintDeserializer
        else if (matches(marathonTaskClass)) MarathonTaskDeserializer
        else if (matches(timestampClass)) TimestampDeserializer
        else if (matches(finiteDurationClass)) FiniteDurationDeserializer
        else if (matches(appUpdateClass)) AppUpdateDeserializer
        else if (matches(groupIdClass)) PathIdDeserializer
        else if (matches(taskIdClass)) TaskIdDeserializer
        else null
      }
    })
  }

  // Constraints are serialized as a JSON array: [field, operator] or
  // [field, operator, value].
  object ConstraintSerializer extends JsonSerializer[Constraint] {
    def serialize(constraint: Constraint, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeStartArray()
      jgen.writeString(constraint.getField)
      jgen.writeString(constraint.getOperator.toString)
      if (constraint.hasValue) {
        jgen.writeString(constraint.getValue)
      }
      jgen.writeEndArray()
    }
  }

  // Reads the array form above; the operator is matched case-insensitively
  // and an unknown operator is reported as a parse error.
  object ConstraintDeserializer extends JsonDeserializer[Constraint] {
    def deserialize(json: JsonParser, context: DeserializationContext): Constraint = {
      val builder = Constraint.newBuilder
      json.nextToken() // skip [
      builder.setField(json.getText)
      json.nextToken()
      val operatorString = json.getText.toUpperCase
      try {
        builder.setOperator(Constraint.Operator.valueOf(operatorString))
      }
      catch {
        case e: IllegalArgumentException =>
          throw new JsonParseException(s"Invalid operator: '$operatorString'", json.getCurrentLocation)
      }
      json.nextToken()
      if (json.getCurrentToken == JsonToken.VALUE_STRING) {
        builder.setValue(json.getText)
        json.nextToken()
      }
      builder.build
    }
  }

  // Timestamps travel as their string form.
  object TimestampSerializer extends JsonSerializer[Timestamp] {
    def serialize(ts: Timestamp, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeString(ts.toString)
    }
  }

  object TimestampDeserializer extends JsonDeserializer[Timestamp] {
    def deserialize(json: JsonParser, context: DeserializationContext): Timestamp =
      Timestamp(json.getText)
  }

  // Note: loses sub-second resolution
  object FiniteDurationSerializer extends JsonSerializer[FiniteDuration] {
    def serialize(fd: FiniteDuration, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeNumber(fd.toSeconds)
    }
  }

  // Note: loses sub-second resolution
  object FiniteDurationDeserializer extends JsonDeserializer[FiniteDuration] {
    def deserialize(json: JsonParser, context: DeserializationContext): FiniteDuration =
      FiniteDuration(json.getLongValue, SECONDS)
  }

  object MarathonTaskSerializer extends JsonSerializer[MarathonTask] {
    def serialize(task: MarathonTask, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeStartObject()
      writeFieldValues(task, jgen, provider)
      jgen.writeEndObject()
    }

    // Shared with EnrichedTaskSerializer; zero timestamps are written as null.
    def writeFieldValues(task: MarathonTask, jgen: JsonGenerator, provider: SerializerProvider) {
      val startedAt = task.getStartedAt
      val stagedAt = task.getStagedAt
      jgen.writeObjectField("id", task.getId)
      jgen.writeObjectField("host", task.getHost)
      jgen.writeObjectField("ports", task.getPortsList)
      jgen.writeObjectField("startedAt", if (startedAt == 0) null else Timestamp(startedAt))
      jgen.writeObjectField("stagedAt", if (stagedAt == 0) null else Timestamp(stagedAt))
      jgen.writeObjectField("version", task.getVersion)
    }
  }

  // TODO: handle fields!
  // Currently there is no support for handling updates to task instances (CD)
  object MarathonTaskDeserializer extends JsonDeserializer[MarathonTask] {
    def deserialize(json: JsonParser, context: DeserializationContext): MarathonTask = {
      // Discards all input and returns an empty task — see TODO above.
      MarathonTask.newBuilder.build
    }
  }

  // Flattens the task's fields into the object alongside appId; optional
  // collections are emitted only when non-empty.
  object EnrichedTaskSerializer extends JsonSerializer[EnrichedTask] {
    def serialize(enriched: EnrichedTask, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeStartObject()
      jgen.writeObjectField("appId", enriched.appId)
      MarathonTaskSerializer.writeFieldValues(enriched.task, jgen, provider)
      if (enriched.servicePorts.nonEmpty) {
        jgen.writeObjectField("servicePorts", enriched.servicePorts)
      }
      if (enriched.healthCheckResults.nonEmpty) {
        jgen.writeObjectField("healthCheckResults", enriched.healthCheckResults)
      }
      jgen.writeEndObject()
    }
  }

  object PathIdSerializer extends JsonSerializer[PathId] {
    def serialize(id: PathId, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeString(id.toString)
    }
  }

  object PathIdDeserializer extends JsonDeserializer[PathId] {
    def deserialize(json: JsonParser, context: DeserializationContext): PathId = {
      val tree: JsonNode = json.getCodec.readTree(json)
      tree.textValue().toPath
    }
  }

  object TaskIdSerializer extends JsonSerializer[mesos.TaskID] {
    def serialize(id: mesos.TaskID, jgen: JsonGenerator, provider: SerializerProvider) {
      jgen.writeString(id.getValue)
    }
  }

  object TaskIdDeserializer extends JsonDeserializer[mesos.TaskID] {
    def deserialize(json: JsonParser, context: DeserializationContext): mesos.TaskID = {
      val tree: JsonNode = json.getCodec.readTree(json)
      mesos.TaskID.newBuilder.setValue(tree.textValue).build
    }
  }

  // Deserializes via AppUpdateBuilder (which ignores unknown properties) and
  // treats an explicit `"container": null` as Container.Empty, so callers can
  // distinguish "remove the container" from "container not mentioned".
  object AppUpdateDeserializer extends JsonDeserializer[AppUpdate] {
    override def deserialize(json: JsonParser, context: DeserializationContext): AppUpdate = {
      val oc = json.getCodec
      val tree: JsonNode = oc.readTree(json)
      // NOTE(review): `containerDeserializer` is looked up but never used —
      // confirm before removing.
      val containerDeserializer = context.findRootValueDeserializer(
        context.constructType(classOf[Container])
      ).asInstanceOf[JsonDeserializer[Container]]
      val emptyContainer = tree.has("container") && tree.get("container").isNull
      val appUpdate =
        tree.traverse(oc).readValueAs(classOf[AppUpdateBuilder]).build
      if (emptyContainer)
        appUpdate.copy(container = Some(Container.Empty))
      else appUpdate
    }
  }
}
object MarathonModule {
  // TODO: make @JsonDeserialize work on the 'container' field
  // of the 'AppUpdate' class and remove this workaround.
  /**
   * Deserialization-only mirror of AppUpdate: unknown JSON properties are
   * ignored here, and the result is converted to a real AppUpdate via build().
   */
  @JsonIgnoreProperties(ignoreUnknown = true)
  case class AppUpdateBuilder(
      id: Option[PathId] = None, //needed for updates inside a group
      cmd: Option[String] = None,
      args: Option[Seq[String]] = None,
      user: Option[String] = None,
      env: Option[Map[String, String]] = None,
      instances: Option[JInt] = None,
      cpus: Option[JDouble] = None,
      mem: Option[JDouble] = None,
      disk: Option[JDouble] = None,
      executor: Option[String] = None,
      constraints: Option[Set[Constraint]] = None,
      uris: Option[Seq[String]] = None,
      storeUrls: Option[Seq[String]] = None,
      @FieldPortsArray ports: Option[Seq[JInt]] = None,
      requirePorts: Option[Boolean] = None,
      @FieldJsonProperty("backoffSeconds") backoff: Option[FiniteDuration] = None,
      backoffFactor: Option[JDouble] = None,
      container: Option[Container] = None,
      healthChecks: Option[Set[HealthCheck]] = None,
      dependencies: Option[Set[PathId]] = None,
      upgradeStrategy: Option[UpgradeStrategy] = None,
      version: Option[Timestamp] = None) {

    /** Copies every field into the real AppUpdate, positionally. */
    def build(): AppUpdate = AppUpdate(
      id, cmd, args, user, env, instances, cpus, mem, disk, executor, constraints,
      uris, storeUrls, ports, requirePorts, backoff, backoffFactor, container, healthChecks,
      dependencies, upgradeStrategy, version
    )
  }
}
| sttts/marathon | src/main/scala/mesosphere/marathon/api/v2/json/MarathonModule.scala | Scala | apache-2.0 | 10,230 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.serializer
import java.io.File
import java.lang.reflect.Modifier
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.serialization.Bigdl.{AttrValue, BigDLModule}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.tensor.{Tensor, TensorNumericMath}
import com.intel.analytics.bigdl.utils.BigDLSpecHelper
import com.intel.analytics.bigdl.utils.serializer.converters.DataConverter
import org.reflections.Reflections
import org.reflections.scanners.SubTypesScanner
import org.reflections.util.{ClasspathHelper, ConfigurationBuilder, FilterBuilder}
import collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe
/**
 * Auto-generated serialization test suite: scans the classpath for every
 * concrete AbstractModule subclass and runs a matching *SerialTest class
 * for each one, so that newly added layers cannot silently ship without a
 * serialization round-trip test.
 */
class SerializerSpec extends BigDLSpecHelper {

  // Modules with no generic serialization support (or with dedicated tests
  // elsewhere); they are skipped by the reflective scan below.
  private val excluded = Set[String](
    "com.intel.analytics.bigdl.nn.CellUnit",
    "com.intel.analytics.bigdl.nn.tf.ControlDependency",
    "com.intel.analytics.bigdl.utils.tf.AdapterForTest",
    "com.intel.analytics.bigdl.utils.serializer.TestModule",
    "com.intel.analytics.bigdl.utils.ExceptionTest",
    "com.intel.analytics.bigdl.utils.serializer.SubModuleOne",
    "com.intel.analytics.bigdl.utils.serializer.SubModuleTwo",
    "com.intel.analytics.bigdl.nn.mkldnn.AvgPooling",
    "com.intel.analytics.bigdl.nn.mkldnn.CAddTable",
    "com.intel.analytics.bigdl.nn.mkldnn.ConcatTable",
    "com.intel.analytics.bigdl.nn.mkldnn.DnnBase",
    "com.intel.analytics.bigdl.nn.mkldnn.Identity",
    "com.intel.analytics.bigdl.nn.mkldnn.Input",
    "com.intel.analytics.bigdl.nn.mkldnn.JoinTable",
    "com.intel.analytics.bigdl.nn.mkldnn.Linear",
    "com.intel.analytics.bigdl.nn.mkldnn.LRN",
    "com.intel.analytics.bigdl.nn.mkldnn.MaxPooling",
    "com.intel.analytics.bigdl.nn.mkldnn.ReLU",
    "com.intel.analytics.bigdl.nn.mkldnn.ReorderMemory",
    "com.intel.analytics.bigdl.nn.mkldnn.SelectTable",
    "com.intel.analytics.bigdl.nn.mkldnn.Sequential",
    "com.intel.analytics.bigdl.nn.mkldnn.SoftMax",
    "com.intel.analytics.bigdl.nn.mkldnn.SpatialBatchNormalization",
    "com.intel.analytics.bigdl.nn.mkldnn.SpatialConvolution",
    "com.intel.analytics.bigdl.nn.mkldnn.Dropout",
    "com.intel.analytics.bigdl.nn.mkldnn.DnnGraph",
    "com.intel.analytics.bigdl.nn.mkldnn.BlasWrapper",
    "com.intel.analytics.bigdl.nn.mkldnn.Output",
    "com.intel.analytics.bigdl.nn.mkldnn.InputWrapper",
    "com.intel.analytics.bigdl.utils.intermediate.IRGraph",
    "com.intel.analytics.bigdl.nn.mkldnn.RNN"
  )

  // Maybe one serial test class contains multiple module test
  // Also keras layer main/test class mapping are weired
  private val unRegularNameMapping = Map[String, String](
    // Many to one mapping
    "com.intel.analytics.bigdl.nn.ops.Enter" ->
      "com.intel.analytics.bigdl.nn.ops.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.Enter" ->
      "com.intel.analytics.bigdl.nn.tf.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.NextIteration" ->
      "com.intel.analytics.bigdl.nn.ops.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.NextIteration" ->
      "com.intel.analytics.bigdl.nn.tf.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.Exit" ->
      "com.intel.analytics.bigdl.nn.ops.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.Exit" ->
      "com.intel.analytics.bigdl.nn.tf.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.LoopCondition" ->
      "com.intel.analytics.bigdl.nn.ops.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.LoopCondition" ->
      "com.intel.analytics.bigdl.nn.tf.ControlOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.StackCreator" ->
      "com.intel.analytics.bigdl.nn.ops.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.StackCreator" ->
      "com.intel.analytics.bigdl.nn.tf.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.StackPush" ->
      "com.intel.analytics.bigdl.nn.ops.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.StackPush" ->
      "com.intel.analytics.bigdl.nn.tf.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.StackPop" ->
      "com.intel.analytics.bigdl.nn.ops.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.tf.StackPop" ->
      "com.intel.analytics.bigdl.nn.tf.StackOpsSerialTest",
    "com.intel.analytics.bigdl.nn.ops.TensorArrayWrite" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayWrite" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.ops.TensorArrayRead" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayRead" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.ops.TensorArrayGrad" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayGrad" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayCreator" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArrayScatterSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayScatter" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArrayScatterSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayGather" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArrayScatterSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayClose" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArrayScatterSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArrayConcat" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySplitSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArraySplit" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySplitSerialTest",
    "com.intel.analytics.bigdl.nn.tf.TensorArraySize" ->
      "com.intel.analytics.bigdl.nn.ops.TensorArraySplitSerialTest",
    // Keras layers
    "com.intel.analytics.bigdl.nn.keras.Input" ->
      "com.intel.analytics.bigdl.keras.nn.InputSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Sequential" ->
      "com.intel.analytics.bigdl.keras.nn.SequentialSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Activation" ->
      "com.intel.analytics.bigdl.keras.nn.ActivationSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SoftMax" ->
      "com.intel.analytics.bigdl.keras.nn.SoftMaxSerialTest",
    "com.intel.analytics.bigdl.nn.keras.AtrousConvolution1D" ->
      "com.intel.analytics.bigdl.keras.nn.AtrousConvolution1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.AtrousConvolution2D" ->
      "com.intel.analytics.bigdl.keras.nn.AtrousConvolution2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.AveragePooling1D" ->
      "com.intel.analytics.bigdl.keras.nn.AveragePooling1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.AveragePooling2D" ->
      "com.intel.analytics.bigdl.keras.nn.AveragePooling2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.AveragePooling3D" ->
      "com.intel.analytics.bigdl.keras.nn.AveragePooling3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.BatchNormalization" ->
      "com.intel.analytics.bigdl.keras.nn.BatchNormalizationSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Bidirectional" ->
      "com.intel.analytics.bigdl.keras.nn.BidirectionalSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ConvLSTM2D" ->
      "com.intel.analytics.bigdl.keras.nn.ConvLSTM2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Convolution1D" ->
      "com.intel.analytics.bigdl.keras.nn.Convolution1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Convolution2D" ->
      "com.intel.analytics.bigdl.keras.nn.Convolution2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Convolution3D" ->
      "com.intel.analytics.bigdl.keras.nn.Convolution3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Cropping1D" ->
      "com.intel.analytics.bigdl.keras.nn.Cropping1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Cropping2D" ->
      "com.intel.analytics.bigdl.keras.nn.Cropping2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Deconvolution2D" ->
      "com.intel.analytics.bigdl.keras.nn.Deconvolution2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ELU" ->
      "com.intel.analytics.bigdl.keras.nn.ELUSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Embedding" ->
      "com.intel.analytics.bigdl.keras.nn.EmbeddingSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GaussianDropout" ->
      "com.intel.analytics.bigdl.keras.nn.GaussianDropoutSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GaussianNoise" ->
      "com.intel.analytics.bigdl.keras.nn.GaussianNoiseSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalAveragePooling2D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalAveragePooling2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalMaxPooling2D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalMaxPooling2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalMaxPooling3D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalMaxPooling3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GRU" ->
      "com.intel.analytics.bigdl.keras.nn.GRUSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Highway" ->
      "com.intel.analytics.bigdl.keras.nn.HighwaySerialTest",
    "com.intel.analytics.bigdl.nn.keras.LeakyReLU" ->
      "com.intel.analytics.bigdl.keras.nn.LeakyReLUSerialTest",
    "com.intel.analytics.bigdl.nn.keras.LocallyConnected1D" ->
      "com.intel.analytics.bigdl.keras.nn.LocallyConnected1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.LocallyConnected2D" ->
      "com.intel.analytics.bigdl.keras.nn.LocallyConnected2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.LSTM" ->
      "com.intel.analytics.bigdl.keras.nn.LSTMSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Masking" ->
      "com.intel.analytics.bigdl.keras.nn.MaskingSerialTest",
    "com.intel.analytics.bigdl.nn.keras.MaxoutDense" ->
      "com.intel.analytics.bigdl.keras.nn.MaxoutDenseSerialTest",
    "com.intel.analytics.bigdl.nn.keras.MaxPooling1D" ->
      "com.intel.analytics.bigdl.keras.nn.MaxPooling1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.MaxPooling2D" ->
      "com.intel.analytics.bigdl.keras.nn.MaxPooling2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.MaxPooling3D" ->
      "com.intel.analytics.bigdl.keras.nn.MaxPooling3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Merge" ->
      "com.intel.analytics.bigdl.keras.nn.MergeSerialTest",
    "com.intel.analytics.bigdl.nn.keras.RepeatVector" ->
      "com.intel.analytics.bigdl.keras.nn.RepeatVectorSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SeparableConvolution2D" ->
      "com.intel.analytics.bigdl.keras.nn.SeparableConvolution2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SimpleRNN" ->
      "com.intel.analytics.bigdl.keras.nn.SimpleRNNSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SpatialDropout1D" ->
      "com.intel.analytics.bigdl.keras.nn.SpatialDropout1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SpatialDropout2D" ->
      "com.intel.analytics.bigdl.keras.nn.SpatialDropout2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SpatialDropout3D" ->
      "com.intel.analytics.bigdl.keras.nn.SpatialDropout3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.SReLU" ->
      "com.intel.analytics.bigdl.keras.nn.SReLUSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ThresholdedReLU" ->
      "com.intel.analytics.bigdl.keras.nn.ThresholdedReLUSerialTest",
    "com.intel.analytics.bigdl.nn.keras.TimeDistributed" ->
      "com.intel.analytics.bigdl.keras.nn.TimeDistributedSerialTest",
    "com.intel.analytics.bigdl.nn.keras.UpSampling1D" ->
      "com.intel.analytics.bigdl.keras.nn.UpSampling1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.UpSampling2D" ->
      "com.intel.analytics.bigdl.keras.nn.UpSampling2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.UpSampling3D" ->
      "com.intel.analytics.bigdl.keras.nn.UpSampling3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ZeroPadding1D" ->
      "com.intel.analytics.bigdl.keras.nn.ZeroPadding1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ZeroPadding2D" ->
      "com.intel.analytics.bigdl.keras.nn.ZeroPadding2DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Dense" ->
      "com.intel.analytics.bigdl.keras.nn.DenseSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Cropping3D" ->
      "com.intel.analytics.bigdl.keras.nn.Cropping3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Reshape" ->
      "com.intel.analytics.bigdl.keras.nn.ReshapeSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Permute" ->
      "com.intel.analytics.bigdl.keras.nn.PermuteSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Model" ->
      "com.intel.analytics.bigdl.keras.nn.ModelSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalAveragePooling3D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalAveragePooling3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalAveragePooling1D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalAveragePooling1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.ZeroPadding3D" ->
      "com.intel.analytics.bigdl.keras.nn.ZeroPadding3DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Dropout" ->
      "com.intel.analytics.bigdl.keras.nn.DropoutSerialTest",
    "com.intel.analytics.bigdl.nn.keras.GlobalMaxPooling1D" ->
      "com.intel.analytics.bigdl.keras.nn.GlobalMaxPooling1DSerialTest",
    "com.intel.analytics.bigdl.nn.keras.Flatten" ->
      "com.intel.analytics.bigdl.keras.nn.FlattenSerialTest",
    "com.intel.analytics.bigdl.nn.keras.KerasIdentityWrapper" ->
      "com.intel.analytics.bigdl.keras.nn.KerasIdentityWrapperSerialTest",
    "com.intel.analytics.bigdl.nn.keras.KerasLayerWrapper" ->
      "com.intel.analytics.bigdl.keras.nn.KerasLayerWrapperSerialTest"
  )

  // Default test-class naming convention: <moduleClassName> + "SerialTest".
  private val suffix = "SerialTest"

  private val testClasses = new mutable.HashSet[String]()

  // Scan the classpath once at construction time and register every
  // concrete AbstractModule subclass that is not explicitly excluded.
  {
    val filterBuilder = new FilterBuilder()
    val reflections = new Reflections(new ConfigurationBuilder()
      .filterInputsBy(filterBuilder)
      .setUrls(ClasspathHelper.forPackage("com.intel.analytics.bigdl.nn"))
      .setScanners(new SubTypesScanner()))
    val subTypes = reflections.getSubTypesOf(classOf[AbstractModule[_, _, _]])
      .asScala.filter(sub => !Modifier.isAbstract(sub.getModifiers))
      .filter(sub => !excluded.contains(sub.getName))
    subTypes.foreach(sub => testClasses.add(sub.getName))
  }

  /** Maps a module class name to its serialization-test class name,
   *  honoring the irregular mappings above before falling back to the
   *  naming convention. */
  private def getTestClassName(clsName: String): String =
    unRegularNameMapping.getOrElse(clsName, clsName + suffix)

  // Generate one round-trip test per discovered module. The test class is
  // instantiated reflectively via its first (expected no-arg) constructor.
  testClasses.foreach(cls => {
    "Serialization test of module " + cls should "be correct" in {
      val clsWholeName = getTestClassName(cls)
      val ins = Class.forName(clsWholeName)
      val testClass = ins.getConstructors()(0).newInstance()
      require(testClass.isInstanceOf[ModuleSerializationTest], s"$clsWholeName should be a " +
        s"subclass of com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest")
      testClass.asInstanceOf[ModuleSerializationTest].test()
    }
  })

  // Verifies that a serializer registered for a parent type (ParentModule)
  // is used for all of its subclasses when saving and loading.
  "Group serializer" should "work properly" in {
    ModuleSerializer.
      registerGroupModules("com.intel.analytics.bigdl.utils.serializer.ParentModule",
        ParentModuleSerializer)
    val subOne = new SubModuleOne[Float]()
    val subTwo = new SubModuleTwo[Float]()
    val serFileOne = File.createTempFile("SubOne", "bigdl")
    val serFileTwo = File.createTempFile("SubTwo", "bigdl")
    subOne.saveModule(serFileOne.getAbsolutePath, overWrite = true)
    subTwo.saveModule(serFileTwo.getAbsolutePath, overWrite = true)
    val loadedOne = Module.loadModule[Float](serFileOne.getAbsolutePath).
      asInstanceOf[SubModuleOne[Float]]
    val loadedTwo = Module.loadModule[Float](serFileTwo.getAbsolutePath).
      asInstanceOf[SubModuleTwo[Float]]
    loadedOne.value should be ("test_value")
    loadedTwo.value should be ("test_value")
  }
}
/**
 * Test fixture: minimal abstract module used by the group-serializer test.
 * Only its serialized form matters, so the compute methods are stubs.
 */
abstract class ParentModule[T: ClassTag](implicit ev: TensorNumeric[T]) extends
  AbstractModule[Tensor[T], Tensor[T], T] {

  // Forward pass is never exercised by the serialization test; deliberate stub.
  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    null
  }

  // Backward pass is likewise never exercised; deliberate stub.
  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    null
  }

  // Round-tripped through ParentModuleSerializer's "groupValue" attribute.
  var value : String = null
}
// Concrete subclass used to check that a serializer registered for the
// parent type also applies to subclasses.
class SubModuleOne[T: ClassTag](implicit ev: TensorNumeric[T]) extends ParentModule[T] {
}
// Second concrete subclass, confirming the parent-type serializer covers
// more than one subclass.
class SubModuleTwo[T: ClassTag](implicit ev: TensorNumeric[T]) extends ParentModule[T] {
}
/**
 * Group serializer registered for ParentModule: stores the constant
 * string "test_value" under the "groupValue" attribute on save and
 * writes it back into the module's `value` field on load.
 */
object ParentModuleSerializer extends ModuleSerializable {

  override def doSerializeModule[T: ClassTag](context: SerializeContext[T],
    bigDLModelBuilder: BigDLModule.Builder)(
    implicit ev: TensorNumericMath.TensorNumeric[T]): Unit = {
    val attrBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue[T](context, attrBuilder,
      "test_value", universe.typeOf[String])
    bigDLModelBuilder.putAttr("groupValue", attrBuilder.build)
  }

  override def doLoadModule[T: ClassTag](context: DeserializeContext)(
    implicit ev: TensorNumericMath.TensorNumeric[T]): AbstractModule[Activity, Activity, T] = {
    // Let the default loader rebuild the module, then restore the extra attribute.
    val loaded = super.doLoadModule(context).asInstanceOf[ParentModule[T]]
    val valueAttr = context.bigdlModule.getAttrMap.get("groupValue")
    loaded.value = DataConverter.getAttributeValue(context, valueAttr).asInstanceOf[String]
    loaded
  }
}
/**
 * Base class for the per-module serialization tests that SerializerSpec
 * discovers and runs reflectively; implementations perform the actual
 * save/load round trip inside test().
 */
private[bigdl] abstract class ModuleSerializationTest extends SerializerSpecHelper {
  def test(): Unit
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/serializer/SerializerSpec.scala | Scala | apache-2.0 | 18,579 |
// Copyright 2017 EPFL DATA Lab (data.epfl.ch)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package squid
package ir
import ir._
/**
 * Checks that the BindingNormalizer (applied online by the companion IR)
 * rewrites value bindings such as `val a = ...; body` into a canonical
 * application form, so that semantically equal program fragments compare
 * equal under `eqt`.
 */
class BindingNormalizerTests extends MyFunSuite(BindingNormalizerTests) {
  import DSL.Predef._

  // Fixtures: blocks with zero, two, three and five local bindings.
  val t = code"42"
  val u = code"val a = 1; val b = 2; a + b"
  val v = code"val a = 1; val b = 2; val c = 3; a + b + c"
  val w = code"val a = 1; val b = 2; val c = 3; val d = 4; val e = 5; a + b + c + d + e"

  // NOTE(review): fixtures here are compared against curried lambda
  // applications (one argument list per binding).
  test("Normalization of curried applications") {
    t eqt code"lib.uncurried0(42)()"
    u eqt code"((a: Int) => (b: Int) => a + b)(1)(2)"
    v eqt code"((a: Int) => (b: Int) => (c: Int) => a + b + c)(1)(2)(3)"
    w eqt code"((a: Int) => (b: Int) => (c: Int) => (d: Int) => (e: Int) => a + b + c + d + e)(1)(2)(3)(4)(5)"
    //println(ir"var x = 0; { val y = x; println(y) }; x") // TODO block normalization
  }

  // NOTE(review): fixtures here are compared against uncurried forms,
  // except `w`, which repeats the curried form — presumably intentional,
  // but worth confirming against the normalizer's behavior.
  test("Currying of applications") {
    t eqt code"(() => 42)()"
    u eqt code"((a: Int, b: Int) => a + b)(1, 2)"
    v eqt code"((a: Int, b: Int, c: Int) => a + b + c)(1, 2, 3)"
    w eqt code"((a: Int) => (b: Int) => (c: Int) => (d: Int) => (e: Int) => a + b + c + d + e)(1)(2)(3)(4)(5)"
  }
}
// IR under test: a simple AST with BindingNormalizer applied online,
// i.e. during term construction, so the fixtures above are already normalized.
object BindingNormalizerTests extends SimpleAST with OnlineOptimizer with BindingNormalizer
| epfldata/squid | src/test/scala/squid/ir/BindingNormalizerTests.scala | Scala | apache-2.0 | 1,825 |
/*
* Copyright (C) 2017 Radicalbit
*
* This file is part of flink-JPMML
*
* flink-JPMML is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* flink-JPMML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with flink-JPMML. If not, see <http://www.gnu.org/licenses/>.
*/
package io.radicalbit.flink.pmml
import io.radicalbit.flink.pmml.scala.api.PmmlModel
import io.radicalbit.flink.pmml.scala.api.functions.{EvaluationCoFunction, EvaluationFunction}
import io.radicalbit.flink.pmml.scala.api.reader.ModelReader
import io.radicalbit.flink.pmml.scala.models.control.ServingMessage
import io.radicalbit.flink.pmml.scala.models.input.BaseEvent
import io.radicalbit.flink.pmml.scala.models.prediction.Prediction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.ml.math.Vector
import org.apache.flink.streaming.api.functions.co.CoProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector
import _root_.scala.reflect.ClassTag
/** Main library package, it contains the core of the library.
*
* The `scala` package object provides implicit classes enriching Flink
* [[org.apache.flink.streaming.api.scala.DataStream]] in order to compute evaluations against streams.
*
*/
package object scala {

  /** Enriches Flink [[org.apache.flink.streaming.api.scala.DataStream]] with [[evaluate]] method, as
    *
    * {{{
    *   case class Input(values: Seq[Double])
    *   val inputStream = env.fromCollection(Seq(Input(Seq(1.0)), Input(Seq(3.0)))
    *   inputStream.evaluate(reader) { (event, model) =>
    *     val prediction = model.predict(event.toVector)
    *     prediction.value
    *   }
    * }}}
    *
    * @param stream The input stream
    * @tparam T The input stream inner Type
    */
  implicit class RichDataStream[T: TypeInformation: ClassTag](stream: DataStream[T]) {

    /**
      * It connects the main `DataStream` with the `ControlStream`.
      * The control stream is broadcast so every parallel instance of the
      * downstream operator observes every model-serving message.
      */
    def withSupportStream[CTRL <: ServingMessage: TypeInformation](
        supportStream: DataStream[CTRL]): ConnectedStreams[T, CTRL] =
      stream.connect(supportStream.broadcast)

    /** It evaluates the `DataStream` against the model pointed out by
      * [[io.radicalbit.flink.pmml.scala.api.reader.ModelReader]]; it takes as input an UDF `(T, PmmlModel) => R)` .
      * It's modeled on top of `EvaluationFunction`.
      *
      * @param modelReader the [[io.radicalbit.flink.pmml.scala.api.reader.ModelReader]] instance
      * @param f UDF function
      * @tparam R The output type
      * @return `R`
      */
    def evaluate[R: TypeInformation](modelReader: ModelReader)(f: (T, PmmlModel) => R): DataStream[R] = {
      // Anonymous subclass so the UDF closes over the lazily loaded evaluator.
      val abstractOperator = new EvaluationFunction[T, R](modelReader) {
        override def flatMap(value: T, out: Collector[R]): Unit = out.collect(f(value, evaluator))
      }

      stream.flatMap(abstractOperator)
    }
  }

  /**
    * It wraps the connected `<event,model>` stream and provides the evaluate function.
    *
    * @param connectedStream the connected stream: it chains the event Stream and the models control Stream
    * @tparam T Type information relative to the main event stream
    */
  implicit class RichConnectedStream[T <: BaseEvent: TypeInformation: ClassTag, CTRL <: ServingMessage](
      connectedStream: ConnectedStreams[T, CTRL]) {

    /**
      * It provides the evaluation function by applying
      * [[io.radicalbit.flink.pmml.scala.api.functions.EvaluationCoFunction]] to the connected streams.
      *
      * The first flatMap handles the event stream and applies the UDF (i.e. executing the punctual prediction)
      * The second flatMap handles models control stream and records the information relative to current model
      * and update the model instance
      *
      * @param f UDF for prediction manipulation and pre/post-processing logic
      * @tparam R UDF return type
      * @return The prediction output as defined by the UDF
      */
    def evaluate[R: TypeInformation](f: (T, PmmlModel) => R): DataStream[R] = {
      val abstractOperator = new EvaluationCoFunction[T, CTRL, R] {

        // Event side: pick the model matching the event's modelId (falling
        // back to loading it from metadata) and run the user's UDF.
        override def processElement1(event: T, ctx: CoProcessFunction[T, CTRL, R]#Context, out: Collector[R]): Unit = {
          val model = servingModels.getOrElse(event.modelId.hashCode, fromMetadata(event.modelId))
          out.collect(f(event, model))
        }
      }

      connectedStream.process(abstractOperator)
    }
  }

  /** Enriches Flink DataStream with [[evaluate]] on
    * [[https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/libs/ml/index.html FlinkML]]
    * [[org.apache.flink.ml.math.Vector]] input stream
    *
    * @param stream The input stream
    * @tparam V The input stream inner type; it is subclass of [[org.apache.flink.ml.math.Vector]]
    */
  implicit class QuickDataStream[V <: Vector: TypeInformation: ClassTag](stream: DataStream[V]) {

    /** Evaluates the `DataStream` against PmmlModel by invoking [[RichDataStream]] `evaluate` method.
      * It returns directly the prediction along with the input vector.
      *
      * @param modelReader The reader instance coupled to model source path.
      * @return (Prediction, V)
      */
    def quickEvaluate(modelReader: ModelReader): DataStream[(Prediction, V)] =
      new RichDataStream[V](stream).evaluate(modelReader) { (vec, model) =>
        val result: Prediction = model.predict(vec, None)
        (result, vec)
      }
  }
}
| francescofrontera/flink-jpmml | flink-jpmml-scala/src/main/scala/io/radicalbit/flink/pmml/scala/package.scala | Scala | agpl-3.0 | 5,956 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.collection.immutable.ListSet
import java.util.ConcurrentModificationException
import java.util.concurrent.atomic.AtomicReference
import org.scalatest.StackDepthExceptionHelper.getStackDepth
import org.scalatest.events._
import Suite.anErrorThatShouldCauseAnAbort
/**
* A suite of tests in which each test is represented as a function value. The “<code>Fun</code>” in <code>FunSuite</code> stands
* for “function.” Here's an example <code>FunSuite</code>:
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
* test("addition") {
* val sum = 1 + 1
* assert(sum === 2)
* assert(sum + 2 === 4)
* }
*
* test("subtraction") {
* val diff = 4 - 1
* assert(diff === 3)
* assert(diff - 2 === 1)
* }
* }
* </pre>
*
* <p>
* “<code>test</code>” is a method, defined in <code>FunSuite</code>, which will be invoked
* by the primary constructor of <code>MySuite</code>. You specify the name of the test as
* a string between the parentheses, and the test code itself between curly braces.
* The test code is a function passed as a by-name parameter to <code>test</code>, which registers
* it for later execution. One benefit of <code>FunSuite</code> compared to <code>Suite</code> is you need not name all your
* tests starting with “<code>test</code>.” In addition, you can more easily give long names to
* your tests, because you need not encode them in camel case, as you must do
* with test methods.
* </p>
*
* <p>
* A <code>FunSuite</code>'s lifecycle has two phases: the <em>registration</em> phase and the
* <em>ready</em> phase. It starts in registration phase and enters ready phase the first time
* <code>run</code> is called on it. It then remains in ready phase for the remainder of its lifetime.
* </p>
*
* <p>
* Tests can only be registered with the <code>test</code> method while the <code>FunSuite</code> is
* in its registration phase. Any attempt to register a test after the <code>FunSuite</code> has
* entered its ready phase, <em>i.e.</em>, after <code>run</code> has been invoked on the <code>FunSuite</code>,
* will be met with a thrown <code>TestRegistrationClosedException</code>. The recommended style
* of using <code>FunSuite</code> is to register tests during object construction as is done in all
* the examples shown here. If you keep to the recommended style, you should never see a
* <code>TestRegistrationClosedException</code>.
* </p>
*
* <p>
* <strong>Shared fixtures</strong>
* </p>
*
* <p>
* A test <em>fixture</em> is objects or other artifacts (such as files, sockets, database
* connections, etc.) used by tests to do their work. You can use fixtures in
* <code>FunSuite</code>s with the same approaches suggested for <code>Suite</code> in
* its documentation. The same text that appears in the test fixture
* section of <code>Suite</code>'s documentation is repeated here, with examples changed from
* <code>Suite</code> to <code>FunSuite</code>.
* </p>
*
* <p>
* If a fixture is used by only one test, then the definitions of the fixture objects can
* be local to the test function, such as the objects assigned to <code>sum</code> and <code>diff</code> in the
* previous <code>MySuite</code> examples. If multiple tests need to share a fixture, the best approach
* is to assign them to instance variables. Here's a (very contrived) example, in which the object assigned
* to <code>shared</code> is used by multiple test functions:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
* // Sharing immutable fixture objects via instance variables
* val shared = 5
*
* test("addition") {
* val sum = 2 + 3
* assert(sum === shared)
* }
*
* test("subtraction") {
* val diff = 7 - 2
* assert(diff === shared)
* }
* }
* </pre>
*
* <p>
* In some cases, however, shared <em>mutable</em> fixture objects may be changed by tests such that
* they need to be recreated or reinitialized before each test. Shared resources such
* as files or database connections may also need to be created and initialized before,
* and cleaned up after, each test. JUnit offers methods <code>setUp</code> and
* <code>tearDown</code> for this purpose. In ScalaTest, you can use the <code>BeforeAndAfterEach</code> trait,
* which will be described later, to implement an approach similar to JUnit's <code>setUp</code>
* and <code>tearDown</code>, however, this approach often involves reassigning <code>var</code>s
* between tests. Before going that route, you should consider some approaches that
* avoid <code>var</code>s. One approach is to write one or more <em>create-fixture</em> methods
* that return a new instance of a needed object (or a tuple or case class holding new instances of
* multiple objects) each time it is called. You can then call a create-fixture method at the beginning of each
* test that needs the fixture, storing the fixture object or objects in local variables. Here's an example:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
* import scala.collection.mutable.ListBuffer
*
* class MySuite extends FunSuite {
*
* // create objects needed by tests and return as a tuple
* def createFixture = (
* new StringBuilder("ScalaTest is "),
* new ListBuffer[String]
* )
*
* test("easy") {
* val (builder, lbuf) = createFixture
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(lbuf.isEmpty)
* lbuf += "sweet"
* }
*
* test("fun") {
* val (builder, lbuf) = createFixture
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(lbuf.isEmpty)
* }
* }
* </pre>
*
* <p>
* If different tests in the same <code>FunSuite</code> require different fixtures, you can create multiple create-fixture methods and
 * call the method (or methods) needed by each test at the beginning of the test. If every test requires the same set of
* mutable fixture objects, one other approach you can take is make them simply <code>val</code>s and mix in trait
* <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. If you mix in <code>OneInstancePerTest</code>, each test
* will be run in its own instance of the <code>FunSuite</code>, similar to the way JUnit tests are executed.
* </p>
*
* <p>
* Although the create-fixture and <code>OneInstancePerTest</code> approaches take care of setting up a fixture before each
* test, they don't address the problem of cleaning up a fixture after the test completes. In this situation,
* one option is to mix in the <a href="BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a> trait.
* <code>BeforeAndAfterEach</code>'s <code>beforeEach</code> method will be run before, and its <code>afterEach</code>
* method after, each test (like JUnit's <code>setUp</code> and <code>tearDown</code>
* methods, respectively).
 * For example, you could create a temporary file before each test, and delete it afterwards, like this:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
* import org.scalatest.BeforeAndAfterEach
* import java.io.FileReader
* import java.io.FileWriter
* import java.io.File
*
* class MySuite extends FunSuite with BeforeAndAfterEach {
*
* private val FileName = "TempFile.txt"
* private var reader: FileReader = _
*
* // Set up the temp file needed by the test
* override def beforeEach() {
* val writer = new FileWriter(FileName)
* try {
* writer.write("Hello, test!")
* }
* finally {
* writer.close()
* }
*
* // Create the reader needed by the test
* reader = new FileReader(FileName)
* }
*
* // Close and delete the temp file
* override def afterEach() {
* reader.close()
* val file = new File(FileName)
* file.delete()
* }
*
* test("reading from the temp file") {
* var builder = new StringBuilder
* var c = reader.read()
* while (c != -1) {
* builder.append(c.toChar)
* c = reader.read()
* }
* assert(builder.toString === "Hello, test!")
* }
*
* test("first char of the temp file") {
* assert(reader.read() === 'H')
* }
*
* test("without a fixture") {
* assert(1 + 1 === 2)
* }
* }
* </pre>
*
* <p>
* In this example, the instance variable <code>reader</code> is a <code>var</code>, so
* it can be reinitialized between tests by the <code>beforeEach</code> method.
* </p>
*
* <p>
* Although the <code>BeforeAndAfterEach</code> approach should be familiar to the users of most
 * other test frameworks, ScalaTest provides another alternative that also allows you to perform cleanup
* after each test: overriding <code>withFixture(NoArgTest)</code>.
* To execute each test, <code>Suite</code>'s implementation of the <code>runTest</code> method wraps an invocation
* of the appropriate test method in a no-arg function. <code>runTest</code> passes that test function to the <code>withFixture(NoArgTest)</code>
* method, which is responsible for actually running the test by invoking the function. <code>Suite</code>'s
* implementation of <code>withFixture(NoArgTest)</code> simply invokes the function, like this:
* </p>
*
* <pre>
* // Default implementation
* protected def withFixture(test: NoArgTest) {
* test()
* }
* </pre>
*
* <p>
* The <code>withFixture(NoArgTest)</code> method exists so that you can override it and set a fixture up before, and clean it up after, each test.
* Thus, the previous temp file example could also be implemented without mixing in <code>BeforeAndAfterEach</code>, like this:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
* import org.scalatest.BeforeAndAfterEach
* import java.io.FileReader
* import java.io.FileWriter
* import java.io.File
*
* class MySuite extends FunSuite {
*
* private var reader: FileReader = _
*
* override def withFixture(test: NoArgTest) {
*
* val FileName = "TempFile.txt"
*
* // Set up the temp file needed by the test
* val writer = new FileWriter(FileName)
* try {
* writer.write("Hello, test!")
* }
* finally {
* writer.close()
* }
*
* // Create the reader needed by the test
* reader = new FileReader(FileName)
*
* try {
* test() // Invoke the test function
* }
* finally {
* // Close and delete the temp file
* reader.close()
* val file = new File(FileName)
* file.delete()
* }
* }
*
* test("reading from the temp file") {
* var builder = new StringBuilder
* var c = reader.read()
* while (c != -1) {
* builder.append(c.toChar)
* c = reader.read()
* }
* assert(builder.toString === "Hello, test!")
* }
*
* test("first char of the temp file") {
* assert(reader.read() === 'H')
* }
*
* test("without a fixture") {
* assert(1 + 1 === 2)
* }
* }
* </pre>
*
* <p>
* If you prefer to keep your test classes immutable, one final variation is to use the
* <a href="fixture/FixtureFunSuite.html"><code>FixtureFunSuite</code></a> trait from the
* <code>org.scalatest.fixture</code> package. Tests in an <code>org.scalatest.fixture.FixtureFunSuite</code> can have a fixture
* object passed in as a parameter. You must indicate the type of the fixture object
* by defining the <code>Fixture</code> type member and define a <code>withFixture</code> method that takes a <em>one-arg</em> test function.
* (A <code>FixtureFunSuite</code> has two overloaded <code>withFixture</code> methods, therefore, one that takes a <code>OneArgTest</code>
* and the other, inherited from <code>Suite</code>, that takes a <code>NoArgTest</code>.)
* Inside the <code>withFixture(OneArgTest)</code> method, you create the fixture, pass it into the test function, then perform any
* necessary cleanup after the test function returns. Instead of invoking each test directly, a <code>FixtureFunSuite</code> will
* pass a function that invokes the code of a test to <code>withFixture(OneArgTest)</code>. Your <code>withFixture(OneArgTest)</code> method, therefore,
* is responsible for actually running the code of the test by invoking the test function.
* For example, you could pass the temp file reader fixture to each test that needs it
* by overriding the <code>withFixture(OneArgTest)</code> method of a <code>FixtureFunSuite</code>, like this:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureFunSuite
* import java.io.FileReader
* import java.io.FileWriter
* import java.io.File
*
* class MySuite extends FixtureFunSuite {
*
* type FixtureParam = FileReader
*
* def withFixture(test: OneArgTest) {
*
* val FileName = "TempFile.txt"
*
* // Set up the temp file needed by the test
* val writer = new FileWriter(FileName)
* try {
* writer.write("Hello, test!")
* }
* finally {
* writer.close()
* }
*
* // Create the reader needed by the test
* val reader = new FileReader(FileName)
*
* try {
* // Run the test using the temp file
* test(reader)
* }
* finally {
* // Close and delete the temp file
* reader.close()
* val file = new File(FileName)
* file.delete()
* }
* }
*
* test("reading from the temp file") { reader =>
* var builder = new StringBuilder
* var c = reader.read()
* while (c != -1) {
* builder.append(c.toChar)
* c = reader.read()
* }
* assert(builder.toString === "Hello, test!")
* }
*
* test("first char of the temp file") { reader =>
* assert(reader.read() === 'H')
* }
*
* test("without a fixture") { () =>
* assert(1 + 1 === 2)
* }
* }
* </pre>
*
* <p>
* It is worth noting that the only difference in the test code between the mutable
* <code>BeforeAndAfterEach</code> approach shown here and the immutable <code>FixtureFunSuite</code>
* approach shown previously is that two of the <code>FixtureFunSuite</code>'s test functions take a <code>FileReader</code> as
* a parameter via the "<code>reader =></code>" at the beginning of the function. Otherwise the test code is identical.
* One benefit of the explicit parameter is that, as demonstrated
* by the "<code>without a fixture</code>" test, a <code>FixtureFunSuite</code>
* test need not take the fixture. So you can have some tests that take a fixture, and others that don't.
* In this case, the <code>FixtureFunSuite</code> provides documentation indicating which
* tests use the fixture and which don't, whereas the <code>BeforeAndAfterEach</code> approach does not.
 * (If you want to combine tests that take different fixture types in the same <code>FunSuite</code>, you can
* use <a href="fixture/MultipleFixtureFunSuite.html">MultipleFixtureFunSuite</a>.)
* </p>
*
* <p>
 * If you want to execute code before and after all tests (and nested suites) in a suite, such
* as you could do with <code>@BeforeClass</code> and <code>@AfterClass</code>
* annotations in JUnit 4, you can use the <code>beforeAll</code> and <code>afterAll</code>
* methods of <code>BeforeAndAfterAll</code>. See the documentation for <code>BeforeAndAfterAll</code> for
* an example.
* </p>
*
* <p>
* <a name="SharedTests"><strong>Shared tests</strong></a>
* </p>
*
* <p>
* Sometimes you may want to run the same test code on different fixture objects. In other words, you may want to write tests that are "shared"
* by different fixture objects.
* To accomplish this in a <code>FunSuite</code>, you first place shared tests in
* <em>behavior functions</em>. These behavior functions will be
* invoked during the construction phase of any <code>FunSuite</code> that uses them, so that the tests they contain will
* be registered as tests in that <code>FunSuite</code>.
* For example, given this stack class:
* </p>
*
* <pre>
* import scala.collection.mutable.ListBuffer
*
* class Stack[T] {
*
* val MAX = 10
* private var buf = new ListBuffer[T]
*
* def push(o: T) {
* if (!full)
* o +: buf
* else
* throw new IllegalStateException("can't push onto a full stack")
* }
*
* def pop(): T = {
* if (!empty)
* buf.remove(0)
* else
* throw new IllegalStateException("can't pop an empty stack")
* }
*
* def peek: T = {
* if (!empty)
* buf(0)
* else
* throw new IllegalStateException("can't pop an empty stack")
* }
*
* def full: Boolean = buf.size == MAX
* def empty: Boolean = buf.size == 0
* def size = buf.size
*
* override def toString = buf.mkString("Stack(", ", ", ")")
* }
* </pre>
*
* <p>
* You may want to test the <code>Stack</code> class in different states: empty, full, with one item, with one item less than capacity,
* <em>etc</em>. You may find you have several tests that make sense any time the stack is non-empty. Thus you'd ideally want to run
* those same tests for three stack fixture objects: a full stack, a stack with a one item, and a stack with one item less than
* capacity. With shared tests, you can factor these tests out into a behavior function, into which you pass the
* stack fixture to use when running the tests. So in your <code>FunSuite</code> for stack, you'd invoke the
* behavior function three times, passing in each of the three stack fixtures so that the shared tests are run for all three fixtures.
* </p>
*
* <p>
* You can define a behavior function that encapsulates these shared tests inside the <code>FunSuite</code> that uses them. If they are shared
* between different <code>FunSuite</code>s, however, you could also define them in a separate trait that is mixed into
* each <code>FunSuite</code> that uses them.
* <a name="StackBehaviors">For</a> example, here the <code>nonEmptyStack</code> behavior function (in this case, a
* behavior <em>method</em>) is defined in a trait along with another
* method containing shared tests for non-full stacks:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* trait FunSuiteStackBehaviors { this: FunSuite =>
*
* def nonEmptyStack(createNonEmptyStack: => Stack[Int], lastItemAdded: Int) {
*
* test("empty is invoked on this non-empty stack: " + createNonEmptyStack.toString) {
* val stack = createNonEmptyStack
* assert(!stack.empty)
* }
*
* test("peek is invoked on this non-empty stack: " + createNonEmptyStack.toString) {
* val stack = createNonEmptyStack
* val size = stack.size
* assert(stack.peek === lastItemAdded)
* assert(stack.size === size)
* }
*
* test("pop is invoked on this non-empty stack: " + createNonEmptyStack.toString) {
* val stack = createNonEmptyStack
* val size = stack.size
* assert(stack.pop === lastItemAdded)
* assert(stack.size === size - 1)
* }
* }
*
* def nonFullStack(createNonFullStack: => Stack[Int]) {
*
* test("full is invoked on this non-full stack: " + createNonFullStack.toString) {
* val stack = createNonFullStack
* assert(!stack.full)
* }
*
* test("push is invoked on this non-full stack: " + createNonFullStack.toString) {
* val stack = createNonFullStack
* val size = stack.size
* stack.push(7)
* assert(stack.size === size + 1)
* assert(stack.peek === 7)
* }
* }
* }
* </pre>
*
* <p>
* Given these behavior functions, you could invoke them directly, but <code>FunSuite</code> offers a DSL for the purpose,
* which looks like this:
* </p>
*
* <pre>
* testsFor(nonEmptyStack(stackWithOneItem, lastValuePushed))
* testsFor(nonFullStack(stackWithOneItem))
* </pre>
*
* <p>
* If you prefer to use an imperative style to change fixtures, for example by mixing in <code>BeforeAndAfterEach</code> and
* reassigning a <code>stack</code> <code>var</code> in <code>beforeEach</code>, you could write your behavior functions
* in the context of that <code>var</code>, which means you wouldn't need to pass in the stack fixture because it would be
* in scope already inside the behavior function. In that case, your code would look like this:
* </p>
*
* <pre>
* testsFor(nonEmptyStack) // assuming lastValuePushed is also in scope inside nonEmptyStack
* testsFor(nonFullStack)
* </pre>
*
* <p>
* The recommended style, however, is the functional, pass-all-the-needed-values-in style. Here's an example:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class StackFunSuite extends FunSuite with FunSuiteStackBehaviors {
*
* // Stack fixture creation methods
* def emptyStack = new Stack[Int]
*
* def fullStack = {
* val stack = new Stack[Int]
* for (i <- 0 until stack.MAX)
* stack.push(i)
* stack
* }
*
* def stackWithOneItem = {
* val stack = new Stack[Int]
* stack.push(9)
* stack
* }
*
* def stackWithOneItemLessThanCapacity = {
* val stack = new Stack[Int]
* for (i <- 1 to 9)
* stack.push(i)
* stack
* }
*
* val lastValuePushed = 9
*
* test("empty is invoked on an empty stack") {
* val stack = emptyStack
* assert(stack.empty)
* }
*
* test("peek is invoked on an empty stack") {
* val stack = emptyStack
* intercept[IllegalStateException] {
* stack.peek
* }
* }
*
* test("pop is invoked on an empty stack") {
* val stack = emptyStack
* intercept[IllegalStateException] {
* emptyStack.pop
* }
* }
*
* testsFor(nonEmptyStack(stackWithOneItem, lastValuePushed))
* testsFor(nonFullStack(stackWithOneItem))
*
* testsFor(nonEmptyStack(stackWithOneItemLessThanCapacity, lastValuePushed))
* testsFor(nonFullStack(stackWithOneItemLessThanCapacity))
*
* test("full is invoked on a full stack") {
* val stack = fullStack
* assert(stack.full)
* }
*
* testsFor(nonEmptyStack(fullStack, lastValuePushed))
*
* test("push is invoked on a full stack") {
* val stack = fullStack
* intercept[IllegalStateException] {
* stack.push(10)
* }
* }
* }
* </pre>
*
* <p>
* If you load these classes into the Scala interpreter (with scalatest's JAR file on the class path), and execute it,
* you'll see:
* </p>
*
* <pre>
* scala> (new StackFunSuite).execute()
* Test Starting - StackFunSuite: empty is invoked on an empty stack
* Test Succeeded - StackFunSuite: empty is invoked on an empty stack
* Test Starting - StackFunSuite: peek is invoked on an empty stack
* Test Succeeded - StackFunSuite: peek is invoked on an empty stack
* Test Starting - StackFunSuite: pop is invoked on an empty stack
* Test Succeeded - StackFunSuite: pop is invoked on an empty stack
* Test Starting - StackFunSuite: empty is invoked on this non-empty stack: Stack(9)
* Test Succeeded - StackFunSuite: empty is invoked on this non-empty stack: Stack(9)
* Test Starting - StackFunSuite: peek is invoked on this non-empty stack: Stack(9)
* Test Succeeded - StackFunSuite: peek is invoked on this non-empty stack: Stack(9)
* Test Starting - StackFunSuite: pop is invoked on this non-empty stack: Stack(9)
* Test Succeeded - StackFunSuite: pop is invoked on this non-empty stack: Stack(9)
* Test Starting - StackFunSuite: full is invoked on this non-full stack: Stack(9)
* Test Succeeded - StackFunSuite: full is invoked on this non-full stack: Stack(9)
* Test Starting - StackFunSuite: push is invoked on this non-full stack: Stack(9)
* Test Succeeded - StackFunSuite: push is invoked on this non-full stack: Stack(9)
* Test Starting - StackFunSuite: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Succeeded - StackFunSuite: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Starting - StackFunSuite: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Succeeded - StackFunSuite: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Starting - StackFunSuite: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Succeeded - StackFunSuite: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Starting - StackFunSuite: full is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Succeeded - StackFunSuite: full is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Starting - StackFunSuite: push is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Succeeded - StackFunSuite: push is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)
* Test Starting - StackFunSuite: full is invoked on a full stack
* Test Succeeded - StackFunSuite: full is invoked on a full stack
* Test Starting - StackFunSuite: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Succeeded - StackFunSuite: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Starting - StackFunSuite: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Succeeded - StackFunSuite: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Starting - StackFunSuite: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Succeeded - StackFunSuite: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
* Test Starting - StackFunSuite: push is invoked on a full stack
* Test Succeeded - StackFunSuite: push is invoked on a full stack
* </pre>
*
* <p>
* One thing to keep in mind when using shared tests is that in ScalaTest, each test in a suite must have a unique name.
* If you register the same tests repeatedly in the same suite, one problem you may encounter is an exception at runtime
* complaining that multiple tests are being registered with the same test name.
* In a <code>FunSuite</code> there is no nesting construct analogous to <code>Spec</code>'s <code>describe</code> clause.
* Therefore, you need to do a bit of
* extra work to ensure that the test names are unique. If a duplicate test name problem shows up in a
* <code>FunSuite</code>, you'll need to pass in a prefix or suffix string to add to each test name. You can pass this string
* the same way you pass any other data needed by the shared tests, or just call <code>toString</code> on the shared fixture object.
* This is the approach taken by the previous <code>FunSuiteStackBehaviors</code> example.
* </p>
*
* <p>
* Given this <code>FunSuiteStackBehaviors</code> trait, calling it with the <code>stackWithOneItem</code> fixture, like this:
* </p>
*
* <pre>
* testsFor(nonEmptyStack(stackWithOneItem, lastValuePushed))
* </pre>
*
* <p>
* yields test names:
* </p>
*
* <ul>
* <li><code>empty is invoked on this non-empty stack: Stack(9)</code></li>
* <li><code>peek is invoked on this non-empty stack: Stack(9)</code></li>
* <li><code>pop is invoked on this non-empty stack: Stack(9)</code></li>
* </ul>
*
* <p>
* Whereas calling it with the <code>stackWithOneItemLessThanCapacity</code> fixture, like this:
* </p>
*
* <pre>
* testsFor(nonEmptyStack(stackWithOneItemLessThanCapacity, lastValuePushed))
* </pre>
*
* <p>
* yields different test names:
* </p>
*
* <ul>
* <li><code>empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li>
* <li><code>peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li>
* <li><code>pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li>
* </ul>
*
* <p>
* <strong>Tagging tests</strong>
* </p>
*
* <p>
* A <code>FunSuite</code>'s tests may be classified into groups by <em>tagging</em> them with string names.
* As with any suite, when executing a <code>FunSuite</code>, groups of tests can
* optionally be included and/or excluded. To tag a <code>FunSuite</code>'s tests,
* you pass objects that extend abstract class <code>org.scalatest.Tag</code> to methods
* that register tests, <code>test</code> and <code>ignore</code>. Class <code>Tag</code> takes one parameter, a string name. If you have
* created Java annotation interfaces for use as group names in direct subclasses of <code>org.scalatest.Suite</code>,
* then you will probably want to use group names on your <code>FunSuite</code>s that match. To do so, simply
* pass the fully qualified names of the Java interfaces to the <code>Tag</code> constructor. For example, if you've
* defined Java annotation interfaces with fully qualified names, <code>com.mycompany.groups.SlowTest</code> and
* <code>com.mycompany.groups.DbTest</code>, then you could
* create matching groups for <code>FunSuite</code>s like this:
* </p>
*
* <pre>
* import org.scalatest.Tag
*
* object SlowTest extends Tag("com.mycompany.groups.SlowTest")
* object DbTest extends Tag("com.mycompany.groups.DbTest")
* </pre>
*
* <p>
* Given these definitions, you could place <code>FunSuite</code> tests into groups like this:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
* test("addition", SlowTest) {
* val sum = 1 + 1
* assert(sum === 2)
* assert(sum + 2 === 4)
* }
*
* test("subtraction", SlowTest, DbTest) {
* val diff = 4 - 1
* assert(diff === 3)
* assert(diff - 2 === 1)
* }
* }
* </pre>
*
* <p>
* This code marks both tests, "addition" and "subtraction," with the <code>com.mycompany.groups.SlowTest</code> tag,
* and test "subtraction" with the <code>com.mycompany.groups.DbTest</code> tag.
* </p>
*
* <p>
* The primary <code>run</code> method takes a <code>Filter</code>, whose constructor takes an optional
* <code>Set[String]</code>s called <code>tagsToInclude</code> and a <code>Set[String]</code> called
* <code>tagsToExclude</code>. If <code>tagsToInclude</code> is <code>None</code>, all tests will be run
 * except those belonging to tags listed in the
* <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests
* belonging to tags mentioned in the <code>tagsToInclude</code> set, and not mentioned in <code>tagsToExclude</code>,
* will be run.
* </p>
*
* <p>
* <strong>Ignored tests</strong>
* </p>
*
* <p>
* To support the common use case of “temporarily” disabling a test, with the
* good intention of resurrecting the test at a later time, <code>FunSuite</code> provides registration
* methods that start with <code>ignore</code> instead of <code>test</code>. For example, to temporarily
* disable the test named <code>addition</code>, just change “<code>test</code>” into “<code>ignore</code>,” like this:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
* ignore("addition") {
* val sum = 1 + 1
* assert(sum === 2)
* assert(sum + 2 === 4)
* }
*
* test("subtraction") {
* val diff = 4 - 1
* assert(diff === 3)
* assert(diff - 2 === 1)
* }
* }
* </pre>
*
* <p>
* If you run this version of <code>MySuite</code> with:
* </p>
*
* <pre>
* scala> (new MySuite).execute()
* </pre>
*
* <p>
* It will run only <code>subtraction</code> and report that <code>addition</code> was ignored:
* </p>
*
* <pre>
* Test Ignored - MySuite: addition
* Test Starting - MySuite: subtraction
* Test Succeeded - MySuite: subtraction
* </pre>
*
* <p>
* <strong>Pending tests</strong>
* </p>
*
* <p>
* A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
* pending tests is to facilitate a style of testing in which documentation of behavior is sketched
 * out before tests are written to verify that behavior (and often, before the behavior of
* the system being tested is itself implemented). Such sketches form a kind of specification of
* what tests and functionality to implement later.
* </p>
*
* <p>
* To support this style of testing, a test can be given a name that specifies one
* bit of behavior required by the system being tested. The test can also include some code that
* sends more information about the behavior to the reporter when the tests run. At the end of the test,
* it can call method <code>pending</code>, which will cause it to complete abruptly with <code>TestPendingException</code>.
* Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
* sent to the reporter when running the test can appear in the report of a test run. (In other words,
* the code of a pending test is executed just like any other test.) However, because the test completes abruptly
* with <code>TestPendingException</code>, the test will be reported as pending, to indicate
* the actual test, and possibly the functionality, has not yet been implemented.
* </p>
*
* <p>
* Although pending tests may be used more often in specification-style suites, such as
* <code>org.scalatest.Spec</code>, you can also use it in <code>FunSuite</code>, like this:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
 *   test("addition") {
* val sum = 1 + 1
* assert(sum === 2)
* assert(sum + 2 === 4)
* }
*
 *   test("subtraction") (pending)
* }
* </pre>
*
* <p>
* (Note: "<code>(pending)</code>" is the body of the test. Thus the test contains just one statement, an invocation
* of the <code>pending</code> method, which throws <code>TestPendingException</code>.)
* If you run this version of <code>MySuite</code> with:
* </p>
*
* <pre>
* scala> (new MySuite).execute()
* </pre>
*
* <p>
* It will run both tests, but report that <code>subtraction</code> is pending. You'll see:
* </p>
*
* <pre>
* Test Starting - MySuite: addition
* Test Succeeded - MySuite: addition
* Test Starting - MySuite: subtraction
* Test Pending - MySuite: subtraction
* </pre>
*
* <p>
* <strong>Informers</strong>
* </p>
*
* <p>
* One of the parameters to the primary <code>run</code> method is a <code>Reporter</code>, which
* will collect and report information about the running suite of tests.
* Information about suites and tests that were run, whether tests succeeded or failed,
* and tests that were ignored will be passed to the <code>Reporter</code> as the suite runs.
* Most often the reporting done by default by <code>FunSuite</code>'s methods will be sufficient, but
* occasionally you may wish to provide custom information to the <code>Reporter</code> from a test.
* For this purpose, an <code>Informer</code> that will forward information to the current <code>Reporter</code>
* is provided via the <code>info</code> parameterless method.
* You can pass the extra information to the <code>Informer</code> via one of its <code>apply</code> methods.
* The <code>Informer</code> will then pass the information to the <code>Reporter</code> via an <code>InfoProvided</code> event.
* Here's an example:
* </p>
*
* <pre>
* import org.scalatest.FunSuite
*
* class MySuite extends FunSuite {
*
* test("addition") {
* val sum = 1 + 1
* assert(sum === 2)
* assert(sum + 2 === 4)
* info("Addition seems to work")
* }
* }
* </pre>
*
* If you run this <code>Suite</code> from the interpreter, you will see the following message
* included in the printed report:
*
* <pre>
* Test Starting - MySuite: addition
* Info Provided - MySuite.addition: Addition seems to work
* Test Succeeded - MySuite: addition
* </pre>
*
* @author Bill Venners
*/
trait FunSuite extends Suite { thisSuite =>

  // Tag automatically applied to tests registered via ignore.
  private val IgnoreTagName = "org.scalatest.Ignore"

  // Nodes of the registration-order "do list": either a runnable test or an
  // informational message recorded between test registrations.
  private abstract class FunNode
  private case class TestNode(testName: String, fun: () => Unit) extends FunNode
  private case class InfoNode(message: String) extends FunNode

  // Access to the testNamesList, testsMap, and tagsMap must be synchronized, because the test methods are invoked by
  // the primary constructor, but testNames, tags, and runTest get invoked directly or indirectly
  // by run. When running tests concurrently with ScalaTest Runner, different threads can
  // instantiate and run the suite. Instead of synchronizing, I put them in an immutable Bundle object (and
  // all three collections--testNamesList, testsMap, and tagsMap--are immutable collections), then I put the Bundle
  // in an AtomicReference. Since the expected use case is the test method will be called
  // from the primary constructor, which will be all done by one thread, I just in effect use optimistic locking on the Bundle.
  // If two threads ever called test at the same time, they could get a ConcurrentModificationException.
  // Test names are in reverse order of test registration method invocations
  private class Bundle private(
    val testNamesList: List[String],
    val doList: List[FunNode],
    val testsMap: Map[String, TestNode],
    val tagsMap: Map[String, Set[String]],
    val registrationClosed: Boolean
  ) {
    // Convenience for destructuring all five fields at once.
    def unpack = (testNamesList, doList, testsMap, tagsMap, registrationClosed)
  }

  private object Bundle {
    def apply(
      testNamesList: List[String],
      doList: List[FunNode],
      testsMap: Map[String, TestNode],
      tagsMap: Map[String, Set[String]],
      registrationClosed: Boolean
    ): Bundle =
      new Bundle(testNamesList, doList, testsMap, tagsMap, registrationClosed)
  }

  // The suite's entire registration state lives in this single reference;
  // see the comment above Bundle for the optimistic-locking rationale.
  private val atomic = new AtomicReference[Bundle](Bundle(List(), List(), Map(), Map(), false))

  // Optimistic-locking swap: fails loudly if another thread replaced the
  // bundle between the caller's read of oldBundle and this update.
  private def updateAtomic(oldBundle: Bundle, newBundle: Bundle) {
    val shouldBeOldBundle = atomic.getAndSet(newBundle)
    if (!(shouldBeOldBundle eq oldBundle))
      throw new ConcurrentModificationException(Resources("concurrentFunSuiteBundleMod"))
  }

  // Informer used while the suite is still registering tests: it records each
  // message as an InfoNode in the do list instead of reporting it immediately.
  private class RegistrationInformer extends Informer {
    def apply(message: String) {
      if (message == null)
        throw new NullPointerException
      val oldBundle = atomic.get
      var (testNamesList, doList, testsMap, tagsMap, registrationClosed) = oldBundle.unpack
      doList ::= InfoNode(message)
      updateAtomic(oldBundle, Bundle(testNamesList, doList, testsMap, tagsMap, registrationClosed))
    }
  }

  // The informer will be a registration informer until run is called for the first time. (This
  // is the registration phase of a FunSuite's lifecycle.)
  private final val atomicInformer = new AtomicReference[Informer](new RegistrationInformer)

  /**
   * Returns an <code>Informer</code> that during test execution will forward strings (and other objects) passed to its
   * <code>apply</code> method to the current reporter. If invoked in a constructor, it
   * will register the passed string for forwarding later during test execution. If invoked while this
   * <code>FunSuite</code> is being executed, such as from inside a test function, it will forward the information to
   * the current reporter immediately. If invoked at any other time, it will
   * throw an exception. This method can be called safely by any thread.
   */
  implicit protected def info: Informer = atomicInformer.get

  // Installed after run completes: any later info call is a usage error and
  // is answered with an IllegalStateException.
  private val zombieInformer =
    new Informer {
      private val complaint = Resources("cantCallInfoNow", "FunSuite")
      def apply(message: String) {
        if (message == null)
          throw new NullPointerException
        throw new IllegalStateException(complaint)
      }
    }

  /**
   * Register a test with the specified name, optional tags, and function value that takes no arguments.
   * This method will register the test for later execution via an invocation of one of the <code>run</code>
   * methods. The passed test name must not have been registered previously on
   * this <code>FunSuite</code> instance.
   *
   * @param testName the name of the test
   * @param testTags the optional list of tags for this test
   * @param testFun the test function
   * @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
   * @throws DuplicateTestNameException if a test with the same name has been registered previously
   * @throws NotAllowedException if <code>testName</code> had been registered previously
   * @throws NullPointerException if <code>testName</code> or any passed test tag is <code>null</code>
   */
  protected def test(testName: String, testTags: Tag*)(f: => Unit) {
    if (testName == null)
      throw new NullPointerException("testName was null")
    if (testTags.exists(_ == null))
      throw new NullPointerException("a test tag was null")
    if (atomic.get.registrationClosed)
      throw new TestRegistrationClosedException(Resources("testCannotAppearInsideAnotherTest"), getStackDepth("FunSuite.scala", "test"))
    if (atomic.get.testsMap.keySet.contains(testName))
      throw new DuplicateTestNameException(Resources("duplicateTestName", testName), getStackDepth("FunSuite.scala", "test"))
    val oldBundle = atomic.get
    var (testNamesList, doList, testsMap, tagsMap, registrationClosed) = oldBundle.unpack
    // The by-name test body is captured as a Function0 for later execution.
    val testNode = TestNode(testName, f _)
    testsMap += (testName -> testNode)
    testNamesList ::= testName
    doList ::= testNode
    val tagNames = Set[String]() ++ testTags.map(_.name)
    if (!tagNames.isEmpty)
      tagsMap += (testName -> tagNames)
    updateAtomic(oldBundle, Bundle(testNamesList, doList, testsMap, tagsMap, registrationClosed))
  }

  /**
   * Register a test to ignore, which has the specified name, optional tags, and function value that takes no arguments.
   * This method will register the test for later ignoring via an invocation of one of the <code>run</code>
   * methods. This method exists to make it easy to ignore an existing test by changing the call to <code>test</code>
   * to <code>ignore</code> without deleting or commenting out the actual test code. The test will not be run, but a
   * report will be sent that indicates the test was ignored. The passed test name must not have been registered previously on
   * this <code>FunSuite</code> instance.
   *
   * @param testName the name of the test
   * @param testTags the optional list of tags for this test
   * @param testFun the test function
   * @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
   * @throws DuplicateTestNameException if a test with the same name has been registered previously
   * @throws NotAllowedException if <code>testName</code> had been registered previously
   */
  protected def ignore(testName: String, testTags: Tag*)(f: => Unit) {
    if (testName == null)
      throw new NullPointerException("testName was null")
    if (testTags.exists(_ == null))
      throw new NullPointerException("a test tag was null")
    if (atomic.get.registrationClosed)
      throw new TestRegistrationClosedException(Resources("ignoreCannotAppearInsideATest"), getStackDepth("FunSuite.scala", "ignore"))
    test(testName)(f) // Call test without passing the tags
    // Re-read the bundle (test() above replaced it) and tag the freshly
    // registered test with the Ignore tag plus any user-supplied tags.
    val oldBundle = atomic.get
    var (testNamesList, doList, testsMap, tagsMap, registrationClosed) = oldBundle.unpack
    val tagNames = Set[String]() ++ testTags.map(_.name)
    tagsMap += (testName -> (tagNames + IgnoreTagName))
    updateAtomic(oldBundle, Bundle(testNamesList, doList, testsMap, tagsMap, registrationClosed))
  }

  /**
   * An immutable <code>Set</code> of test names. If this <code>FunSuite</code> contains no tests, this method returns an empty <code>Set</code>.
   *
   * <p>
   * This trait's implementation of this method will return a set that contains the names of all registered tests. The set's iterator will
   * return those names in the order in which the tests were registered.
   * </p>
   */
  override def testNames: Set[String] = {
    // I'm returning a ListSet here so that the tests will be run in registration order
    ListSet(atomic.get.testNamesList.toArray: _*)
  }

  // runTest should throw IAE if a test name is passed that doesn't exist. Looks like right now it just reports a test failure.
  /**
   * Run a test. This trait's implementation runs the test registered with the name specified by <code>testName</code>.
   *
   * @param testName the name of one test to run.
   * @param reporter the <code>Reporter</code> to which results will be reported
   * @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
   * @param configMap a <code>Map</code> of properties that can be used by the executing <code>Suite</code> of tests.
   * @throws NullPointerException if any of <code>testName</code>, <code>reporter</code>, <code>stopper</code>, or <code>configMap</code>
   *     is <code>null</code>.
   */
  protected override def runTest(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) {
    if (testName == null || reporter == null || stopper == null || configMap == null)
      throw new NullPointerException
    val stopRequested = stopper
    val report = wrapReporterIfNecessary(reporter)
    // Create a Rerunner if the FunSuite has a no-arg constructor
    val hasPublicNoArgConstructor = Suite.checkForPublicNoArgConstructor(getClass)
    val rerunnable =
      if (hasPublicNoArgConstructor)
        Some(new TestRerunner(getClass.getName, testName))
      else
        None
    val testStartTime = System.currentTimeMillis
    report(TestStarting(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, None, rerunnable))
    try {
      val theTest = atomic.get.testsMap(testName)
      // Informer that forwards messages to the reporter, tagged with this test's name.
      val informerForThisTest =
        new ConcurrentInformer(NameInfo(thisSuite.suiteName, Some(thisSuite.getClass.getName), Some(testName))) {
          def apply(message: String) {
            if (message == null)
              throw new NullPointerException
            report(InfoProvided(tracker.nextOrdinal(), message, nameInfoForCurrentThread))
          }
        }
      val oldInformer = atomicInformer.getAndSet(informerForThisTest)
      var swapAndCompareSucceeded = false
      try {
        val theConfigMap = configMap
        withFixture(
          new NoArgTest {
            def name = testName
            def apply() { theTest.fun() }
            def configMap = theConfigMap
          }
        )
      }
      finally {
        // Restore the previous informer; record whether anyone else swapped
        // it while the test body was running.
        val shouldBeInformerForThisTest = atomicInformer.getAndSet(oldInformer)
        swapAndCompareSucceeded = shouldBeInformerForThisTest eq informerForThisTest
      }
      if (!swapAndCompareSucceeded) // Do outside finally to workaround Scala compiler bug
        throw new ConcurrentModificationException(Resources("concurrentInformerMod", thisSuite.getClass.getName))
      val duration = System.currentTimeMillis - testStartTime
      report(TestSucceeded(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, Some(duration), None, rerunnable))
    }
    catch {
      case _: TestPendingException =>
        report(TestPending(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName))
      case e if !anErrorThatShouldCauseAnAbort(e) =>
        val duration = System.currentTimeMillis - testStartTime
        handleFailedTest(e, false, testName, rerunnable, report, tracker, duration)
      // Fatal errors (per anErrorThatShouldCauseAnAbort) propagate unchanged.
      case e => throw e
    }
  }

  // Fires a TestFailed event for the given throwable, preferring its message
  // over its toString when a message is present.
  private def handleFailedTest(throwable: Throwable, hasPublicNoArgConstructor: Boolean, testName: String,
      rerunnable: Option[Rerunner], reporter: Reporter, tracker: Tracker, duration: Long) {
    val message =
      if (throwable.getMessage != null) // [bv: this could be factored out into a helper method]
        throwable.getMessage
      else
        throwable.toString
    reporter(TestFailed(tracker.nextOrdinal(), message, thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, Some(throwable), Some(duration), None, rerunnable))
  }

  /**
   * A <code>Map</code> whose keys are <code>String</code> tag names to which tests in this <code>FunSuite</code> belong, and values
   * the <code>Set</code> of test names that belong to each tag. If this <code>FunSuite</code> contains no tags, this method returns an empty <code>Map</code>.
   *
   * <p>
   * This trait's implementation returns tags that were passed as strings contained in <code>Tag</code> objects passed to
   * methods <code>test</code> and <code>ignore</code>.
   * </p>
   */
  override def tags: Map[String, Set[String]] = atomic.get.tagsMap

  /**
   * Run zero to many of this <code>Spec</code>'s tests.
   *
   * @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
   *                 I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
   * @param reporter the <code>Reporter</code> to which results will be reported
   * @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
   * @param filter a <code>Filter</code> with which to filter tests based on their tags
   * @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
   * @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
   *              by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
   * @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
   * @throws NullPointerException if any of the passed parameters is <code>null</code>.
   * @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
   *     exists in this <code>Suite</code>
   */
  protected override def runTests(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter,
      configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
    if (testName == null)
      throw new NullPointerException("testName was null")
    if (reporter == null)
      throw new NullPointerException("reporter was null")
    if (stopper == null)
      throw new NullPointerException("stopper was null")
    if (filter == null)
      throw new NullPointerException("filter was null")
    if (configMap == null)
      throw new NullPointerException("configMap was null")
    if (distributor == null)
      throw new NullPointerException("distributor was null")
    if (tracker == null)
      throw new NullPointerException("tracker was null")
    val stopRequested = stopper
    // Wrap any non-DispatchReporter, non-CatchReporter in a CatchReporter,
    // so that exceptions are caught and transformed
    // into error messages on the standard error stream.
    val report = wrapReporterIfNecessary(reporter)
    // If a testName is passed to run, just run that, else run the tests returned
    // by testNames.
    testName match {
      case Some(tn) => runTest(tn, report, stopRequested, configMap, tracker)
      case None =>
        // doList was built by prepending, so reverse to get registration order.
        val doList = atomic.get.doList.reverse
        for (node <- doList) {
          node match {
            case InfoNode(message) => info(message)
            case TestNode(tn, _) =>
              val (filterTest, ignoreTest) = filter(tn, tags)
              if (!filterTest)
                if (ignoreTest)
                  report(TestIgnored(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), tn))
                else
                  runTest(tn, report, stopRequested, configMap, tracker)
          }
        }
    }
  }

  // Used only to print a diagnostic when the same FunSuite instance is run twice.
  @volatile private var wasRunBefore = false

  override def run(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter,
      configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
    if (wasRunBefore)
      println(thisSuite.getClass.getName + ", a FunSuite, is being run again")
    else
      wasRunBefore = true
    val stopRequested = stopper
    // Set the flag that indicates registration is closed (because run has now been invoked),
    // which will disallow any further invocations of "test" or "ignore" with
    // a TestRegistrationClosedException.
    val oldBundle = atomic.get
    val (testNamesList, doList, testsMap, tagsMap, registrationClosed) = oldBundle.unpack
    if (!registrationClosed)
      updateAtomic(oldBundle, Bundle(testNamesList, doList, testsMap, tagsMap, true))
    val report = wrapReporterIfNecessary(reporter)
    // Suite-level informer (no test name) used while this suite executes.
    val informerForThisSuite =
      new ConcurrentInformer(NameInfo(thisSuite.suiteName, Some(thisSuite.getClass.getName), None)) {
        def apply(message: String) {
          if (message == null)
            throw new NullPointerException
          report(InfoProvided(tracker.nextOrdinal(), message, nameInfoForCurrentThread))
        }
      }
    atomicInformer.set(informerForThisSuite)
    var swapAndCompareSucceeded = false
    try {
      super.run(testName, report, stopRequested, filter, configMap, distributor, tracker)
    }
    finally {
      // After the run, replace the informer with the zombie so late info
      // calls fail fast instead of silently reporting nowhere.
      val shouldBeInformerForThisSuite = atomicInformer.getAndSet(zombieInformer)
      swapAndCompareSucceeded = shouldBeInformerForThisSuite eq informerForThisSuite
    }
    if (!swapAndCompareSucceeded) // Do outside finally to workaround Scala compiler bug
      throw new ConcurrentModificationException(Resources("concurrentInformerMod", thisSuite.getClass.getName))
  }

  /**
   * Registers shared tests.
   *
   * <p>
   * This method enables the following syntax for shared tests in a <code>FunSuite</code>:
   * </p>
   *
   * <pre>
   * testsFor(nonEmptyStack(lastValuePushed))
   * </pre>
   *
   * <p>
   * This method just provides syntax sugar intended to make the intent of the code clearer.
   * Because the parameter passed to it is
   * type <code>Unit</code>, the expression will be evaluated before being passed, which
   * is sufficient to register the shared tests. For examples of shared tests, see the
   * <a href="#SharedTests">Shared tests section</a> in the main documentation for this trait.
   * </p>
   */
  protected def testsFor(unit: Unit) {}
}
| kevinwright/scalatest | src/main/scala/org/scalatest/FunSuite.scala | Scala | apache-2.0 | 55,243 |
package com.rasterfoundry
import com.rasterfoundry.datamodel._
import _root_.io.circe._
import _root_.io.circe.generic.semiauto._
import _root_.io.circe.parser._
import _root_.io.circe.syntax._
import cats.syntax.either._
import geotrellis.proj4.CRS
import geotrellis.raster._
import geotrellis.raster.io.geotiff._
import geotrellis.raster.io.geotiff.compression._
import geotrellis.raster.io.geotiff.tags._
import geotrellis.raster.render.IndexedColorMap
import geotrellis.vector._
import geotrellis.vector.io.json.{CrsFormats, Implicits => GeoJsonImplicits}
import geotrellis.vector.{Extent, MultiPolygon}
import java.nio.ByteOrder
/**
 * Circe encoders/decoders for GeoTrellis raster and vector types used by the
 * common module, plus JSON codecs for the project's GeoTIFF metadata models.
 * Most instances are mechanically derived with circe's semiauto derivation;
 * the handwritten ones below carry notes on their decode/encode semantics.
 */
package object common extends GeoJsonImplicits {
  // Extents are serialized as a flat 4-element JSON array: [xmin, ymin, xmax, ymax].
  implicit val extentEncoder: Encoder[Extent] =
    new Encoder[Extent] {
      def apply(extent: Extent): Json =
        List(extent.xmin, extent.ymin, extent.xmax, extent.ymax).asJson
    }
  implicit val extentDecoder: Decoder[Extent] =
    Decoder[Json] emap { js =>
      js.as[List[Double]]
        .map {
          // NOTE(review): this match is non-exhaustive — a numeric array whose
          // length is not exactly 4 throws a MatchError at runtime instead of
          // yielding a Left. Confirm inputs are always 4-element arrays.
          case List(xmin, ymin, xmax, ymax) =>
            Extent(xmin, ymin, xmax, ymax)
        }
        .leftMap(_ => "Extent")
    }
  implicit val multipolygonEncoder: Encoder[MultiPolygon] =
    new Encoder[MultiPolygon] {
      def apply(mp: MultiPolygon): Json = {
        // GeoJSON produced by GeoTrellis is re-parsed into circe Json; a
        // parse failure here is rethrown rather than surfaced as a Left.
        parse(mp.toGeoJson) match {
          case Right(js: Json) => js
          case Left(e) => throw e
        }
      }
    }
  // NOTE(review): parseGeoJson throws on malformed input, so this decoder can
  // raise instead of returning a DecodingFailure — confirm callers expect that.
  implicit val multipolygonDecoder: Decoder[MultiPolygon] = Decoder[Json] map {
    _.spaces4.parseGeoJson[MultiPolygon]
  }
  // Decoders
  implicit val byteOrderDecoder: Decoder[ByteOrder] =
    Decoder.decodeString.emap {
      case "BIG_ENDIAN" => Either.right(ByteOrder.BIG_ENDIAN)
      case "LITTLE_ENDIAN" => Either.right(ByteOrder.LITTLE_ENDIAN)
      case s => Either.left(s"Unknown Byte Order: $s")
    }
  implicit val byteOrderEncoder: Encoder[ByteOrder] =
    Encoder.encodeString.contramap(_.toString)
  implicit val metadataTagsDecoder: Decoder[MetadataTags] =
    deriveDecoder[MetadataTags]
  implicit val basicTagsDecoder: Decoder[BasicTags] = deriveDecoder[BasicTags]
  implicit val nonBasicTagsDecoder: Decoder[NonBasicTags] =
    deriveDecoder[NonBasicTags]
  implicit val pixel3DTagsDecoder: Decoder[Pixel3D] = deriveDecoder[Pixel3D]
  implicit val configKeysDecoder: Decoder[ConfigKeys] =
    deriveDecoder[ConfigKeys]
  implicit val geogCSParameterKeysDecoder: Decoder[GeogCSParameterKeys] =
    deriveDecoder[GeogCSParameterKeys]
  implicit val projectedFalsingsDecoder: Decoder[ProjectedFalsings] =
    deriveDecoder[ProjectedFalsings]
  implicit val projectedCSParameterKeysDecoder
    : Decoder[ProjectedCSParameterKeys] =
    deriveDecoder[ProjectedCSParameterKeys]
  implicit val verticalCSKeysDecoder: Decoder[VerticalCSKeys] =
    deriveDecoder[VerticalCSKeys]
  implicit val nonStandardizedKeysDecoder: Decoder[NonStandardizedKeys] =
    deriveDecoder[NonStandardizedKeys]
  implicit val geoKeyDirectoryTagsDecoder: Decoder[GeoKeyDirectory] =
    deriveDecoder[GeoKeyDirectory]
  implicit val geotiffTagsDecoder: Decoder[GeoTiffTags] =
    deriveDecoder[GeoTiffTags]
  implicit val docTagsDecoder: Decoder[DocumentationTags] =
    deriveDecoder[DocumentationTags]
  implicit val tileTagsDecoder: Decoder[TileTags] = deriveDecoder[TileTags]
  implicit val cmykTagsDecoder: Decoder[CmykTags] = deriveDecoder[CmykTags]
  implicit val dataSampleFormatTagsDecoder: Decoder[DataSampleFormatTags] =
    deriveDecoder[DataSampleFormatTags]
  implicit val colimetryTagsDecoder: Decoder[ColimetryTags] =
    deriveDecoder[ColimetryTags]
  implicit val jpegTagsDecoder: Decoder[JpegTags] = deriveDecoder[JpegTags]
  implicit val ycbcrTagsDecoder: Decoder[YCbCrTags] = deriveDecoder[YCbCrTags]
  implicit val nonStandardizedTagsDecoder: Decoder[NonStandardizedTags] =
    deriveDecoder[NonStandardizedTags]
  implicit val tiffDecoder: Decoder[TiffType] = deriveDecoder[TiffType]
  implicit val tiffTagsDecoder: Decoder[TiffTags] = deriveDecoder[TiffTags]
  // "striped" selects Striped storage (with optional rowsPerStrip); any other
  // storageType value falls through to Tiled, defaulting cols/rows to 256.
  implicit val storageMethodDecoder: Decoder[StorageMethod] =
    new Decoder[StorageMethod] {
      final def apply(c: HCursor): Decoder.Result[StorageMethod] = {
        for {
          storageType <- c.downField("storageType").as[String]
        } yield {
          storageType match {
            case "striped" => {
              val rowsPerStrip =
                c.downField("rowsPerStrip").as[Option[Int]] match {
                  case Left(_) => None
                  case Right(v) => v
                }
              new Striped(rowsPerStrip)
            }
            case _ => {
              val cols = c.downField("cols").as[Int] match {
                case Left(_) => 256
                case Right(v) => v
              }
              val rows = c.downField("rows").as[Int] match {
                case Left(_) => 256
                case Right(v) => v
              }
              Tiled(cols, rows)
            }
          }
        }
      }
    }
  implicit val storageMethodEncoder: Encoder[StorageMethod] =
    new Encoder[StorageMethod] {
      final def apply(a: StorageMethod): Json = a match {
        case _: Striped => Json.obj(("storageType", Json.fromString("striped")))
        case Tiled(cols, rows) =>
          Json.obj(
            ("storageType", Json.fromString("tiled")),
            ("cols", Json.fromInt(cols)),
            ("rows", Json.fromInt(rows))
          )
      }
    }
  // "NoCompression" maps to NoCompression; every other compressionType value
  // is treated as Deflate, using the "level" field when present.
  implicit val compressionDecoder: Decoder[Compression] =
    new Decoder[Compression] {
      final def apply(c: HCursor): Decoder.Result[Compression] = {
        for {
          compressionType <- c.downField("compressionType").as[String]
        } yield {
          compressionType match {
            case "NoCompression" => NoCompression
            case _ => {
              c.downField("level").as[Int] match {
                case Left(_) => DeflateCompression()
                case Right(i) => DeflateCompression(i)
              }
            }
          }
        }
      }
    }
  implicit val compressionEncoder: Encoder[Compression] =
    new Encoder[Compression] {
      final def apply(a: Compression): Json = a match {
        case NoCompression =>
          Json.obj(("compressionType", Json.fromString("NoCompression")))
        case d: DeflateCompression =>
          Json.obj(
            ("compressionType", Json.fromString("Deflate")),
            ("level", Json.fromInt(d.level))
          )
      }
    }
  // Color maps round-trip as a flat sequence of packed int colors.
  implicit val indexedColorMapDecoder: Decoder[IndexedColorMap] =
    Decoder.decodeSeq[Int].emap { s =>
      Either.right(new IndexedColorMap(s))
    }
  implicit val indexedColorMapEncoder: Encoder[IndexedColorMap] =
    Encoder.encodeSeq[Int].contramapArray(_.colors.toSeq)
  implicit val newSubFileTypeDecoder: Decoder[NewSubfileType] =
    deriveDecoder[NewSubfileType]
  implicit val geotiffOptionsDecoder: Decoder[GeoTiffOptions] =
    deriveDecoder[GeoTiffOptions]
  // BandType is reconstructed from its two defining fields.
  implicit val bandTypeDecoder: Decoder[BandType] =
    new Decoder[BandType] {
      final def apply(c: HCursor): Decoder.Result[BandType] = {
        for {
          bitsPerSample <- c.downField("bitsPerSample").as[Int]
          sampleFormat <- c.downField("sampleFormat").as[Int]
        } yield {
          BandType(bitsPerSample, sampleFormat)
        }
      }
    }
  implicit val bandTypeEncoder: Encoder[BandType] = new Encoder[BandType] {
    final def apply(a: BandType): Json = Json.obj(
      ("bitsPerSample", Json.fromInt(a.bitsPerSample)),
      ("sampleFormat", Json.fromInt(a.sampleFormat))
    )
  }
  implicit val tileLayoutDecoder: Decoder[TileLayout] =
    deriveDecoder[TileLayout]
  implicit val geotiffSegmentLayoutDecoder: Decoder[GeoTiffSegmentLayout] =
    deriveDecoder[GeoTiffSegmentLayout]
  // override the crsformats default decoder to be able to decode from an EPSG
  // name in addition to a proj4 string
  implicit val decCrs: Decoder[CRS] = (Decoder.decodeString.emap(s => {
    Either
      .catchNonFatal(CRS.fromName(s))
      .leftMap(_ => s"$s is not a valid CRS name")
  })) or CrsFormats.crsDecoder
  implicit val tagsDecoder: Decoder[Tags] = deriveDecoder[Tags]
  implicit val bsgtDecoder: Decoder[BacksplashGeoTiffInfo] =
    deriveDecoder[BacksplashGeoTiffInfo]
  // Encoders
  implicit val metadataTagsEncoder: Encoder[MetadataTags] =
    deriveEncoder[MetadataTags]
  implicit val basicTagsEncoder: Encoder[BasicTags] = deriveEncoder[BasicTags]
  implicit val nonBasicTagsEncoder: Encoder[NonBasicTags] =
    deriveEncoder[NonBasicTags]
  implicit val pixel3DTagsEncoder: Encoder[Pixel3D] = deriveEncoder[Pixel3D]
  implicit val configKeysEncoder: Encoder[ConfigKeys] =
    deriveEncoder[ConfigKeys]
  implicit val geogCSParameterKeysEncoder: Encoder[GeogCSParameterKeys] =
    deriveEncoder[GeogCSParameterKeys]
  implicit val projectedFalsingsEncoder: Encoder[ProjectedFalsings] =
    deriveEncoder[ProjectedFalsings]
  implicit val projectedCSParameterKeysEncoder
    : Encoder[ProjectedCSParameterKeys] =
    deriveEncoder[ProjectedCSParameterKeys]
  implicit val verticalCSKeysEncoder: Encoder[VerticalCSKeys] =
    deriveEncoder[VerticalCSKeys]
  implicit val nonStandardizedKeysEncoder: Encoder[NonStandardizedKeys] =
    deriveEncoder[NonStandardizedKeys]
  implicit val geoKeyDirectoryTagsEncoder: Encoder[GeoKeyDirectory] =
    deriveEncoder[GeoKeyDirectory]
  implicit val geotiffTagsEncoder: Encoder[GeoTiffTags] =
    deriveEncoder[GeoTiffTags]
  implicit val docTagsEncoder: Encoder[DocumentationTags] =
    deriveEncoder[DocumentationTags]
  implicit val tileTagsEncoder: Encoder[TileTags] = deriveEncoder[TileTags]
  implicit val cmykTagsEncoder: Encoder[CmykTags] = deriveEncoder[CmykTags]
  implicit val dataSampleFormatTagsEncoder: Encoder[DataSampleFormatTags] =
    deriveEncoder[DataSampleFormatTags]
  implicit val colimetryTagsEncoder: Encoder[ColimetryTags] =
    deriveEncoder[ColimetryTags]
  implicit val jpegTagsEncoder: Encoder[JpegTags] = deriveEncoder[JpegTags]
  implicit val ycbcrTagsEncoder: Encoder[YCbCrTags] = deriveEncoder[YCbCrTags]
  implicit val nonStandardizedTagsEncoder: Encoder[NonStandardizedTags] =
    deriveEncoder[NonStandardizedTags]
  implicit val tiffEncoder: Encoder[TiffType] = deriveEncoder[TiffType]
  implicit val tiffTagsEncoder: Encoder[TiffTags] = deriveEncoder[TiffTags]
  implicit val interleaveMethodEncoder: Encoder[InterleaveMethod] =
    Encoder.encodeString.contramap[InterleaveMethod](_.toString)
  implicit val interleaveMethodDecoder: Decoder[InterleaveMethod] =
    Decoder.decodeString.emap {
      case "PixelInterleave" => Right(PixelInterleave)
      case "BandInterleave" => Right(BandInterleave)
      case str => Left(s"Invalid Interleave: $str")
    }
  implicit val newSubFileTypeEncoder: Encoder[NewSubfileType] =
    deriveEncoder[NewSubfileType]
  implicit val geotiffOptionsEncoder: Encoder[GeoTiffOptions] =
    deriveEncoder[GeoTiffOptions]
  implicit val tileLayoutEncoder: Encoder[TileLayout] =
    deriveEncoder[TileLayout]
  implicit val geotiffSegmentLayoutEncoder: Encoder[GeoTiffSegmentLayout] =
    deriveEncoder[GeoTiffSegmentLayout]
  implicit val tagsEncoder: Encoder[Tags] = deriveEncoder[Tags]
  implicit val bsgtEncoder: Encoder[BacksplashGeoTiffInfo] =
    deriveEncoder[BacksplashGeoTiffInfo]
}
| raster-foundry/raster-foundry | app-backend/common/src/main/scala/package.scala | Scala | apache-2.0 | 11,327 |
package code
package snippet
import scala.xml.{NodeSeq, Text}
import net.liftweb.util._
import net.liftweb.http._
import js._
import JsCmds._
import JE._
import SHtml._
import net.liftweb.common._
import java.util.Date
import code.lib._
import Helpers._
class HelloWorld {

  /** The current date, lazily injected via the DependencyFactory. */
  lazy val date: Box[Date] = DependencyFactory.inject[Date]

  /** Binds the injected date's string form into the element with id "time". */
  def howdy = "#time *" #> date.map(_.toString)

  /** Ajax click binding that responds immediately with an alert. */
  def shortAjax =
    "a [onclick]" #> onEvent(_ => Alert("SHORT"))

  /** Ajax click binding that simulates a slow server round trip (9s). */
  def longAjax =
    "a [onclick]" #> onEvent { _ =>
      Thread.sleep(9000)
      Alert("LONG")
    }

  /** Embeds the HelloWorldComet comet actor into the page. */
  def comet =
    <lift:comet type="HelloWorldComet" />
}
| Shadowfiend/lift-tests | src/main/scala/code/snippet/HelloWorld.scala | Scala | mit | 893 |
package controllers
import play.api._
import play.api.mvc._
import play.api.libs.json._
import play.api.data._
import play.api.data.Forms._
object Application extends Controller {

  /** A stored shortened-url entry. */
  case class ShortUrl(id: Int, name: String, longUrl: String)

  // JSON serializer for ShortUrl, derived by Play's Json macro.
  implicit val urlWrites = Json.writes[ShortUrl]

  // Submission form: the link's display name and its target URL.
  val form = Form(
    tuple(
      "Name" -> text,
      "Url" -> text
    )
  )

  // In-memory store. NOTE(review): process-local and lost on restart.
  var urls = Array[ShortUrl]()
  var idCounter = 1

  // Accepts http(s)/ftp/file URLs. Scala Regex pattern matching (used in
  // create) requires the whole string to match this pattern.
  val validUrl = "^(https?|ftp|file)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]".r;

  /** Returns all stored short urls as a JSON array. */
  def shortenedURL = Action {
    // Snapshot under the lock so we never serialize a half-updated store.
    val json = synchronized { Json.toJson(urls) }
    Ok(json)
  }

  /** Renders the main page. */
  def index = Action {
    Ok(views.html.main())
  }

  /**
   * Binds the submitted form and stores a new entry. URLs that fail the
   * validUrl pattern are stored with an empty longUrl (preserving the
   * original behavior of this controller).
   */
  def create = Action { implicit request =>
    val params = form.bindFromRequest.get
    val url = params._2 match {
      case validUrl(_*) => params._2
      case _ => ""
    }
    // Synchronize the read-modify-write: Play runs actions concurrently, so
    // unguarded updates could lose entries or hand out duplicate ids.
    synchronized {
      urls = urls :+ ShortUrl(idCounter, params._1, url)
      idCounter += 1
    }
    Ok(views.html.main())
  }
}
| patrickmedaugh/shortnr | app/controllers/Application.scala | Scala | mit | 1,023 |
import scala.collection.mutable
/**
 * A node of an FP-tree. Holds the item it represents (None for the root),
 * its occurrence count, a link to its parent, its children keyed by item,
 * and a "neighbour" link to the next node carrying the same item.
 */
case class FPNode(var tree: FPTree, var item: Option[String],
                  var parent: Option[FPNode], var count: Int = 1) {

  /** Direct children of this node, keyed by item name. */
  val children: mutable.Map[String, FPNode] = mutable.Map.empty

  /** Next node holding the same item in the tree's header-list chain. */
  var neighbour: Option[FPNode] = None

  // On construction, register this node as a child of its parent
  // (only when both a parent and an item are present).
  for (p <- parent; i <- item)
    p.children += (i -> this)

  /** True when this node has no parent, i.e. it is the root. */
  def root: Boolean = parent.isEmpty

  /** Looks up the direct child holding the given item, if any. */
  def search(item: String): Option[FPNode] = children.get(item)

  /** True when some direct child holds the given item. */
  def contains(item: String): Boolean = search(item).isDefined

  /** Bumps this node's occurrence count by one. */
  def increment(): Unit = count += 1

  /** True when this node has no children. */
  def leaf: Boolean = children.isEmpty

  /** Re-parents this node; the new parent must belong to the same tree. */
  def setParent(newParent: FPNode): Unit = {
    if (newParent.tree != tree)
      throw new IllegalArgumentException("Only nodes in current tree can be added as parent.")
    parent = Some(newParent)
  }

  /** Sets the same-item neighbour; it must belong to the same tree. */
  def setNeighbour(newNeighbour: FPNode): Unit = {
    if (newNeighbour.tree != tree)
      throw new IllegalArgumentException("Only nodes in the current tree can be added as neighbour.")
    neighbour = Some(newNeighbour)
  }

  /** Prints the subtree rooted at this node, indented one space per level. */
  def inspect(depth: Int = 0): Unit = {
    println(" " * depth + repr())
    children.values.foreach(_.inspect(depth + 1))
  }

  /** Short display form: "<root>" for the root, otherwise "item(count)". */
  def repr(): String =
    if (root) "<root>" else s"${item.get}($count)"
}
| jayadeepk/FPGrowth | src/FPNode.scala | Scala | mit | 1,694 |
package net.caoticode.dirwatcher.actors
import akka.actor.Actor
import net.caoticode.dirwatcher.FSListener
/**
 * Actor that forwards file-system change messages to the wrapped FSListener.
 * Unknown messages are silently discarded.
 *
 * @author Daniel Camarda ([email protected])
 */
class ListenerActor(listener: FSListener) extends Actor {
  override def receive = {
    case Messages.Create(p) => listener.onCreate(p)
    case Messages.Delete(p) => listener.onDelete(p)
    case Messages.Modify(p) => listener.onModify(p)
    case _ => () // ignore anything else
  }
}
} | mdread/dir-watcher | src/main/scala/net/caoticode/dirwatcher/actors/ListenerActor.scala | Scala | mit | 434 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.lewuathe.dllib
import com.lewuathe.dllib.example.XORApp
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest._
class XORSpec extends FlatSpec with Matchers {

  // End-to-end check that the network learns the XOR function on a local
  // Spark context.
  "NN3" should "learn XOR behaviour" in {
    val conf = new SparkConf()
    val sc = new SparkContext(master = "local[*]",
      appName = "XORSpec", conf = conf)
    sc.setLogLevel("WARN")
    try {
      val app = XORApp
      app.numIterations = 2000
      app.learningRate = 0.7
      app.submit(sc)
    } finally {
      // FIX: the context was never stopped, leaking it and breaking any
      // subsequent test in the same JVM (only one active SparkContext allowed).
      sc.stop()
    }
  }
}
| Lewuathe/dllib | src/test/scala/com/lewuathe/dllib/XORSpec.scala | Scala | apache-2.0 | 1,292 |
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright 2016-2020 Daniel Urban and contributors listed in NOTICE.txt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.choam
import scala.annotation.StaticAnnotation
import scala.reflect.macros.whitebox.Context
// Macro annotation expanded by JcStressMacros.kcasParamsImpl: for each known
// k-CAS implementation it generates a jcstress test subclass of the annotated
// base class. `desc` is the description prefix of the generated tests;
// `disable = true` omits the @JCStressTest annotation so the harness skips them.
final class KCASParams(private val desc: String, private val disable: Boolean = false) extends StaticAnnotation {
  def macroTransform(annottees: Any*): Any =
    macro JcStressMacros.kcasParamsImpl
}
/**
 * Implementation of the [[KCASParams]] macro annotation.
 *
 * Given a base jcstress test class whose first constructor parameter is the
 * k-CAS implementation to use, it generates one concrete subclass per known
 * implementation (annotated with @State/@Description and, unless disabled,
 * @JCStressTest) and places the generated classes into the companion object
 * (creating one if the annottee has none).
 */
object JcStressMacros {

  def kcasParamsImpl(c: Context)(annottees: c.Expr[Any]*): c.Tree = {
    import c.universe._

    // The k-CAS implementations for which a test subclass is generated;
    // the String is used as class-name suffix and description suffix.
    val impls = List(
      "CASN" -> q"_root_.dev.tauri.choam.kcas.KCAS.CASN",
      "MCAS" -> q"_root_.dev.tauri.choam.kcas.KCAS.MCAS",
      "EMCAS" -> q"_root_.dev.tauri.choam.kcas.KCAS.EMCAS",
      "NaiveKCAS" -> q"_root_.dev.tauri.choam.kcas.KCAS.NaiveKCAS"
    )

    // Extract the annotation arguments (description, optional disable flag);
    // they are evaluated at macro-expansion time, so must be constant enough.
    val (baseDesc, disable) = c.prefix.tree match {
      case q"new $_($d)" =>
        (c.eval[String](c.Expr(d)), false)
      case q"new $_($d, $disable)" =>
        (c.eval[String](c.Expr(d)), c.eval[Boolean](c.Expr(disable)))
      case _ => c.abort(c.enclosingPosition, "Invalid macro argument")
    }

    // True if the method carries a jcstress @Actor or @Arbiter annotation
    // (matched both fully-qualified and unqualified — see the FIXMEs).
    def isMarked(mods: Modifiers): Boolean = {
      mods.annotations.find {
        case q"new _root_.org.openjdk.jcstress.annotations.Actor()" =>
          true
        case q"new Actor()" => // FIXME
          true
        case q"new _root_.org.openjdk.jcstress.annotations.Arbiter()" =>
          true
        case q"new Arbiter()" => // FIXME
          true
        case _ =>
          false
      }.isDefined
    }

    // For every @Actor/@Arbiter method of the base class, generate an
    // overriding forwarder (`override def m(...) = super.m(...)`) so the
    // annotated methods are visible on each generated concrete subclass.
    def mkOverrides(baseBody: List[c.Tree]): List[c.Tree] = {
      baseBody.collect {
        case DefDef(mods, name, tparams, params, ret, _) if isMarked(mods) =>
          // Keep the original annotations, add `override`.
          val newMods = mods match {
            case Modifiers(flags, nme, anns) =>
              Modifiers(flags | Flag.OVERRIDE, nme, anns)
            case _ =>
              c.abort(c.enclosingPosition, "Invalid modifiers")
          }
          // Re-use the parameter names to forward every argument list.
          val ps = params.map(_.map {
            case ValDef(_, nme, _, _) => nme
            case _ => c.abort(c.enclosingPosition, "Invalid paramlist")
          })
          val newBody = q"""
            super.${name}[..$tparams](...$ps)
          """
          DefDef(newMods, name, tparams, params, ret, newBody)
      }
    }

    // Builds one concrete test class for the given k-CAS implementation,
    // named <BasePrefix><kcasName>, passing the implementation to the base
    // constructor. @JCStressTest is added only when not disabled.
    def mkTestClass(base: TypeName, overrides: List[c.Tree], kcasImpl: c.Tree, kcasName: String): c.Tree = {
      val prefix = base.toString().split('.').last
      val desc: String = s"${baseDesc} (${kcasName})"
      val clsName = TypeName(prefix + kcasName)
      val defi = q"""
        @_root_.org.openjdk.jcstress.annotations.State
        @_root_.org.openjdk.jcstress.annotations.Description($desc)
        class ${clsName} extends $base($kcasImpl) {
          ..$overrides
        }
      """
      if (disable) {
        defi
      } else {
        defi match {
          case ClassDef(mods, name, bs, tmpl) =>
            ClassDef(
              mods.mapAnnotations(_ :+ q"new _root_.org.openjdk.jcstress.annotations.JCStressTest()"),
              name,
              bs,
              tmpl
            )
          case _ =>
            c.abort(c.enclosingPosition, "Internal error")
        }
      }
    }

    // One generated subclass per k-CAS implementation.
    def mkSubs(base: TypeName, baseBody: List[c.Tree]): List[c.Tree] = {
      val overrides = mkOverrides(baseBody)
      impls.map {
        case (kcasName, kcasImpl) =>
          mkTestClass(
            base,
            overrides,
            kcasImpl,
            kcasName
          )
      }
    }

    // Adds to the base class an implicit `kcasImpl` val aliasing its first
    // constructor parameter, so the implementation is implicitly available
    // inside the test body.
    def transformBaseClass(cls: ClassDef): ClassDef = {
      val paramName: TermName = cls match {
        case q"$_ class $_[..$_] $_(...$paramss) extends { ..$_ } with ..$_ { $_ => ..$_ }" =>
          paramss match {
            case (ValDef(_, paramName, _, _) :: _) :: _ =>
              paramName
            case _ =>
              c.abort(c.enclosingPosition, s"Expected at least one constructor parameter")
          }
        case _ =>
          c.abort(c.enclosingPosition, s"Expected a class definition, got ${showRaw(cls)}")
      }
      val kcasImplDef: Tree = q"""
        protected implicit final val kcasImpl: _root_.dev.tauri.choam.kcas.KCAS =
          ${paramName}
      """
      cls match {
        case ClassDef(mods, name, tparams, Template(parents, self, body)) =>
          val newBody = body :+ kcasImplDef
          ClassDef(mods, name, tparams, Template(parents, self, newBody))
        case _ =>
          c.abort(c.enclosingPosition, s"Expected a class definition, got ${showRaw(cls)}")
      }
    }

    // The annottee is either a lone class (a companion object is synthesised
    // to hold the generated subclasses) or a class plus an existing companion
    // (the generated subclasses are appended to its body).
    annottees.map(_.tree).toList match {
      case List(cls @ ClassDef(_, name, _, Template(_, _, body))) =>
        q"""
          ${transformBaseClass(cls)}
          object ${name.toTermName} {
            ..${mkSubs(name, body)}
          }
        """
      case List(cls @ ClassDef(_, name, _, Template(_, _, body)), ModuleDef(mds, nme, Template(ps, slf, bdy))) =>
        val newTemplate = Template(ps, slf, bdy ++ mkSubs(name, body))
        val newMod = ModuleDef(mds, nme, newTemplate)
        q"""
          ${transformBaseClass(cls)}
          ${newMod}
        """
      case h :: _ =>
        c.abort(c.enclosingPosition, s"Invalid annotation target: ${h} (${h.getClass.getName})")
      case _ =>
        c.abort(c.enclosingPosition, "Invalid annotation target")
    }
  }
}
| durban/exp-reagents | stress/src/main/scala/dev/tauri/choam/jcStressMacros.scala | Scala | apache-2.0 | 5,914 |
object basic {
  // GADT-style expression ADT: each case refines the type parameter,
  // so matching on a case refines `A` in scope.
  enum Expr[A] {
    case IntExpr(value: Int) extends Expr[Int]
    case Other[T](value: T) extends Expr[T]
  }

  class C[A] {
    // In the IntExpr branch the compiler knows A =:= Int, so `n + 2` typechecks
    // and the result still conforms to A.
    def eval(e: Expr[A]): A = e match {
      case Expr.IntExpr(n) => n + 2
      case Expr.Other(x)   => x
    }
  }
}
| dotty-staging/dotty | tests/pos/class-gadt/basic.scala | Scala | apache-2.0 | 273 |
import edu.insight.unlp.nn.example.TwitterSentiMain
import org.apache.spark.rdd.RDD
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization._
import scala.util.parsing.json.JSON
/**
* Created by cnavarro on 24/11/15.
*/
/**
 * Spark helpers enriching tweet records with sentiment polarity using a
 * pre-trained model (TwitterSentiMain). The model is loaded once per
 * partition, since initialisation is expensive — use with `mapPartitions`.
 *
 * Created by cnavarro on 24/11/15.
 */
object SparkSentiment {

  /** Classifies each raw text line; returns maps of {line, sentiment}. */
  def extractSentiment(lines: Iterator[String], resourcesFolder: String): Iterator[Map[String,Any]] = {
    val glovePath: String = resourcesFolder + "embeddings/glove.twitter.27B.50d.txt"
    val dataPath: String = resourcesFolder + "data/twitterSemEval2013.tsv"
    val modelPath: String = resourcesFolder + "model/learntSentiTwitter.model"
    val timestart = System.currentTimeMillis()
    val sentimenter: TwitterSentiMain = new TwitterSentiMain
    println("modelPath = " + modelPath)
    // FIX: previously printed modelPath under the "glovePath" label.
    println("glovePath = " + glovePath)
    println("dataPath = " + dataPath)
    sentimenter.loadModel(modelPath, glovePath, dataPath)
    val timeend = System.currentTimeMillis()
    println("Time to initialize model: " + (timeend - timestart))
    lines.map(line => Map("line" -> line, "sentiment" -> sentimenter.classify(line)))
  }

  /**
   * Adds "polarity" (label) and "sentiment" (numeric score) to each record.
   * Only non-empty English ("lang" == "en") texts are classified; everything
   * else is tagged neutral/0. Mutates and returns the incoming maps.
   */
  def extractSentimentFromMap(lines: Iterator[scala.collection.mutable.Map[String,Any]], resourcesFolder:String) : Iterator[scala.collection.mutable
  .Map[String,Any]]= {
    val sentimenter: TwitterSentiMain = new TwitterSentiMain
    val glovePath: String = resourcesFolder + "embeddings/glove.twitter.27B.50d.txt"
    val dataPath: String = resourcesFolder + "data/twitterSemEval2013.tsv"
    val modelPath: String = resourcesFolder + "model/learntSentiTwitter.model"
    println("******************glovePath: "+glovePath)
    println("******************dataPath: "+dataPath)
    println("******************modelPath: "+modelPath)
    sentimenter.loadModel(modelPath, glovePath, dataPath)
    for (line <- lines) yield {
      val text = line.getOrElse("text","").asInstanceOf[String]
      val lang = line.getOrElse("lang","").asInstanceOf[String]
      if (text.nonEmpty && lang == "en") {
        val polarity = sentimenter.classify(text).toLowerCase
        line += ("polarity"-> polarity, "sentiment"->getSentimentScore(polarity))
      } else {
        line += ("polarity"-> "neutral", "sentiment"->0)
      }
    }
  }

  /** Maps a polarity label to a score: positive -> 1.0, negative -> -1.0, else 0.0. */
  def getSentimentScore(polarity: String) : Double = polarity match {
    case "positive" => 1.0
    case "negative" => -1.0
    case _          => 0.0
  }

  /** Parses each JSON line, enriches it with sentiment and re-serialises it. */
  def extractSentimentFromRDD(input: RDD[String], resourcesFolder: String): RDD[String] = {
    println("\\t\\tExtracting sentiment")
    val parsed = input
      .map(x => JSON.parseFull(x).asInstanceOf[Some[Map[String,Any]]].getOrElse(Map[String,Any]()))
      .map(x => collection.mutable.Map(x.toSeq: _*))
    val enriched = parsed.mapPartitions(item => extractSentimentFromMap(item, resourcesFolder))
    enriched.mapPartitions { items =>
      implicit val formats = Serialization.formats(NoTypeHints)
      // Lazily serialise each record; avoids the previous O(n^2) `a = a :+ x`
      // list building and does not materialise the whole partition.
      items.map(write(_))
    }
  }
}
| canademar/me_extractors | BRMDemoReview/src/main/scala/SparkSentiment.scala | Scala | gpl-2.0 | 3,368 |
package pl.touk.nussknacker.engine.process.helpers
import cats.data.Validated.Valid
import cats.data.ValidatedNel
import io.circe.generic.JsonCodec
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.functions.{FilterFunction, FlatMapFunction}
import org.apache.flink.streaming.api.datastream.DataStreamSink
import org.apache.flink.streaming.api.functions.co.RichCoFlatMapFunction
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.operators.{AbstractStreamOperator, OneInputStreamOperator}
import org.apache.flink.streaming.api.scala.{DataStream, _}
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord
import org.apache.flink.util.Collector
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.context.ProcessCompilationError.CustomNodeError
import pl.touk.nussknacker.engine.api.context._
import pl.touk.nussknacker.engine.api.context.transformation._
import pl.touk.nussknacker.engine.api.definition._
import pl.touk.nussknacker.engine.api.process._
import pl.touk.nussknacker.engine.api.runtimecontext.{ContextIdGenerator, EngineRuntimeContext}
import pl.touk.nussknacker.engine.api.test.InvocationCollectors.ServiceInvocationCollector
import pl.touk.nussknacker.engine.api.test.{EmptyLineSplittedTestDataParser, NewLineSplittedTestDataParser, TestDataParser}
import pl.touk.nussknacker.engine.api.typed.typing.{Typed, TypedObjectTypingResult, Unknown}
import pl.touk.nussknacker.engine.api.typed.{ReturningType, TypedMap, typing}
import pl.touk.nussknacker.engine.flink.api.compat.ExplicitUidInOperatorsSupport
import pl.touk.nussknacker.engine.flink.api.process._
import pl.touk.nussknacker.engine.flink.api.timestampwatermark.{StandardTimestampWatermarkHandler, TimestampWatermarkHandler}
import pl.touk.nussknacker.engine.flink.util.sink.EmptySink
import pl.touk.nussknacker.engine.flink.util.source.CollectionSource
import pl.touk.nussknacker.engine.api.NodeId
import pl.touk.nussknacker.engine.process.SimpleJavaEnum
import pl.touk.nussknacker.engine.util.service.{EnricherContextTransformation, TimeMeasuringService}
import pl.touk.nussknacker.engine.util.typing.TypingUtils
import pl.touk.nussknacker.test.WithDataList
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Date, Optional, UUID}
import javax.annotation.Nullable
import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}
//TODO: clean up sample objects...
object SampleNodes {
// Unfortunately we can't use scala Enumeration because of limited scala TypeInformation macro - see note in TypedDictInstance
// Immutable sample payloads used by the Flink process tests.
case class SimpleRecord(id: String, value1: Long, value2: String, date: Date, value3Opt: Option[BigDecimal] = None,
                        value3: BigDecimal = 1, intAsAny: Any = 1, enumValue: SimpleJavaEnum = SimpleJavaEnum.ONE)

// SimpleRecord paired with the previously seen `value1` for the same key
// (produced by StateCustomNode).
case class SimpleRecordWithPreviousValue(record: SimpleRecord, previous: Long, added: String)

// Accumulator variant collecting all observed `value2` entries.
case class SimpleRecordAcc(id: String, value1: Long, value2: Set[String], date: Date)

// JSON-serialisable record; circe codec derived via @JsonCodec.
@JsonCodec case class SimpleJsonRecord(id: String, field: String)
// Source factory producing a single-element stream containing the value of
// its "param" parameter.
class IntParamSourceFactory(exConfig: ExecutionConfig) extends SourceFactory {
  @MethodToInvoke
  def create(@ParamName("param") param: Int) = new CollectionSource[Int](config = exConfig,
    list = List(param),
    timestampAssigner = None, returnType = Typed[Int])
}
// Flink co-flatmap used by join transformers: for every incoming context it
// evaluates the lazy "value" parameter of the matching branch ("end1" for the
// first input, "end2" for the second) and emits the evaluated value.
class JoinExprBranchFunction(valueByBranchId: Map[String, LazyParameter[AnyRef]],
                             val lazyParameterHelper: FlinkLazyParameterFunctionHelper)
  extends RichCoFlatMapFunction[Context, Context, ValueWithContext[AnyRef]] with LazyParameterInterpreterFunction {

  // Interpreters are @transient lazy so they are built on the task manager,
  // after deserialisation/open, not on the job submitter.
  @transient lazy val end1Interpreter: Context => AnyRef =
    lazyParameterInterpreter.syncInterpretationFunction(valueByBranchId("end1"))

  @transient lazy val end2Interpreter: Context => AnyRef =
    lazyParameterInterpreter.syncInterpretationFunction(valueByBranchId("end2"))

  override def flatMap1(ctx: Context, out: Collector[ValueWithContext[AnyRef]]): Unit = collectHandlingErrors(ctx, out) {
    ValueWithContext(end1Interpreter(ctx), ctx)
  }

  override def flatMap2(ctx: Context, out: Collector[ValueWithContext[AnyRef]]): Unit = collectHandlingErrors(ctx, out) {
    ValueWithContext(end2Interpreter(ctx), ctx)
  }
}
//data is static, to be able to track, Service is object, to initialize metrics properly...
// Measures invocation time and records every received argument into the
// shared, static `object MockService` data list so tests can assert on it.
class MockService extends Service with TimeMeasuringService {
  val serviceName = "mockService"

  @MethodToInvoke
  def invoke(@ParamName("all") all: Any)(implicit ec: ExecutionContext): Future[Unit] = {
    measuring(Future.successful {
      MockService.add(all)
    })
  }
}
// Enricher verifying that the `open` lifecycle hook runs before invocation:
// it returns a value that is only assigned inside `open`.
class EnricherWithOpenService extends Service with TimeMeasuringService {
  val serviceName = "enricherWithOpenService"
  // Assigned in `open`; returning it proves the hook executed.
  var internalVar: String = _

  override def open(runtimeContext: EngineRuntimeContext): Unit = {
    super.open(runtimeContext)
    internalVar = "initialized!"
  }

  @MethodToInvoke
  def invoke()(implicit ec: ExecutionContext): Future[String] = {
    measuring(Future.successful {
      internalVar
    })
  }
}
// Test mixin recording whether the Lifecycle open/close hooks were invoked.
trait WithLifecycle extends Lifecycle {
  var opened: Boolean = false
  var closed: Boolean = false

  // Clears the recorded flags between test runs.
  def reset(): Unit = {
    opened = false
    closed = false
  }

  override def open(engineRuntimeContext: EngineRuntimeContext): Unit = {
    super.open(engineRuntimeContext)
    opened = true
  }

  override def close(): Unit = {
    super.close()
    closed = true
  }
}
// No-op service whose only purpose is exposing the WithLifecycle flags,
// so tests can assert that open/close were called.
object LifecycleService extends Service with WithLifecycle {
  @MethodToInvoke
  def invoke(): Future[Unit] = Future.successful(())
}
// Eager service creating one invoker per usage. It tracks its own lifecycle
// and propagates open/close to every created invoker (kept in `list`), and
// fails any invocation happening before `open` has run.
object EagerLifecycleService extends EagerService with WithLifecycle {

  // Invokers created so far, keyed by the "name" parameter; mutated under
  // `synchronized` because creation may happen concurrently.
  var list: List[(String, WithLifecycle)] = Nil

  override def open(engineRuntimeContext: EngineRuntimeContext): Unit = {
    super.open(engineRuntimeContext)
    list.foreach(_._2.open(engineRuntimeContext))
  }

  override def close(): Unit = {
    super.close()
    list.foreach(_._2.close())
  }

  override def reset(): Unit = synchronized {
    super.reset()
    list = Nil
  }

  @MethodToInvoke
  def invoke(@ParamName("name") name: String): ServiceInvoker = synchronized {
    val newI = new ServiceInvoker with WithLifecycle {
      override def invokeService(params: Map[String, Any])
                                (implicit ec: ExecutionContext,
                                 collector: ServiceInvocationCollector,
                                 contextId: ContextId,
                                 componentUseCase: ComponentUseCase): Future[Any] = {
        // Invocation before `open` means lifecycle was not propagated — fail loudly.
        if (!opened) {
          throw new IllegalArgumentException
        }
        Future.successful(())
      }
    }
    list = (name -> newI)::list
    newI
  }
}
// Eager service that records each invocation (the eager "static" value plus
// the evaluated "dynamic" parameter) into the test collector.
object CollectingEagerService extends EagerService {
  @MethodToInvoke
  def invoke(@ParamName("static") static: String, @ParamName("dynamic") dynamic: LazyParameter[String]): ServiceInvoker = new ServiceInvoker {
    override def invokeService(params: Map[String, Any])(implicit ec: ExecutionContext,
                                                         collector: ServiceInvocationCollector,
                                                         contextId: ContextId,
                                                         componentUseCase: ComponentUseCase): Future[Any] = {
      collector.collect(s"static-$static-dynamic-${params("dynamic")}", Option(())) {
        Future.successful(())
      }
    }
  }
}
// Verifies end-to-end handling of scala.Option-typed parameters: simply
// echoes the received optional value.
object ServiceAcceptingScalaOption extends Service {
  @MethodToInvoke
  def invoke(@ParamName("scalaOptionParam") scalaOptionParam: Option[String]): Future[Option[String]] = Future.successful(scalaOptionParam)
}
// Stateful transformer: keyed by the "groupBy" expression, it remembers the
// last seen `value1` per key (in Flink keyed state) and emits every record
// together with that previous value (0 for a key's first record).
object StateCustomNode extends CustomStreamTransformer with ExplicitUidInOperatorsSupport {

  @MethodToInvoke(returnType = classOf[SimpleRecordWithPreviousValue])
  def execute(@ParamName("stringVal") stringVal: String,
              @ParamName("groupBy") groupBy: LazyParameter[String])
             (implicit nodeId: NodeId, metaData: MetaData, componentUseCase: ComponentUseCase) = FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
    setUidToNodeIdIfNeed(context,
      start
        .flatMap(context.lazyParameterHelper.lazyMapFunction(groupBy))
        .keyBy(_.value)
        // Keyed state holds the previous value1; each step stores the current one.
        .mapWithState[ValueWithContext[AnyRef], Long] {
          case (SimpleFromValueWithContext(ctx, sr), Some(oldState)) =>
            (ValueWithContext(
              SimpleRecordWithPreviousValue(sr, oldState, stringVal), ctx), Some(sr.value1))
          case (SimpleFromValueWithContext(ctx, sr), None) =>
            (ValueWithContext(
              SimpleRecordWithPreviousValue(sr, 0, stringVal), ctx), Some(sr.value1))
        })
  })

  // Extracts (context, "input" variable as SimpleRecord) from a ValueWithContext.
  object SimpleFromValueWithContext {
    def unapply(vwc: ValueWithContext[_]) = Some((vwc.context, vwc.context.apply[SimpleRecord]("input")))
  }
}
// Passes through only the elements whose "input" expression evaluates exactly
// to "stringVal"; the emitted value itself is null (only the context matters).
object CustomFilter extends CustomStreamTransformer {
  @MethodToInvoke(returnType = classOf[Void])
  def execute(@ParamName("input") input: LazyParameter[String],
              @ParamName("stringVal") stringVal: String) = FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
    start
      .filter(new AbstractOneParamLazyParameterFunction(input, context.lazyParameterHelper) with FilterFunction[Context] {
        override def filter(value: Context): Boolean = evaluateParameter(value) == stringVal
      })
      .map(ValueWithContext[AnyRef](null, _))
  })
}
// Same behaviour as CustomFilter, but expressed through the
// ContextTransformation API (the validation context is passed on unchanged).
object CustomFilterContextTransformation extends CustomStreamTransformer {
  @MethodToInvoke(returnType = classOf[Void])
  def execute(@ParamName("input") input: LazyParameter[String], @ParamName("stringVal") stringVal: String): ContextTransformation = {
    ContextTransformation
      .definedBy(Valid(_))
      .implementedBy(
        FlinkCustomStreamTransformation { (start: DataStream[Context], context: FlinkCustomNodeContext) =>
          start
            .filter(new AbstractOneParamLazyParameterFunction(input, context.lazyParameterHelper) with FilterFunction[Context] {
              override def filter(value: Context): Boolean = evaluateParameter(value) == stringVal
            })
            .map(ValueWithContext[AnyRef](null, _))
        })
  }
}
// Clears all variables from the validation context and, at runtime, replaces
// each element's context with a fresh one (id "new") after keying by "value".
object CustomContextClear extends CustomStreamTransformer {
  @MethodToInvoke(returnType = classOf[Void])
  def execute(@ParamName("value") value: LazyParameter[String]) = {
    ContextTransformation
      .definedBy((in: context.ValidationContext) => Valid(in.clearVariables))
      .implementedBy(FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
        start
          .flatMap(context.lazyParameterHelper.lazyMapFunction(value))
          .keyBy(_.value)
          .map(_ => ValueWithContext[AnyRef](null, Context("new")))
      }))
  }
}
// Join that merges branches "end1" and "end2" into one stream, exposing each
// element's "input" variable under the given output variable name. The output
// context is taken from the first branch, with all variables cleared first.
object CustomJoin extends CustomStreamTransformer {
  @MethodToInvoke
  def execute(@OutputVariableName outputVarName: String)(implicit nodeId: NodeId): JoinContextTransformation = {
    ContextTransformation
      .join
      .definedBy((in: Map[String, context.ValidationContext]) => in.head._2.clearVariables.withVariable(outputVarName, Unknown, None))
      .implementedBy(new FlinkCustomJoinTransformation {
        override def transform(inputs: Map[String, DataStream[Context]], context: FlinkCustomNodeContext): DataStream[ValueWithContext[AnyRef]] = {
          val inputFromIr = (ir: Context) => ValueWithContext(ir.variables("input").asInstanceOf[AnyRef], ir)
          inputs("end1")
            .connect(inputs("end2"))
            .map(inputFromIr, inputFromIr)
        }
      })
  }
}
// Join that evaluates a per-branch "value" expression for every element; the
// output variable's type is the union of the branch expressions' return types.
object CustomJoinUsingBranchExpressions extends CustomStreamTransformer {
  @MethodToInvoke
  def execute(@BranchParamName("value") valueByBranchId: Map[String, LazyParameter[AnyRef]],
              @OutputVariableName variableName: String): JoinContextTransformation =
    ContextTransformation
      .join.definedBy { contexts =>
        // The result type is the least upper bound of all branch value types.
        val newType = Typed(contexts.keys.toList.map(branchId => valueByBranchId(branchId).returnType): _*)
        val parent = contexts.values.flatMap(_.parent).headOption
        Valid(ValidationContext(Map(variableName -> newType), Map.empty, parent))
      }.implementedBy(
        new FlinkCustomJoinTransformation {
          override def transform(inputs: Map[String, DataStream[Context]],
                                 flinkContext: FlinkCustomNodeContext): DataStream[ValueWithContext[AnyRef]] = {
            inputs("end1")
              .connect(inputs("end2"))
              .flatMap(new JoinExprBranchFunction(valueByBranchId, flinkContext.lazyParameterHelper))
          }
        })
}
// Emits each element's current event timestamp as the value, while
// re-stamping the emitted record with the "timestampToSet" parameter.
object ExtractAndTransformTimestamp extends CustomStreamTransformer {
  @MethodToInvoke(returnType = classOf[Long])
  def methodToInvoke(@ParamName("timestampToSet") timestampToSet: Long): FlinkCustomStreamTransformation
  = FlinkCustomStreamTransformation(_.transform("collectTimestamp",
    new AbstractStreamOperator[ValueWithContext[AnyRef]] with OneInputStreamOperator[Context, ValueWithContext[AnyRef]] {
      override def processElement(element: StreamRecord[Context]): Unit = {
        // Value = incoming timestamp; record timestamp = timestampToSet.
        output.collect(new StreamRecord[ValueWithContext[AnyRef]](ValueWithContext(element.getTimestamp.underlying(), element.getValue), timestampToSet))
      }
    }))
}
// Enricher whose declared return type depends on its parameters: it produces
// a list of `count` TypedMaps, each having every field named in "definition"
// set to the evaluated "toFill" value.
object ReturningDependentTypeService extends EagerService {
  @MethodToInvoke
  def invoke(@ParamName("definition") definition: java.util.List[String],
             @ParamName("toFill") toFill: LazyParameter[String],
             @ParamName("count") count: Int,
             @OutputVariableName outputVar: String)(implicit nodeId: NodeId): ContextTransformation = {
    // Output type: java List of records with String-typed fields from `definition`.
    val listType = TypedObjectTypingResult(definition.asScala.map(_ -> Typed[String]).toList)
    val returnType: typing.TypingResult = Typed.genericTypeClass[java.util.List[_]](List(listType))

    EnricherContextTransformation(outputVar, returnType, new ServiceInvoker {
      override def invokeService(params: Map[String, Any])
                                (implicit ec: ExecutionContext,
                                 collector: ServiceInvocationCollector,
                                 contextId: ContextId,
                                 componentUseCase: ComponentUseCase): Future[Any] = {
        val result = (1 to count)
          .map(_ => definition.asScala.map(_ -> params("toFill").asInstanceOf[String]).toMap)
          .map(TypedMap(_))
          .toList.asJava
        Future.successful(result)
      }
    })
  }
}
// Service used to assert invocation counts: the collector records the message
// in test mode, otherwise the shared counter is incremented.
object LogService extends Service {
  val invocationsCount = new AtomicInteger(0)

  // Resets the shared counter between test runs.
  def clear(): Unit = {
    invocationsCount.set(0)
  }

  @MethodToInvoke
  def invoke(@ParamName("all") all: Any)(implicit ec: ExecutionContext, collector: ServiceInvocationCollector): Future[Unit] = {
    collector.collect(s"$all-collectedDuringServiceInvocation", Option(())) {
      invocationsCount.incrementAndGet()
      Future.successful(())
    }
  }
}
// Service that fails with the configured exception when its "throw" parameter
// is true; used for testing scenario error handling.
class ThrowingService(exception: Exception) extends Service {
  @MethodToInvoke
  def invoke(@ParamName("throw") throwing: Boolean): Future[Unit] = {
    if (throwing) {
      Future.failed(exception)
    } else {
      // FIX: was `Future.successful(Unit)`, which passed the `Unit` companion
      // object (relying on compiler value discarding) instead of the unit value.
      Future.successful(())
    }
  }
}
// Counts elements in tumbling event-time windows of `seconds` length and
// emits the per-window count under the output variable, in a fresh context
// (all upstream variables are cleared).
object TransformerWithTime extends CustomStreamTransformer {
  @MethodToInvoke
  def execute(@OutputVariableName outputVarName: String, @ParamName("seconds") seconds: Int)(implicit nodeId: NodeId) = {
    ContextTransformation
      .definedBy((in: context.ValidationContext) => in.clearVariables.withVariable(outputVarName, Typed[Int], None))
      .implementedBy(
        FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
          start
            .map(_ => 1: java.lang.Integer)
            .keyBy(_ => "")
            .window(TumblingEventTimeWindows.of(Time.seconds(seconds)))
            .reduce((k, v) => k + v: java.lang.Integer)
            .map(i => ValueWithContext[AnyRef](i, Context(UUID.randomUUID().toString)))
        }))
  }
}
// Passes through the evaluated "param" expression, which may legally be null
// (the parameter is annotated @Nullable).
object TransformerWithNullableParam extends CustomStreamTransformer {
  @MethodToInvoke(returnType = classOf[String])
  def execute(@ParamName("param") @Nullable param: LazyParameter[String]) =
    FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
      start
        .flatMap(context.lazyParameterHelper.lazyMapFunction[AnyRef](param))
    })
}
// Emits, for every element, the ComponentUseCase taken from the Flink node
// context. NOTE(review): "UsaCase" looks like a typo for "UseCase", but the
// name is part of the public component set, so it is left unchanged.
object TransformerAddingComponentUsaCase extends CustomStreamTransformer {
  @MethodToInvoke
  def execute = {
    FlinkCustomStreamTransformation((start: DataStream[Context], flinkCustomNodeContext: FlinkCustomNodeContext) => {
      val componentUseCase = flinkCustomNodeContext.componentUseCase
      start
        .map(context => ValueWithContext[AnyRef](componentUseCase, context))
    })
  }
}
// Transformer that may terminate a scenario (canBeEnding): besides passing
// the evaluated parameter downstream it records each value in MockService.
object OptionalEndingCustom extends CustomStreamTransformer {
  override def canBeEnding: Boolean = true

  @MethodToInvoke(returnType = classOf[String])
  def execute(@ParamName("param") @Nullable param: LazyParameter[String]) =
    FlinkCustomStreamTransformation((start: DataStream[Context], context: FlinkCustomNodeContext) => {
      val afterMap = start
        .flatMap(context.lazyParameterHelper.lazyMapFunction[AnyRef](param))
      // Side-channel used by tests to observe emitted values.
      afterMap.addSink(element => MockService.add(element.value))
      afterMap
    })
}
// Sink factory taking a java.util.Optional parameter; every received value is
// recorded in the factory's shared data list for assertions.
object EagerOptionalParameterSinkFactory extends SinkFactory with WithDataList[String] {
  @MethodToInvoke
  def createSink(@ParamName("optionalStringParam") value: Optional[String]): Sink = new BasicFlinkSink {
    //Optional is not serializable...
    private val serializableValue = value.orElse(null)

    override def valueFunction(helper: FlinkLazyParameterFunctionHelper): FlatMapFunction[Context, ValueWithContext[String]] =
      (ctx, collector) => collector.collect(ValueWithContext(serializableValue, ctx))

    override def toFlinkFunction: SinkFunction[String] = new SinkFunction[String] {
      override def invoke(value: String, context: SinkFunction.Context): Unit = add(value)
    }

    override type Value = String
  }
}
// Shared, static recorder of everything MockService instances receive.
object MockService extends Service with WithDataList[Any]
// Sink that discards every value but counts invocations for assertions.
case object MonitorEmptySink extends EmptySink {
  val invocationsCount = new AtomicInteger(0)

  // Resets the counter between test runs.
  def clear(): Unit = {
    invocationsCount.set(0)
  }

  override def valueFunction(helper: FlinkLazyParameterFunctionHelper): FlatMapFunction[Context, ValueWithContext[AnyRef]] = (_, _) => {
    invocationsCount.getAndIncrement()
  }
}
// Typed test sinks collecting emitted values (via SinkForType's data list).
case object SinkForInts extends SinkForType[java.lang.Integer]
case object SinkForStrings extends SinkForType[String]
case object SinkForLongs extends SinkForType[java.lang.Long]
case object SinkForAny extends SinkForType[AnyRef]
// Service that does nothing. NOTE(review): the result type is declared as
// Future[Unit.type] and the body returns the `Unit` companion object —
// `Future[Unit]` with `Future.successful(())` was probably intended; also,
// unlike the other services here, `invoke` lacks @MethodToInvoke — confirm
// before changing, since the declared return type is part of the interface.
object EmptyService extends Service {
  def invoke(): Future[Unit.type] = Future.successful(Unit)
}
// Dynamic-parameter transformer: the comma-separated value of "par1" declares
// a second round of parameters; the output variable is a TypedMap of those
// dynamic parameters, and elements are filtered by the lazy "lazyPar1" flag.
object GenericParametersNode extends CustomStreamTransformer with SingleInputGenericNodeTransformation[AnyRef] {

  // State carried between transformation steps: the dynamic parameter names.
  override type State = List[String]

  override def contextTransformation(context: ValidationContext,
                                     dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId): this.NodeTransformationDefinition = {
    // Step 1: declare the two static parameters.
    case TransformationStep(Nil, _) => NextParameters(List(
      Parameter[String]("par1"), Parameter[java.lang.Boolean]("lazyPar1").copy(isLazyParameter = true)))
    // Step 2: "par1" known -> declare one Unknown-typed parameter per name.
    case TransformationStep(("par1", DefinedEagerParameter(value: String, _))::("lazyPar1", _)::Nil, None) =>
      val split = value.split(",").toList
      NextParameters(split.map(Parameter(_, Unknown)), state = Some(split))
    // "par1" failed validation -> finish with no dynamic parameters.
    case TransformationStep(("par1", FailedToDefineParameter)::("lazyPar1", _)::Nil, None) =>
      outputParameters(context, dependencies, Nil)
    // Step 3: all dynamic parameters defined -> compute the output type.
    case TransformationStep(("par1", _)::("lazyPar1", _)::rest, Some(names)) if rest.map(_._1) == names =>
      outputParameters(context, dependencies, rest)
  }

  // Declares the output variable as a record of the dynamic parameters, or
  // reports an error when no output variable name was provided.
  private def outputParameters(context: ValidationContext, dependencies: List[NodeDependencyValue], rest: List[(String, BaseDefinedParameter)])(implicit nodeId: NodeId): this.FinalResults = {
    dependencies.collectFirst { case OutputVariableNameValue(name) => name } match {
      case Some(name) =>
        val result = TypedObjectTypingResult(rest.map { case (k, v) => k -> v.returnType })
        FinalResults.forValidation(context)(_.withVariable(OutputVar.customNode(name), result))
      case None =>
        FinalResults(context, errors = List(CustomNodeError("Output not defined", None)))
    }
  }

  override def implementation(params: Map[String, Any], dependencies: List[NodeDependencyValue], finalState: Option[State]): AnyRef = {
    // Everything except the two static parameters forms the output record.
    val map = params.filterNot(k => List("par1", "lazyPar1").contains(k._1))
    val bool = params("lazyPar1").asInstanceOf[LazyParameter[java.lang.Boolean]]
    FlinkCustomStreamTransformation((stream, fctx) => {
      stream
        .filter(new LazyParameterFilterFunction(bool, fctx.lazyParameterHelper))
        .map(ctx => ValueWithContext[AnyRef](TypedMap(map), ctx))
    })
  }

  override def nodeDependencies: List[NodeDependency] = List(OutputVariableNameDependency, TypedNodeDependency[MetaData])
}
// Proves that validation-time state reaches `implementation`: the output
// variable holds whether "foo" existed in the upstream validation context.
object NodePassingStateToImplementation extends CustomStreamTransformer with SingleInputGenericNodeTransformation[AnyRef] {

  val VariableThatShouldBeDefinedBeforeNodeName = "foo"

  // State: did the upstream context contain the "foo" variable?
  override type State = Boolean

  override def contextTransformation(context: ValidationContext,
                                     dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId): this.NodeTransformationDefinition = {
    case TransformationStep(Nil, _) =>
      context.withVariable(OutputVar.customNode(OutputVariableNameDependency.extract(dependencies)), Typed[Boolean])
        .map(FinalResults(_, state = Some(context.contains(VariableThatShouldBeDefinedBeforeNodeName))))
        .valueOr( errors => FinalResults(context, errors.toList))
  }

  override def implementation(params: Map[String, Any], dependencies: List[NodeDependencyValue], finalState: Option[State]): AnyRef = {
    FlinkCustomStreamTransformation((stream, fctx) => {
      stream
        // Emit the boolean captured during validation for every element.
        .map(ctx => ValueWithContext[AnyRef](finalState.get: java.lang.Boolean, ctx))
    })
  }

  override def nodeDependencies: List[NodeDependency] = List(OutputVariableNameDependency)
}
// Generic source with dependent parameters: the allowed "version" values
// depend on the chosen "type"; the produced single element is "<type>-<version>".
object GenericParametersSource extends SourceFactory with SingleInputGenericNodeTransformation[Source] {

  override type State = Nothing

  override def contextTransformation(context: ValidationContext, dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId)
  : this.NodeTransformationDefinition = {
    // Step 1: fixed-values editor for "type".
    case TransformationStep(Nil, _) => NextParameters(Parameter[String]("type")
      .copy(editor = Some(FixedValuesParameterEditor(List(FixedExpressionValue("'type1'", "type1"), FixedExpressionValue("'type2'", "type2"))))) :: Nil)
    case TransformationStep(("type", DefinedEagerParameter(value: String, _))::Nil, None) =>
      //This is just sample, so we don't care about all cases, in *real* transformer we would e.g. take lists from config file, external service etc.
      val versions = value match {
        case "type1" => List(1, 2)
        case "type2" => List(3, 4)
        case _ => ???
      }
      NextParameters(Parameter[Int]("version")
        .copy(editor = Some(FixedValuesParameterEditor(versions.map(v => FixedExpressionValue(v.toString, v.toString))))):: Nil)
    // "type" invalid -> finish with just the output variable declared.
    case TransformationStep(("type", FailedToDefineParameter)::Nil, None) =>
      output(context, dependencies)
    case TransformationStep(("type", _)::("version", _)::Nil, None) =>
      output(context, dependencies)
  }

  // Declares the String-typed output variable named by the node dependency.
  private def output(context: ValidationContext, dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId) = {
    val name = dependencies.collectFirst {
      case OutputVariableNameValue(name) => name
    }.get

    FinalResults.forValidation(context)(_.withVariable(OutputVar.customNode(name), Typed[String]))
  }

  override def implementation(params: Map[String, Any], dependencies: List[NodeDependencyValue], finalState: Option[State]): Source = {
    val out = params("type") + "-" + params("version")
    CollectionSource(StreamExecutionEnvironment.getExecutionEnvironment.getConfig, out::Nil, None, Typed[String])
  }

  override def nodeDependencies: List[NodeDependency] = OutputVariableNameDependency :: Nil
}
// Source demonstrating a custom ContextInitializer: besides the standard
// "input" variable it exposes two derived variables ("additionalOne",
// "additionalTwo") computed from each input element.
object GenericSourceWithCustomVariables extends SourceFactory with SingleInputGenericNodeTransformation[Source] {

  private class CustomFlinkContextInitializer extends BasicContextInitializer[String](Typed[String]) {

    override def validationContext(context: ValidationContext)(implicit nodeId: NodeId): ValidatedNel[ProcessCompilationError, ValidationContext] = {
      //Append variable "input"
      val contextWithInput = super.validationContext(context)

      //Specify additional variables
      val additionalVariables = Map(
        "additionalOne" -> Typed[String],
        "additionalTwo" -> Typed[Int]
      )

      //Append additional variables to ValidationContext
      additionalVariables.foldLeft(contextWithInput) { case (acc, (name, typingResult)) =>
        acc.andThen(_.withVariable(name, typingResult, None))
      }
    }

    override def initContext(contextIdGenerator: ContextIdGenerator): ContextInitializingFunction[String] =
      new BasicContextInitializingFunction[String](contextIdGenerator, outputVariableName) {

        override def apply(input: String): Context = {
          //perform some transformations and/or computations
          val additionalVariables = Map[String, Any](
            "additionalOne" -> s"transformed:${input}",
            "additionalTwo" -> input.length()
          )
          //initialize context with input variable and append computed values
          super.apply(input).withVariables(additionalVariables)
        }

      }

  }

  override type State = Nothing

  //There is only one parameter in this source
  private val elementsParamName = "elements"

  private val customContextInitializer: ContextInitializer[String] = new CustomFlinkContextInitializer

  override def contextTransformation(context: ValidationContext, dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId)
  : GenericSourceWithCustomVariables.NodeTransformationDefinition = {
    case TransformationStep(Nil, _) => NextParameters(Parameter[java.util.List[String]](`elementsParamName`) :: Nil)
    case step@TransformationStep((`elementsParamName`, _) :: Nil, None) =>
      FinalResults.forValidation(context)(customContextInitializer.validationContext)
  }

  override def implementation(params: Map[String, Any], dependencies: List[NodeDependencyValue], finalState: Option[State]): Source = {
    import scala.collection.JavaConverters._
    val elements = params(`elementsParamName`).asInstanceOf[java.util.List[String]].asScala.toList

    // Collection source wired with the custom initializer plus test-data
    // generation/parsing support used by the designer's test mechanism.
    new CollectionSource(StreamExecutionEnvironment.getExecutionEnvironment.getConfig, elements, None, Typed[String])
      with TestDataGenerator
      with FlinkSourceTestSupport[String] {

      override val contextInitializer: ContextInitializer[String] = customContextInitializer

      override def generateTestData(size: Int): Array[Byte] = elements.mkString("\\n").getBytes

      override def testDataParser: TestDataParser[String] = new NewLineSplittedTestDataParser[String] {
        override def parseElement(testElement: String): String = testElement
      }

      override def timestampAssignerForTest: Option[TimestampWatermarkHandler[String]] = timestampAssigner
    }
  }

  override def nodeDependencies: List[NodeDependency] = Nil

}
/** Test sink with a lazy "value" parameter plus two eager parameters ("type",
  * "version") whose editors are computed dynamically: the selectable versions
  * depend on the previously chosen type.
  */
object GenericParametersSink extends SinkFactory with SingleInputGenericNodeTransformation[Sink] {

  private val componentUseCaseDependency = TypedNodeDependency[ComponentUseCase]

  override type State = Nothing

  override def contextTransformation(context: ValidationContext, dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId)
  : this.NodeTransformationDefinition = {
    // Step 1: declare the lazy "value" parameter and the fixed-values "type" parameter.
    case TransformationStep(Nil, _) => NextParameters(Parameter[String]("value").copy(isLazyParameter = true) :: Parameter[String]("type")
      .copy(editor = Some(FixedValuesParameterEditor(List(FixedExpressionValue("'type1'", "type1"), FixedExpressionValue("'type2'", "type2"))))) :: Nil)
    // Step 2: once "type" is evaluated, offer the versions valid for that type.
    case TransformationStep(("value", _) :: ("type", DefinedEagerParameter(value: String, _))::Nil, None) =>
      val versions = value match {
        case "type1" => List(1, 2)
        case "type2" => List(3, 4)
        case _ => ??? // unreachable: the editor restricts "type" to type1/type2
      }
      NextParameters(Parameter[Int]("version")
        .copy(editor = Some(FixedValuesParameterEditor(versions.map(v => FixedExpressionValue(v.toString, v.toString))))):: Nil)
    // "type" could not be evaluated: finish without requesting "version".
    case TransformationStep(("value", _) :: ("type", FailedToDefineParameter)::Nil, None) => FinalResults(context)
    // All parameters defined: transformation complete.
    case TransformationStep(("value", _) :: ("type", _)::("version", _)::Nil, None) => FinalResults(context)
  }

  override def implementation(params: Map[String, Any], dependencies: List[NodeDependencyValue], finalState: Option[State]): FlinkSink = new FlinkSink {
    type Value = String

    private val typ = params("type")
    private val version = params("version")

    // Evaluates the lazy "value" parameter for each context and tags the result
    // with type/version and the ComponentUseCase extracted from dependencies.
    override def prepareValue(dataStream: DataStream[Context], flinkNodeContext: FlinkCustomNodeContext): DataStream[ValueWithContext[Value]] = {
      dataStream
        .flatMap(flinkNodeContext.lazyParameterHelper.lazyMapFunction(params("value").asInstanceOf[LazyParameter[String]]))
        .map((v: ValueWithContext[String]) => v.copy(value = s"${v.value}+$typ-$version+componentUseCase:${componentUseCaseDependency.extract(dependencies)}"))
    }

    // Collects the prepared strings into the test sink.
    override def registerSink(dataStream: DataStream[ValueWithContext[String]], flinkNodeContext: FlinkCustomNodeContext): DataStreamSink[_] =
      dataStream.map(_.value).addSink(SinkForStrings.toSinkFunction)
  }

  override def nodeDependencies: List[NodeDependency] = List(componentUseCaseDependency)
}
/** Helper object exposed to expressions in test scenarios. */
object ProcessHelper {

  // Arbitrary constant referenced by expression tests.
  val constant = 4

  /** Sum of the two arguments. */
  def add(a: Int, b: Int): Int = a + b

  /** The constant rendered as a Scala Option[String]. */
  def scalaOptionValue: Option[String] = Some(constant.toString)

  /** The constant rendered as a Java Optional[String]. */
  def javaOptionalValue: Optional[String] = Optional.of(constant.toString)

  /** Looks up `property` in the given map; null when the key is absent. */
  def extractProperty(map: java.util.Map[String, _], property: String): Any = map.get(property)
}
// Monotonously-ascending watermark strategy that uses the record's `date`
// field (epoch millis) as the event timestamp.
private val ascendingTimestampExtractor = new StandardTimestampWatermarkHandler[SimpleRecord](WatermarkStrategy
  .forMonotonousTimestamps[SimpleRecord]().withTimestampAssigner(StandardTimestampWatermarkHandler.toAssigner[SimpleRecord](_.date.getTime)))
// Parses one separator-delimited test-data line into a SimpleRecord
// (fields: id, value1, value2, date, bigDecimal1, bigDecimal2, value3).
// NOTE(review): the string "\\\\|" compiles to the regex \\| — an escaped
// backslash followed by an empty alternative — not a literal '|' split.
// This looks like a double-escaping artifact of "\\|"; verify against the
// actual test data format before changing.
private val newLineSplittedTestDataParser = new NewLineSplittedTestDataParser[SimpleRecord] {
  override def parseElement(csv: String): SimpleRecord = {
    val parts = csv.split("\\\\|")
    SimpleRecord(parts(0), parts(1).toLong, parts(2), new Date(parts(3).toLong), Some(BigDecimal(parts(4))), BigDecimal(parts(5)), parts(6))
  }
}
/** Parameterless source factory that emits the given records with ascending
  * event-time watermarks; test data is parsed with the shared record parser.
  */
def simpleRecordSource(data: List[SimpleRecord]): SourceFactory = SourceFactory.noParam[SimpleRecord](
  new CollectionSource[SimpleRecord](new ExecutionConfig, data, Some(ascendingTimestampExtractor), Typed[SimpleRecord]) with FlinkSourceTestSupport[SimpleRecord] {
    override def testDataParser: TestDataParser[SimpleRecord] = newLineSplittedTestDataParser
    override def timestampAssignerForTest: Option[TimestampWatermarkHandler[SimpleRecord]] = timestampAssigner
  })
// Parameterless source of SimpleJsonRecord values; starts empty and decodes
// test data from empty-line-separated JSON documents.
val jsonSource: SourceFactory = SourceFactory.noParam[SimpleJsonRecord](
  new CollectionSource[SimpleJsonRecord](new ExecutionConfig, List(), None, Typed[SimpleJsonRecord]) with FlinkSourceTestSupport[SimpleJsonRecord] {
    override def testDataParser: TestDataParser[SimpleJsonRecord] = new EmptyLineSplittedTestDataParser[SimpleJsonRecord] {
      // Throws (via decodeJsonUnsafe) when the JSON does not match SimpleJsonRecord.
      override def parseElement(json: String): SimpleJsonRecord = {
        CirceUtil.decodeJsonUnsafe[SimpleJsonRecord](json, "invalid request")
      }
    }
    override def timestampAssignerForTest: Option[TimestampWatermarkHandler[SimpleJsonRecord]] = timestampAssigner
  }
)
/** Source whose return type is derived from a user-supplied "type" definition
  * map; emits TypedMap records decoded from JSON test data.
  */
object TypedJsonSource extends SourceFactory with ReturningType {
  @MethodToInvoke
  def create(processMetaData: MetaData, componentUseCase: ComponentUseCase, @ParamName("type") definition: java.util.Map[String, _]): Source = {
    new CollectionSource[TypedMap](new ExecutionConfig, List(), None, Typed[TypedMap]) with FlinkSourceTestSupport[TypedMap] with ReturningType {
      override def testDataParser: TestDataParser[TypedMap] = new EmptyLineSplittedTestDataParser[TypedMap] {
        // Decodes each JSON document into a string-to-string TypedMap.
        override def parseElement(json: String): TypedMap = {
          TypedMap(CirceUtil.decodeJsonUnsafe[Map[String, String]](json, "invalid request"))
        }
      }
      // Per-instance refinement computed from the "type" parameter.
      override val returnType: typing.TypingResult = TypingUtils.typeMapDefinition(definition)
      override def timestampAssignerForTest: Option[TimestampWatermarkHandler[TypedMap]] = timestampAssigner
    }
  }
  // Static fallback type; refined per instance in `create` above.
  override def returnType: typing.TypingResult = Typed[TypedMap]
}
@JsonCodec case class KeyValue(key: String, value: Int, date: Long)
/** Service that simply returns the ComponentUseCase it was invoked with, so
  * tests can observe which use case a scenario runs under.
  * NOTE(review): "UsaCase" is a typo for "UseCase"; renaming would break
  * existing references to this object, so it is kept as-is.
  */
object ReturningComponentUsaCaseService extends Service {
  @MethodToInvoke
  def invoke(implicit componentUseCase: ComponentUseCase): Future[ComponentUseCase] = {
    Future.successful(componentUseCase)
  }
}
/** Process listener that records the ids of nodes entered while `listen` is
  * active. Intended for single-threaded test use; the @volatile annotations
  * only guarantee visibility, not atomic list updates.
  */
object CountingNodesListener extends EmptyProcessListener {
  // Node ids recorded in entry order while listening.
  @volatile private var nodesEntered: List[String] = Nil
  @volatile private var listening = false

  /** Runs `body` while recording entered node ids and returns them in order.
    * The listening flag is now reset even when `body` throws (previously an
    * exception left the listener recording forever).
    */
  def listen(body: => Unit): List[String] = {
    nodesEntered = Nil
    listening = true
    try body
    finally listening = false
    nodesEntered
  }

  override def nodeEntered(nodeId: String, context: Context, processMetaData: MetaData): Unit = {
    // Append to preserve entry order (O(n) per append; fine for short test runs).
    if (listening) nodesEntered = nodesEntered ::: nodeId :: Nil
  }
}
}
| TouK/nussknacker | engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/process/helpers/SampleNodes.scala | Scala | apache-2.0 | 35,189 |
package eventstore
package core
package settings
import java.net.InetSocketAddress
import scala.concurrent.duration._
import com.typesafe.config.Config
import syntax._
/**
* @param address IP & port of Event Store
* @param connectionTimeout The desired connection timeout
* @param maxReconnections Maximum number of reconnections before backing off, -1 to reconnect forever
* @param reconnectionDelayMin Delay before first reconnection
* @param reconnectionDelayMax Maximum delay on reconnections
* @param defaultCredentials The [[UserCredentials]] to use for operations where other [[UserCredentials]] are not explicitly supplied.
* @param heartbeatInterval The interval at which to send heartbeat messages.
* @param heartbeatTimeout The interval after which an unacknowledged heartbeat will cause the connection to be considered faulted and disconnect.
* @param operationMaxRetries The maximum number of operation retries
* @param operationTimeout The amount of time before an operation is considered to have timed out
* @param resolveLinkTos Whether to resolve LinkTo events automatically
* @param requireMaster Whether or not to require Event Store to refuse serving read or write request if it is not master
* @param readBatchSize Number of events to be retrieved by client as single message
* @param enableTcpTls Whether TLS should be enabled for TCP connections.
* @param cluster see [[ClusterSettings]]
* @param http see [[HttpSettings]]
* @param serializationParallelism The number of serialization/deserialization functions to be run in parallel
* @param serializationOrdered Serialization done asynchronously and these futures may complete in any order, but results will be used with preserved order if set to true
* @param connectionName Client identifier used to show a friendly name of client in Event Store.
*/
@SerialVersionUID(1L)
final case class EsSettings(
  address: InetSocketAddress,
  connectionTimeout: FiniteDuration,
  maxReconnections: Int,
  reconnectionDelayMin: FiniteDuration,
  reconnectionDelayMax: FiniteDuration,
  defaultCredentials: Option[UserCredentials],
  heartbeatInterval: FiniteDuration,
  heartbeatTimeout: FiniteDuration,
  operationMaxRetries: Int,
  operationTimeout: FiniteDuration,
  resolveLinkTos: Boolean,
  requireMaster: Boolean,
  readBatchSize: Int,
  enableTcpTls: Boolean,
  cluster: Option[ClusterSettings],
  http: HttpSettings,
  serializationParallelism: Int,
  serializationOrdered: Boolean,
  connectionName: Option[String]
) {
  // Fail fast at construction time on values that would break reconnection,
  // operation handling or serialization at runtime.
  require(reconnectionDelayMin > Duration.Zero, "reconnectionDelayMin must be > 0")
  require(reconnectionDelayMax > Duration.Zero, "reconnectionDelayMax must be > 0")
  require(operationTimeout > Duration.Zero, "operationTimeout must be > 0")
  require(serializationParallelism > 0, "serializationParallelism must be > 0")
}
object EsSettings {

  /** Builds [[EsSettings]] from the `eventstore` section of the given config. */
  def apply(conf: Config): EsSettings = {
    val c = conf getConfig "eventstore"

    // The flat "operation-timeout" key is deprecated but still honoured when present.
    def operationTimeout =
      if (c hasPath "operation-timeout") c duration "operation-timeout"
      else c duration "operation.timeout"

    // Both login and password must be non-null for credentials to be used.
    def credentials = for {
      login    <- Option(c getString "credentials.login")
      password <- Option(c getString "credentials.password")
    } yield UserCredentials(login = login, password = password)

    def address = (c getString "address.host") :: (c getInt "address.port")

    // An empty connection name is treated as absent.
    def connectionName = Option(c getString "connection-name").filter(_.nonEmpty)

    EsSettings(
      address = address,
      connectionTimeout = c duration "connection-timeout",
      maxReconnections = c getInt "max-reconnections",
      reconnectionDelayMin = c duration "reconnection-delay.min",
      reconnectionDelayMax = c duration "reconnection-delay.max",
      defaultCredentials = credentials,
      heartbeatInterval = c duration "heartbeat.interval",
      heartbeatTimeout = c duration "heartbeat.timeout",
      operationMaxRetries = c getInt "operation.max-retries",
      operationTimeout = operationTimeout,
      resolveLinkTos = c getBoolean "resolve-linkTos",
      requireMaster = c getBoolean "require-master",
      readBatchSize = c getInt "read-batch-size",
      enableTcpTls = c getBoolean "enable-tcp-tls",
      cluster = ClusterSettings.opt(conf),
      http = HttpSettings(c),
      serializationParallelism = c getInt "serialization-parallelism",
      serializationOrdered = c getBoolean "serialization-ordered",
      connectionName = connectionName
    )
  }
}
| EventStore/EventStore.JVM | core/src/main/scala/eventstore/core/settings/EsSettings.scala | Scala | bsd-3-clause | 5,018 |
package org.aja.dhira.nnql
/**
* Created by mdhandapani on 18/5/16.
*/
/** AST nodes for the NNQL mini-language. */
object NNQLCommands {

  /** Marker for expressions that trigger behaviour at runtime. */
  trait RunTimeActions

  /** Base trait for all parsed NNQL expressions. */
  trait NNQlExpr

  /** Create `n` neurons, optionally fully interconnected. */
  case class CreateNeurons(n: Long, interconnections: Boolean) extends NNQlExpr

  /** Create a layer of `n` neurons. */
  case class CreateLayer(n: Long) extends NNQlExpr

  /** Eagerly loads a comma-separated file of doubles.
    *
    * @param csvPath       path to the CSV file (all cells must be numeric)
    * @param referenceName optional name under which the data set is referenced
    */
  case class LoadData(csvPath: String, referenceName: Option[String]) extends NNQlExpr {
    import scala.io.Source

    // Read all lines eagerly, making sure the file handle is closed
    // (the previous version leaked the Source).
    val lines: Array[String] = {
      val source = Source.fromFile(csvPath)
      try source.getLines().toArray
      finally source.close()
    }

    // Column count from the first row; 0 for an empty file (previously this
    // crashed with ArrayIndexOutOfBoundsException on empty input).
    val numCols: Int = lines.headOption.map(_.split(",").length).getOrElse(0)

    // Parsed numeric payload; throws NumberFormatException on non-numeric cells.
    val data: Array[Array[Double]] = lines.map(line =>
      line.split(",").map(_.toDouble))

    // Debug output retained from the original implementation.
    data.foreach(println)
  }

  /** Start network training (runtime action). */
  case class StartTraining() extends NNQlExpr with RunTimeActions

  /** Stop network training (runtime action). */
  case class StopTraining() extends NNQlExpr with RunTimeActions
}
| Mageswaran1989/aja | src/main/scala/org/aja/dhira/src/main/scala/org/dhira/core/nnql/NNQLCommands.scala | Scala | apache-2.0 | 734 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package databases.derby
import java.math.BigDecimal
import java.net.URI
import java.sql.{ SQLException, SQLIntegrityConstraintViolationException, SQLSyntaxErrorException }
import akka.testkit.{ EventFilter, TestFSMRef }
import com.wegtam.scalatest.tags.{ DbTest, DbTestDerby }
import com.wegtam.tensei.adt.{ ConnectionInformation, DFASDL, DFASDLReference }
import com.wegtam.tensei.agent.ActorSpecWithDebugLog
import com.wegtam.tensei.agent.writers.BaseWriter.BaseWriterMessages._
import com.wegtam.tensei.agent.writers.BaseWriter._
import com.wegtam.tensei.agent.writers.DatabaseWriterActor.DatabaseWriterData
import com.wegtam.tensei.agent.writers.{ BaseWriter, DatabaseWriterActor }
import org.scalatest.BeforeAndAfterEach
import scalaz._
import Scalaz._
class DatabaseWriterActorTest extends ActorSpecWithDebugLog with BeforeAndAfterEach {
val databaseName = "test"
// Creates a fresh in-memory Derby database before each test.
override protected def beforeEach(): Unit = {
  java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName;create=true")
  super.beforeEach()
}
// Drops the in-memory database after each test. Derby signals a successful
// drop by throwing an SQLException with SQLState "08006", so the assertion
// below both cleans up and verifies the shutdown actually happened.
override protected def afterEach(): Unit = {
  withClue("Derby database did not shutdown correctly!") {
    val se = the[SQLException] thrownBy java.sql.DriverManager
      .getConnection(s"jdbc:derby:memory:$databaseName;drop=true")
    se.getSQLState should be("08006")
  }
  super.afterEach()
}
/** Creates a DatabaseWriterActor FSM for the given connection and DFASDL,
  * triggers target initialization (DDL creation) and blocks until the writer
  * reports ReadyToWork. Returns the FSM ref for further interaction.
  */
private def initializeWriter(
  con: ConnectionInformation,
  dfasdl: DFASDL
): TestFSMRef[BaseWriter.State, DatabaseWriterData, DatabaseWriterActor] = {
  val writer = TestFSMRef(
    new DatabaseWriterActor(con, dfasdl, Option("DatabaseWriterActorTest"))
  )
  // A freshly created writer must start in the Initializing state.
  writer.stateName should be(BaseWriter.State.Initializing)
  writer ! BaseWriterMessages.InitializeTarget
  writer ! AreYouReady
  val expectedMsg = ReadyToWork
  expectMsg(expectedMsg)
  writer
}
describe("DatabaseWriterActor") {
describe("using derby") {
describe("initialize") {
it("should create the tables", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
initializeWriter(target, dfasdl)
val statement = connection.createStatement()
val results = statement.executeQuery(
"SELECT UPPER(TABLENAME) AS TABLENAME FROM SYS.SYSTABLES WHERE TABLETYPE = 'T' AND UPPER(TABLENAME) = 'ACCOUNTS'"
)
withClue("Database table 'accounts' should be created!'") {
results.next() should be(true)
results.getString("TABLENAME") shouldEqual "ACCOUNTS"
}
connection.close()
}
it("should not create tables that are already existing", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val statement = connection.createStatement()
statement.execute("CREATE TABLE accounts (id DOUBLE)")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
EventFilter.warning(occurrences = 1, start = "Table") intercept {
initializeWriter(target, dfasdl)
}
connection.close()
}
it("should create primary keys if defined", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01-with-primary-key.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
initializeWriter(target, dfasdl)
val statement = connection.createStatement()
val results = statement.executeQuery(
"SELECT UPPER(TABLENAME) AS TABLENAME FROM SYS.SYSTABLES WHERE TABLETYPE = 'T' AND UPPER(TABLENAME) = 'ACCOUNTS'"
)
withClue("Database table 'accounts' should be created!'") {
results.next() should be(true)
results.getString("TABLENAME") shouldEqual "ACCOUNTS"
}
statement.execute("INSERT INTO ACCOUNTS VALUES(1, 'John Doe', NULL, '2001-01-01', 3.14)")
an[SQLIntegrityConstraintViolationException] should be thrownBy statement.execute(
"INSERT INTO ACCOUNTS VALUES(1, 'Jane Doe', NULL, '2001-01-02', 2.76)"
)
connection.close()
}
it("should create auto-increment columns if defined", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01-with-pk-and-auto-inc.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
initializeWriter(target, dfasdl)
val statement = connection.createStatement()
val results = statement.executeQuery(
"SELECT UPPER(TABLENAME) AS TABLENAME FROM SYS.SYSTABLES WHERE TABLETYPE = 'T' AND UPPER(TABLENAME) = 'ACCOUNTS'"
)
withClue("Database table 'accounts' should be created!'") {
results.next() should be(true)
results.getString("TABLENAME") shouldEqual "ACCOUNTS"
}
statement.execute(
"INSERT INTO ACCOUNTS (name, description, birthday, salary) VALUES('John Doe', NULL, '2001-01-01', 3.14)"
)
val entries = statement.executeQuery("SELECT * FROM ACCOUNTS WHERE name = 'John Doe'")
withClue("Column should be incremented automatically.") {
entries.next() should be(true)
entries.getInt("id") should be(1)
}
// Derby permits any statements that modify a key column.
an[SQLSyntaxErrorException] should be thrownBy statement.execute(
"INSERT INTO ACCOUNTS VALUES(1, 'Jane Doe', NULL, '2001-01-02', 2.76)"
)
connection.close()
}
it("should fail to create foreign keys without primary keys", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-02-with-foreign-key.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
val writer =
TestFSMRef(new DatabaseWriterActor(target, dfasdl, Option("DatabaseWriterActorTest")))
writer.stateName should be(BaseWriter.State.Initializing)
EventFilter[SQLException](source = writer.path.toString, occurrences = 1) intercept {
writer ! BaseWriterMessages.InitializeTarget
writer ! AreYouReady
val expectedMsg = ReadyToWork
expectMsg(expectedMsg)
}
writer ! CloseWriter
expectMsgType[WriterClosed]
connection.close()
}
it("should create the unique columns if defined", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01-with-unique.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
initializeWriter(target, dfasdl)
val statement = connection.createStatement()
statement.execute("INSERT INTO ACCOUNTS VALUES(1, 'John Doe', NULL, '2001-01-01', 3.14)")
val entries = statement.executeQuery("SELECT * FROM ACCOUNTS WHERE name = 'John Doe'")
withClue("Unique should work.") {
entries.next() should be(true)
entries.getString("name") should be("John Doe")
an[java.sql.SQLIntegrityConstraintViolationException] should be thrownBy statement
.execute("INSERT INTO ACCOUNTS VALUES(2, 'John Doe', NULL, '2001-01-02', 2.76)")
}
connection.close()
}
it("should create primary keys, foreign keys and auto increments", DbTest, DbTestDerby) {
val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile =
"/databases/generic/DatabaseWriter/simple-02-with-pk-and-fk-and-auto-inc.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef =
Option(DFASDLReference("TEST", "SIMPLE-01")))
initializeWriter(target, dfasdl)
val statement = connection.createStatement()
val results = statement.executeQuery(
"SELECT UPPER(TABLENAME) AS TABLENAME FROM SYS.SYSTABLES WHERE TABLETYPE = 'T' AND (UPPER(TABLENAME) = 'ACCOUNTS' OR UPPER(TABLENAME) = 'COMPANIES') ORDER BY TABLENAME ASC"
)
withClue("Database tables should be created!'") {
results.next() should be(true)
results.getString("TABLENAME") shouldEqual "ACCOUNTS"
results.next() should be(true)
results.getString("TABLENAME") shouldEqual "COMPANIES"
}
statement.execute("INSERT INTO COMPANIES VALUES(1, 'Letterbox Inc.', NULL)")
statement.execute(
"INSERT INTO ACCOUNTS (name, description, birthday, salary, company_id) VALUES('John Doe', NULL, '2001-01-01', 3.14, 1)"
)
val entries = statement.executeQuery(
"SELECT ACCOUNTS.id AS id, COMPANIES.name AS name FROM ACCOUNTS JOIN COMPANIES ON ACCOUNTS.company_id = COMPANIES.id WHERE ACCOUNTS.name = 'John Doe'"
)
withClue("Foreign keys should work.") {
entries.next() should be(true)
withClue("Column id should be auto-incremented.")(entries.getInt("id") should be(1))
entries.getString("name") should be("Letterbox Inc.")
an[SQLIntegrityConstraintViolationException] should be thrownBy statement.execute(
"INSERT INTO ACCOUNTS (name, description, birthday, salary, company_id) VALUES('Jane Doe', NULL, '2001-01-02', 2.76, -1)"
)
}
connection.close()
}
}
describe("writing data") {
describe("using a single sequence") {
describe("when given data for a single row") {
it("should write a sequence row", DbTest, DbTestDerby) {
val connection =
java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
val xml =
scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target =
new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
dfasdlRef = Option(DFASDLReference("TEST", "SIMPLE-01")))
val databaseWriter = initializeWriter(target, dfasdl)
val msg = new WriteBatchData(
batch = List(
new WriteData(1, 1, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(2,
"Max Mustermann",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(3,
"Some fancy text...",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(4,
java.sql.Date.valueOf("1968-01-03"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(5,
new BigDecimal("1500.23"),
List(),
Option(new WriterMessageMetaData("salary")))
)
)
databaseWriter ! msg
databaseWriter ! BaseWriterMessages.CloseWriter
val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
expectMsg(expectedMessage)
val statement = connection.createStatement()
val results = statement.executeQuery("SELECT * FROM ACCOUNTS")
withClue("Data should have been written to the database!") {
results.next() should be(true)
results.getLong("id") should be(1)
results.getString("name") should be("Max Mustermann")
results.getString("description") should be("Some fancy text...")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
results.getDouble("salary") should be(1500.23)
}
connection.close()
}
}
describe("when given data for multiple rows") {
describe("without primary key") {
it("should write all possible sequence rows", DbTest, DbTestDerby) {
val connection =
java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
val xml = scala.io.Source
.fromInputStream(getClass.getResourceAsStream(dfasdlFile))
.mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target =
new ConnectionInformation(
uri = new URI(connection.getMetaData.getURL),
dfasdlRef = Option(DFASDLReference("TEST", "SIMPLE-01"))
)
val databaseWriter = initializeWriter(target, dfasdl)
val msg = new WriteBatchData(
batch = List(
new WriteData(1, 1, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(2,
"Max Mustermann",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(3,
"Some fancy text...",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(4,
java.sql.Date.valueOf("1968-01-03"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(5,
new BigDecimal("1500.23"),
List(),
Option(new WriterMessageMetaData("salary"))),
new WriteData(6, 2, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(7,
"Eva Mustermann",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(8,
"Some fancy text...",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(9,
java.sql.Date.valueOf("1968-01-01"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(10,
new BigDecimal("1500.00"),
List(),
Option(new WriterMessageMetaData("salary"))),
new WriteData(11, 3, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(12,
"Dr. Evil",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(13,
"Beware of Austin Powers!",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(14,
java.sql.Date.valueOf("1968-08-08"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(15,
new BigDecimal("1500000.00"),
List(),
Option(new WriterMessageMetaData("salary")))
)
)
databaseWriter ! msg
databaseWriter ! BaseWriterMessages.CloseWriter
val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
expectMsg(expectedMessage)
val statement = connection.createStatement()
val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
withClue("Data should have been written to the database!") {
results.next() should be(true)
results.getLong("id") should be(1)
results.getString("name") should be("Max Mustermann")
results.getString("description") should be("Some fancy text...")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
results.getDouble("salary") should be(1500.23)
results.next() should be(true)
results.getLong("id") should be(2)
results.getString("name") should be("Eva Mustermann")
results.getString("description") should be("Some fancy text...")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
results.getDouble("salary") should be(1500.00)
results.next() should be(true)
results.getLong("id") should be(3)
results.getString("name") should be("Dr. Evil")
results.getString("description") should be("Beware of Austin Powers!")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
results.getDouble("salary") should be(1500000.00)
}
connection.close()
}
}
describe("with primary key") {
it("should write new and update existing rows", DbTest, DbTestDerby) {
val connection =
java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01-with-primary-key.xml"
val xml = scala.io.Source
.fromInputStream(getClass.getResourceAsStream(dfasdlFile))
.mkString
val dfasdl = new DFASDL("SIMPLE-01", xml)
val target =
new ConnectionInformation(
uri = new URI(connection.getMetaData.getURL),
dfasdlRef = Option(DFASDLReference("TEST", "SIMPLE-01"))
)
val databaseWriter = initializeWriter(target, dfasdl)
val ps = connection.prepareStatement(
"INSERT INTO accounts (id, name, description, birthday, salary) VALUES(?, ?, ?, ?, ?)"
)
ps.setInt(1, 1)
ps.setString(2, "Max Mustermann")
ps.setString(3, "Some fancy text...")
ps.setDate(4, java.sql.Date.valueOf("1968-01-03"))
ps.setBigDecimal(5, new BigDecimal("1500.23"))
ps.execute()
val msg = new WriteBatchData(
batch = List(
new WriteData(1, 2, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(2,
"Eva Mustermann",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(3,
"Some fancy text...",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(4,
java.sql.Date.valueOf("1968-01-01"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(5,
new BigDecimal("1500.00"),
List(),
Option(new WriterMessageMetaData("salary"))),
new WriteData(6, 3, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(7, "Dr. Evil", List(), Option(new WriterMessageMetaData("name"))),
new WriteData(8,
"Beware of Austin Powers!",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(9,
java.sql.Date.valueOf("1968-08-08"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(10,
new BigDecimal("1500000.00"),
List(),
Option(new WriterMessageMetaData("salary"))),
new WriteData(11, 1, List(), Option(new WriterMessageMetaData("id"))),
new WriteData(12,
"Lord Fancy Pants",
List(),
Option(new WriterMessageMetaData("name"))),
new WriteData(13,
"An updated description text.",
List(),
Option(new WriterMessageMetaData("description"))),
new WriteData(14,
java.sql.Date.valueOf("1968-04-01"),
List(),
Option(new WriterMessageMetaData("birthday"))),
new WriteData(15,
new BigDecimal("999.97"),
List(),
Option(new WriterMessageMetaData("salary")))
)
)
databaseWriter ! msg
databaseWriter ! BaseWriterMessages.CloseWriter
val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
expectMsg(expectedMessage)
val statement = connection.createStatement()
withClue("The exact number of rows should have been written!") {
val count = statement.executeQuery("SELECT COUNT(*) FROM ACCOUNTS")
count.next() should be(true)
count.getInt(1) shouldEqual 3
}
withClue("Data should have been written to the database!") {
val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
results.next() should be(true)
results.getLong("id") should be(1)
results.getString("name") should be("Lord Fancy Pants")
results.getString("description") should be("An updated description text.")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-04-01"))
results.getDouble("salary") should be(999.97)
results.next() should be(true)
results.getLong("id") should be(2)
results.getString("name") should be("Eva Mustermann")
results.getString("description") should be("Some fancy text...")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
results.getDouble("salary") should be(1500.00)
results.next() should be(true)
results.getLong("id") should be(3)
results.getString("name") should be("Dr. Evil")
results.getString("description") should be("Beware of Austin Powers!")
results.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
results.getDouble("salary") should be(1500000.00)
}
connection.close()
}
}
}
}
        // Batches that target two sequences: metadata ids "id".."salary" map to
        // table ACCOUNTS, their "*2" counterparts to ACCOUNTS2 (see simple-02.xml).
        describe("using multiple sequences") {
          describe("when given data for multiple rows") {
            it("should write all possible sequence rows", DbTest, DbTestDerby) {
              val connection =
                java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
              val dfasdlFile = "/databases/generic/DatabaseWriter/simple-02.xml"
              val xml =
                scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
              val dfasdl = new DFASDL("SIMPLE-01", xml)
              val target =
                new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                          dfasdlRef = Option(DFASDLReference("TEST", "SIMPLE-01")))
              val databaseWriter = initializeWriter(target, dfasdl)
              // Two rows for the first sequence followed by three rows for the second.
              val msg = new WriteBatchData(
                batch = List(
                  new WriteData(1, 1, List(), Option(new WriterMessageMetaData("id"))),
                  new WriteData(2,
                                "Max Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name"))),
                  new WriteData(3,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description"))),
                  new WriteData(4,
                                java.sql.Date.valueOf("1968-01-03"),
                                List(),
                                Option(new WriterMessageMetaData("birthday"))),
                  new WriteData(5,
                                new BigDecimal("1500.23"),
                                List(),
                                Option(new WriterMessageMetaData("salary"))),
                  new WriteData(6, 2, List(), Option(new WriterMessageMetaData("id"))),
                  new WriteData(7,
                                "Eva Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name"))),
                  new WriteData(8,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description"))),
                  new WriteData(9,
                                java.sql.Date.valueOf("1968-01-01"),
                                List(),
                                Option(new WriterMessageMetaData("birthday"))),
                  new WriteData(10,
                                new BigDecimal("1500.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary"))),
                  new WriteData(11, 1, List(), Option(new WriterMessageMetaData("id2"))),
                  new WriteData(12, "Dr. Evil", List(), Option(new WriterMessageMetaData("name2"))),
                  new WriteData(13,
                                "Beware of Austin Powers!",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(14,
                                java.sql.Date.valueOf("1968-08-08"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(15,
                                new BigDecimal("1500000.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2"))),
                  new WriteData(16, 2, List(), Option(new WriterMessageMetaData("id2"))),
                  new WriteData(17,
                                "Eva Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name2"))),
                  new WriteData(18,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(19,
                                java.sql.Date.valueOf("1968-01-01"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(20,
                                new BigDecimal("1500.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2"))),
                  new WriteData(21, 3, List(), Option(new WriterMessageMetaData("id2"))),
                  new WriteData(22, "Dr. Evil", List(), Option(new WriterMessageMetaData("name2"))),
                  new WriteData(23,
                                "Beware of Austin Powers!",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(24,
                                java.sql.Date.valueOf("1968-08-08"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(25,
                                new BigDecimal("1500000.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2")))
                )
              )
              databaseWriter ! msg
              databaseWriter ! BaseWriterMessages.CloseWriter
              val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
              expectMsg(expectedMessage)
              val statement = connection.createStatement()
              val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
              withClue("Data should have been written to the database!") {
                results.next() should be(true)
                results.getLong("id") should be(1)
                results.getString("name") should be("Max Mustermann")
                results.getString("description") should be("Some fancy text...")
                results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
                results.getDouble("salary") should be(1500.23)
                results.next() should be(true)
                results.getLong("id") should be(2)
                results.getString("name") should be("Eva Mustermann")
                results.getString("description") should be("Some fancy text...")
                results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
                results.getDouble("salary") should be(1500.00)
                results.next() should be(false)
              }
              val results2 = statement.executeQuery("SELECT * FROM ACCOUNTS2 ORDER BY id")
              withClue("Data should have been written to the database!") {
                results2.next() should be(true)
                results2.getLong("id") should be(1)
                results2.getString("name") should be("Dr. Evil")
                results2.getString("description") should be("Beware of Austin Powers!")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
                results2.getDouble("salary") should be(1500000.00)
                results2.next() should be(true)
                results2.getLong("id") should be(2)
                results2.getString("name") should be("Eva Mustermann")
                results2.getString("description") should be("Some fancy text...")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
                results2.getDouble("salary") should be(1500.00)
                results2.next() should be(true)
                results2.getLong("id") should be(3)
                results2.getString("name") should be("Dr. Evil")
                results2.getString("description") should be("Beware of Austin Powers!")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
                results2.getDouble("salary") should be(1500000.00)
                results2.next() should be(false)
              }
              connection.close()
            }
          }
          // Same payload as above but with the WriteData elements deliberately
          // shuffled; the resulting table contents must be identical.
          describe("when given data for multiple rows in random order") {
            it("should write all possible sequence rows", DbTest, DbTestDerby) {
              val connection =
                java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
              val dfasdlFile = "/databases/generic/DatabaseWriter/simple-02.xml"
              val xml =
                scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
              val dfasdl = new DFASDL("SIMPLE-01", xml)
              val target =
                new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                          dfasdlRef = Option(DFASDLReference("TEST", "SIMPLE-01")))
              val databaseWriter = initializeWriter(target, dfasdl)
              val msg = new WriteBatchData(
                batch = List(
                  new WriteData(1, 1, List(), Option(new WriterMessageMetaData("id"))),
                  new WriteData(5,
                                new BigDecimal("1500.23"),
                                List(),
                                Option(new WriterMessageMetaData("salary"))),
                  new WriteData(2,
                                "Max Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name"))),
                  new WriteData(3,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description"))),
                  new WriteData(6, 2, List(), Option(new WriterMessageMetaData("id"))),
                  new WriteData(7,
                                "Eva Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name"))),
                  new WriteData(4,
                                java.sql.Date.valueOf("1968-01-03"),
                                List(),
                                Option(new WriterMessageMetaData("birthday"))),
                  new WriteData(8,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description"))),
                  new WriteData(11, 1, List(), Option(new WriterMessageMetaData("id2"))),
                  new WriteData(12, "Dr. Evil", List(), Option(new WriterMessageMetaData("name2"))),
                  new WriteData(13,
                                "Beware of Austin Powers!",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(9,
                                java.sql.Date.valueOf("1968-01-01"),
                                List(),
                                Option(new WriterMessageMetaData("birthday"))),
                  new WriteData(10,
                                new BigDecimal("1500.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary"))),
                  new WriteData(14,
                                java.sql.Date.valueOf("1968-08-08"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(15,
                                new BigDecimal("1500000.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2"))),
                  new WriteData(23,
                                "Beware of Austin Powers!",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(24,
                                java.sql.Date.valueOf("1968-08-08"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(16, 2, List(), Option(new WriterMessageMetaData("id2"))),
                  new WriteData(18,
                                "Some fancy text...",
                                List(),
                                Option(new WriterMessageMetaData("description2"))),
                  new WriteData(17,
                                "Eva Mustermann",
                                List(),
                                Option(new WriterMessageMetaData("name2"))),
                  new WriteData(19,
                                java.sql.Date.valueOf("1968-01-01"),
                                List(),
                                Option(new WriterMessageMetaData("birthday2"))),
                  new WriteData(20,
                                new BigDecimal("1500.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2"))),
                  new WriteData(22, "Dr. Evil", List(), Option(new WriterMessageMetaData("name2"))),
                  new WriteData(25,
                                new BigDecimal("1500000.00"),
                                List(),
                                Option(new WriterMessageMetaData("salary2"))),
                  new WriteData(21, 3, List(), Option(new WriterMessageMetaData("id2")))
                )
              )
              databaseWriter ! msg
              databaseWriter ! BaseWriterMessages.CloseWriter
              val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
              expectMsg(expectedMessage)
              val statement = connection.createStatement()
              val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
              withClue("Data should have been written to the database!") {
                results.next() should be(true)
                results.getLong("id") should be(1)
                results.getString("name") should be("Max Mustermann")
                results.getString("description") should be("Some fancy text...")
                results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
                results.getDouble("salary") should be(1500.23)
                results.next() should be(true)
                results.getLong("id") should be(2)
                results.getString("name") should be("Eva Mustermann")
                results.getString("description") should be("Some fancy text...")
                results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
                results.getDouble("salary") should be(1500.00)
                results.next() should be(false)
              }
              val results2 = statement.executeQuery("SELECT * FROM ACCOUNTS2 ORDER BY id")
              withClue("Data should have been written to the database!") {
                results2.next() should be(true)
                results2.getLong("id") should be(1)
                results2.getString("name") should be("Dr. Evil")
                results2.getString("description") should be("Beware of Austin Powers!")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
                results2.getDouble("salary") should be(1500000.00)
                results2.next() should be(true)
                results2.getLong("id") should be(2)
                results2.getString("name") should be("Eva Mustermann")
                results2.getString("description") should be("Some fancy text...")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
                results2.getDouble("salary") should be(1500.00)
                results2.next() should be(true)
                results2.getLong("id") should be(3)
                results2.getString("name") should be("Dr. Evil")
                results2.getString("description") should be("Beware of Austin Powers!")
                results2.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
                results2.getDouble("salary") should be(1500000.00)
                results2.next() should be(false)
              }
              connection.close()
            }
          }
        }
        // Verifies that values are bound to the correct columns regardless of the
        // order in which the column data arrives within a single row.
        describe("when retrieving ordered column data") {
          it("should write the columns in correct order", DbTest, DbTestDerby) {
            val connection =
              java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
            val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
            val xml =
              scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
            val dfasdl = new DFASDL("SIMPLE-01", xml)
            val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                                   dfasdlRef =
                                                     Option(DFASDLReference("TEST", "SIMPLE-01")))
            val databaseWriter = initializeWriter(target, dfasdl)
            // One row with its columns supplied in schema order (id..salary).
            val msg = new WriteBatchData(
              batch = List(
                new WriteData(1, 1, List(), Option(new WriterMessageMetaData("id"))),
                new WriteData(2,
                              "Max Mustermann",
                              List(),
                              Option(new WriterMessageMetaData("name"))),
                new WriteData(3,
                              "Some fancy text...",
                              List(),
                              Option(new WriterMessageMetaData("description"))),
                new WriteData(4,
                              java.sql.Date.valueOf("1968-01-03"),
                              List(),
                              Option(new WriterMessageMetaData("birthday"))),
                new WriteData(5,
                              new BigDecimal("1500.23"),
                              List(),
                              Option(new WriterMessageMetaData("salary")))
              )
            )
            databaseWriter ! msg
            databaseWriter ! BaseWriterMessages.CloseWriter
            val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
            expectMsg(expectedMessage)
            val statement = connection.createStatement()
            val results = statement.executeQuery("SELECT * FROM ACCOUNTS")
            withClue("Data should have been written to the database!") {
              results.next() should be(true)
              results.getLong("id") should be(1)
              results.getString("name") should be("Max Mustermann")
              results.getString("description") should be("Some fancy text...")
              results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
              results.getDouble("salary") should be(1500.23)
            }
            connection.close()
          }
        }
        describe("when retrieving unordered column data") {
          it("should write the columns in correct order", DbTest, DbTestDerby) {
            val connection =
              java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
            val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
            val xml =
              scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
            val dfasdl = new DFASDL("SIMPLE-01", xml)
            val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                                   dfasdlRef =
                                                     Option(DFASDLReference("TEST", "SIMPLE-01")))
            val databaseWriter = initializeWriter(target, dfasdl)
            // Same row as above, but the column values arrive out of schema order.
            val msg = new WriteBatchData(
              batch = List(
                new WriteData(1,
                              java.sql.Date.valueOf("1968-01-03"),
                              List(),
                              Option(new WriterMessageMetaData("birthday"))),
                new WriteData(2,
                              "Max Mustermann",
                              List(),
                              Option(new WriterMessageMetaData("name"))),
                new WriteData(3, 1, List(), Option(new WriterMessageMetaData("id"))),
                new WriteData(4,
                              new BigDecimal("1500.23"),
                              List(),
                              Option(new WriterMessageMetaData("salary"))),
                new WriteData(5,
                              "Some fancy text...",
                              List(),
                              Option(new WriterMessageMetaData("description")))
              )
            )
            databaseWriter ! msg
            databaseWriter ! BaseWriterMessages.CloseWriter
            val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
            expectMsg(expectedMessage)
            val statement = connection.createStatement()
            val results = statement.executeQuery("SELECT * FROM ACCOUNTS")
            withClue("Data should have been written to the database!") {
              results.next() should be(true)
              results.getLong("id") should be(1)
              results.getString("name") should be("Max Mustermann")
              results.getString("description") should be("Some fancy text...")
              results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-03"))
              results.getDouble("salary") should be(1500.23)
            }
            connection.close()
          }
        }
}
      // Auto-increment primary key that is not the first column: None ids let the
      // database assign keys, while a concrete id (1L) targets the pre-seeded row.
      describe("using auto increment columns") {
        it("should collect the written auto increment values", DbTest, DbTestDerby) {
          val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
          val dfasdlFile =
            "/databases/generic/DatabaseWriter/simple-01-with-pk-and-auto-inc-not-first-column.xml"
          val xml =
            scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
          val dfasdl = new DFASDL("SIMPLE-01", xml)
          val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                                 dfasdlRef =
                                                   Option(DFASDLReference("TEST", "SIMPLE-01")))
          val databaseWriter = initializeWriter(target, dfasdl)
          val statement = connection.createStatement()
          // Pre-seed one row so generated keys for the two new rows continue from it.
          statement.execute(
            "INSERT INTO accounts (name, description, birthday, salary) VALUES('Max Mustermann', 'Some fancy text...', '1968-01-03', 1500.23)"
          )
          val msg = new WriteBatchData(
            batch = List(
              new WriteData(1, None, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(2, "Eva Mustermann", List(), Option(new WriterMessageMetaData("name"))),
              new WriteData(3,
                            "Some fancy text...",
                            List(),
                            Option(new WriterMessageMetaData("description"))),
              new WriteData(4,
                            java.sql.Date.valueOf("1968-01-01"),
                            List(),
                            Option(new WriterMessageMetaData("birthday"))),
              new WriteData(5,
                            new java.math.BigDecimal("1500.00"),
                            List(),
                            Option(new WriterMessageMetaData("salary"))),
              new WriteData(6, None, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(7, "Dr. Evil", List(), Option(new WriterMessageMetaData("name"))),
              new WriteData(8,
                            "Beware of Austin Powers!",
                            List(),
                            Option(new WriterMessageMetaData("description"))),
              new WriteData(9,
                            java.sql.Date.valueOf("1968-08-08"),
                            List(),
                            Option(new WriterMessageMetaData("birthday"))),
              new WriteData(10,
                            new java.math.BigDecimal("1500000.00"),
                            List(),
                            Option(new WriterMessageMetaData("salary"))),
              new WriteData(11, 1L, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(12,
                            "Lord Fancy Pants",
                            List(),
                            Option(new WriterMessageMetaData("name"))),
              new WriteData(13,
                            "An updated description text.",
                            List(),
                            Option(new WriterMessageMetaData("description"))),
              new WriteData(14,
                            java.sql.Date.valueOf("1968-04-01"),
                            List(),
                            Option(new WriterMessageMetaData("birthday"))),
              new WriteData(15,
                            new java.math.BigDecimal("999.97"),
                            List(),
                            Option(new WriterMessageMetaData("salary")))
            )
          )
          // The writer is expected to emit the last generated key at debug level.
          EventFilter.debug(occurrences = 1, message = "GENERATED INSERT KEY: 3") intercept {
            databaseWriter ! msg
            databaseWriter ! BaseWriterMessages.CloseWriter
            val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
            expectMsg(expectedMessage)
          }
          withClue("The exact number of rows should have been written!") {
            val count = statement.executeQuery("SELECT COUNT(*) FROM ACCOUNTS")
            count.next() should be(true)
            count.getInt(1) shouldEqual 3
          }
          withClue("Data should have been written to the database!") {
            val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
            results.next() should be(true)
            results.getLong("id") should be(1)
            results.getString("name") should be("Lord Fancy Pants")
            results.getString("description") should be("An updated description text.")
            results.getDate("birthday") should be(java.sql.Date.valueOf("1968-04-01"))
            results.getDouble("salary") should be(999.97)
            results.next() should be(true)
            results.getLong("id") should be(2)
            results.getString("name") should be("Eva Mustermann")
            results.getString("description") should be("Some fancy text...")
            results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
            results.getDouble("salary") should be(1500.00)
            results.next() should be(true)
            results.getLong("id") should be(3)
            results.getString("name") should be("Dr. Evil")
            results.getString("description") should be("Beware of Austin Powers!")
            results.getDate("birthday") should be(java.sql.Date.valueOf("1968-08-08"))
            results.getDouble("salary") should be(1500000.00)
          }
          connection.close()
        }
      }
      // None payloads must be written as SQL NULL for the nullable columns
      // (description, birthday, salary).
      describe("using NULL values") {
        it("should set the correct parameter columns to null", DbTest, DbTestDerby) {
          val connection = java.sql.DriverManager.getConnection(s"jdbc:derby:memory:$databaseName")
          val dfasdlFile = "/databases/generic/DatabaseWriter/simple-01.xml"
          val xml =
            scala.io.Source.fromInputStream(getClass.getResourceAsStream(dfasdlFile)).mkString
          val dfasdl = new DFASDL("SIMPLE-01", xml)
          val target = new ConnectionInformation(uri = new URI(connection.getMetaData.getURL),
                                                 dfasdlRef =
                                                   Option(DFASDLReference("TEST", "SIMPLE-01")))
          val databaseWriter = initializeWriter(target, dfasdl)
          val msg = new WriteBatchData(
            batch = List(
              new WriteData(1, 2L, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(2, "Eva Mustermann", List(), Option(new WriterMessageMetaData("name"))),
              new WriteData(3, None, List(), Option(new WriterMessageMetaData("description"))),
              new WriteData(4,
                            java.sql.Date.valueOf("1968-01-01"),
                            List(),
                            Option(new WriterMessageMetaData("birthday"))),
              new WriteData(5,
                            new java.math.BigDecimal("1500.00"),
                            List(),
                            Option(new WriterMessageMetaData("salary"))),
              new WriteData(6, 3L, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(7, "Dr. Evil", List(), Option(new WriterMessageMetaData("name"))),
              new WriteData(8,
                            "Beware of Austin Powers!",
                            List(),
                            Option(new WriterMessageMetaData("description"))),
              new WriteData(9, None, List(), Option(new WriterMessageMetaData("birthday"))),
              new WriteData(10,
                            new java.math.BigDecimal("1500000.00"),
                            List(),
                            Option(new WriterMessageMetaData("salary"))),
              new WriteData(11, 1L, List(), Option(new WriterMessageMetaData("id"))),
              new WriteData(12,
                            "Lord Fancy Pants",
                            List(),
                            Option(new WriterMessageMetaData("name"))),
              new WriteData(13,
                            "An updated description text.",
                            List(),
                            Option(new WriterMessageMetaData("description"))),
              new WriteData(14,
                            java.sql.Date.valueOf("1968-04-01"),
                            List(),
                            Option(new WriterMessageMetaData("birthday"))),
              new WriteData(15, None, List(), Option(new WriterMessageMetaData("salary")))
            )
          )
          databaseWriter ! msg
          databaseWriter ! BaseWriterMessages.CloseWriter
          val expectedMessage = BaseWriterMessages.WriterClosed("".right[String])
          expectMsg(expectedMessage)
          val statement = connection.createStatement()
          withClue("The exact number of rows should have been written!") {
            val count = statement.executeQuery("SELECT COUNT(*) FROM ACCOUNTS")
            count.next() should be(true)
            count.getInt(1) shouldEqual 3
          }
          withClue("Data should have been written to the database!") {
            val results = statement.executeQuery("SELECT * FROM ACCOUNTS ORDER BY id")
            results.next() should be(true)
            results.getLong("id") should be(1)
            results.getString("name") should be("Lord Fancy Pants")
            results.getString("description") should be("An updated description text.")
            results.getDate("birthday") should be(java.sql.Date.valueOf("1968-04-01"))
            // JDBC getDouble returns 0.0 for a column that is SQL NULL.
            results.getDouble("salary") should be(0.0)
            results.next() should be(true)
            results.getLong("id") should be(2)
            results.getString("name") should be("Eva Mustermann")
            results.getString("description") should be(null)
            results.getDate("birthday") should be(java.sql.Date.valueOf("1968-01-01"))
            results.getDouble("salary") should be(1500.00)
            results.next() should be(true)
            results.getLong("id") should be(3)
            results.getString("name") should be("Dr. Evil")
            results.getString("description") should be("Beware of Austin Powers!")
            results.getDate("birthday") should be(null)
            results.getDouble("salary") should be(1500000.00)
          }
          connection.close()
        }
      }
}
}
}
| Tensei-Data/tensei-agent | src/it/scala/databases/derby/DatabaseWriterActorTest.scala | Scala | agpl-3.0 | 60,902 |
/**
* Magmanics Licensing. This web application allows for centralized control
* of client application activation, with optional configuration parameters
* to control licensable features, and storage of supplementary information
* about the client machine. Client applications may interface with this
* central server (for activation) using libraries licenced under an
* alternative licence.
*
* Copyright (C) 2010 James Baxter <j.w.baxter(at)gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.magmanics.licensing.ui.content.configuration
import com.magmanics.licensing.model.Configuration
import com.vaadin.ui.Table
import org.slf4j.LoggerFactory
/**
* @author James Baxter <[email protected]>
* @since 22 -Jun-2010
*/
/** Read-only two-column table showing the option/value pairs of a single
  * [[Configuration]]. The two columns are registered once at construction time.
  */
class ConfigurationDetailTable extends Table {

  val log = LoggerFactory.getLogger(classOf[ConfigurationDetailTable])

  // Column registration order defines the display order in the UI.
  addContainerProperty("property", classOf[String], null, "Option", null, null)
  addContainerProperty("value", classOf[String], null, "Value", null, null)

  /** Replaces the current rows with the options of the given configuration. */
  def setConfiguration(c: Configuration): Unit = {
    removeAllItems()
    log.debug("Showing options for: {}", c)
    // One row per option; the option key doubles as the item id.
    for ((key, value) <- c.options) addItem(Array(key, value), key)
    // setPageLength(min(size, 8))
  }
} | manicmonkey/licensing | Licensing-UI-Vaadin/src/main/scala/com/magmanics/licensing/ui/content/configuration/ConfigurationDetailTable.scala | Scala | gpl-3.0 | 1,851 |
package org.libss.util
/**
* Created by Kaa
* on 22.06.2016 at 01:41.
*/
/** Mixin holding mutable pagination parameters for list-style requests. */
trait PageableBase {
  // Current page, 1-based.
  var page: Long = 1L
  // Number of entries shown per page.
  var itemsPerPage: Long = 10L
}
/** String constants for the two sort directions used in sortable requests. */
object SortOrdering {
  /** Ascending sort direction marker. */
  final val Asc: String = "asc"
  /** Descending sort direction marker. */
  final val Desc: String = "desc"
}
/** Mixin holding mutable sorting parameters for list-style requests. */
trait SortableBase {
  // Name of the field to sort by; null presumably means "no sorting requested".
  // NOTE(review): an Option[String] would be safer than a null default, but
  // changing the field type would break existing callers.
  var sort: String = null
  // Sort direction; expected to be one of SortOrdering.Asc / SortOrdering.Desc.
  var ordering: String = null
}
| kanischev/libss | libss-utils/src/main/scala/org/libss/util/PageableBase.scala | Scala | apache-2.0 | 300 |
package org.scaladebugger.api.lowlevel.breakpoints
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.TestBreakpointManager
import scala.util.Success
// Verifies that the convenience methods on BreakpointManager delegate to
// createBreakpointRequestWithId, prepending the generated request id.
class BreakpointManagerSpec extends ParallelMockFunSpec
{
  // Fixed id returned by the overridden newRequestId so mock expectations can match it.
  private val TestRequestId = java.util.UUID.randomUUID().toString
  private val mockBreakpointManager = mock[BreakpointManager]
  // Subject under test; id generation is pinned to TestRequestId.
  private val testBreakpointManager = new TestBreakpointManager(
    mockBreakpointManager
  ) {
    override protected def newRequestId(): String = TestRequestId
  }
  describe("BreakpointManager") {
    describe("#createBreakpointRequest") {
      it("should invoke createBreakpointRequestWithId") {
        val expected = Success(TestRequestId)
        val testFileName = "some/file/name"
        val testLineNumber = 999
        val testExtraArguments = Seq(stub[JDIRequestArgument])
        // Expect exactly one delegation call with the generated id prepended.
        (mockBreakpointManager.createBreakpointRequestWithId _)
          .expects(TestRequestId, testFileName, testLineNumber, testExtraArguments)
          .returning(expected).once()
        val actual = testBreakpointManager.createBreakpointRequest(
          testFileName,
          testLineNumber,
          testExtraArguments: _*
        )
        actual should be (expected)
      }
    }
    describe("#createBreakpointRequestFromInfo") {
      it("should invoke createBreakpointRequestWithId") {
        val expected = Success(TestRequestId)
        val testIsPending = false
        val testFileName = "some/file/name"
        val testLineNumber = 999
        val testExtraArguments = Seq(stub[JDIRequestArgument])
        // The request id carried by the info object is reused for the delegation.
        (mockBreakpointManager.createBreakpointRequestWithId _)
          .expects(TestRequestId, testFileName, testLineNumber, testExtraArguments)
          .returning(expected).once()
        val info = BreakpointRequestInfo(
          TestRequestId,
          testIsPending,
          testFileName,
          testLineNumber,
          testExtraArguments
        )
        val actual = testBreakpointManager.createBreakpointRequestFromInfo(info)
        actual should be(expected)
      }
    }
  }
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/lowlevel/breakpoints/BreakpointManagerSpec.scala | Scala | apache-2.0 | 2,257 |
package nest.sparkle.util
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import akka.util.Timeout
import java.util.concurrent.TimeoutException
/** Utility to repeatedly call a function that returns a future. */
object RetryingFuture extends Log {

  /** Evaluates `fn` and, whenever the resulting future fails with a
    * [[TimeoutException]], evaluates it again, up to `maxAttempts` times in
    * total. Any other failure is propagated unchanged.
    *
    * NOTE(review): the implicit `futureTimeout` is accepted but never applied to
    * the futures produced by `fn`; a retry only happens when `fn` itself fails
    * with a TimeoutException. Confirm whether callers expect it to be enforced.
    */
  def retryingFuture[T] // format: OFF
      ( fn: => Future[T], maxAttempts: Int = 4 )
      ( implicit executionContext: ExecutionContext, futureTimeout: Timeout = 10.seconds)
      : Future[T] = { // format: ON

    // Recursive helper counting down the remaining attempts.
    def attempt(remaining: Int): Future[T] =
      if (remaining > 0) {
        fn.recoverWith {
          case _: TimeoutException =>
            log.info(s"retryingFuture retrying. Remaining attempts: $remaining")
            attempt(remaining - 1)
        }
      } else {
        Future.failed(new TimeoutException(s"Timeout after $maxAttempts attempts"))
      }

    attempt(maxAttempts)
  }
} | mighdoll/sparkle | util/src/main/scala/nest/sparkle/util/RetryingFuture.scala | Scala | apache-2.0 | 1,067 |
package chapter3
/** Binary tree ADT with data only at the leaves. */
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]

object Tree {

  /** Generalised bottom-up fold: maps every leaf value with `leaf` and combines
    * the results of the two subtrees of a branch with `branch`. The operations
    * below are all special cases of this fold.
    */
  def fold[A, B](tree: Tree[A])(leaf: A => B)(branch: (B, B) => B): B = tree match {
    case Leaf(v)      => leaf(v)
    case Branch(l, r) => branch(fold(l)(leaf)(branch), fold(r)(leaf)(branch))
  }

  /** Total number of nodes (branches and leaves) in the tree. */
  def size[A](tree: Tree[A]): Int =
    fold(tree)(_ => 1)((l, r) => l + r + 1)

  /** Largest leaf value of a tree of Ints. */
  def maximum(tree: Tree[Int]): Int =
    fold(tree)(identity)(_ max _)

  /** Builds a tree of the same shape with `f` applied to every leaf value. */
  def map[A, B](tree: Tree[A])(f: A => B): Tree[B] =
    fold(tree)(v => Leaf(f(v)): Tree[B])(Branch(_, _))
} | IvanGuardado/fp-exercises | chapters/src/chapter3/Tree.scala | Scala | mit | 599 |
package utils
import java.util.Properties
import com.typesafe.config.ConfigFactory
import kafka.admin.AdminUtils
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
/** Thin convenience wrapper around Kafka's AdminUtils sharing one ZkClient. */
object KafkaAdminUtils {
  // ZooKeeper session/connection timeouts in milliseconds.
  val sessionTimeoutMs = 10000
  val connectionTimeoutMs = 10000
  // NOTE(review): `zookeperConnect` is not defined in this file and no visible
  // import provides it — confirm where it comes from (and its spelling).
  // The client is created eagerly at object initialization and is never closed.
  val zkClient = new ZkClient(zookeperConnect, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer)
  // Creates `topic` with the given partition count, replication factor and config.
  def createTopic(topic: String, numPartitions: Int = 1, replicationFactor: Int = 1, topicConfig: Properties = new Properties) = {
    AdminUtils.createTopic(zkClient, topic, numPartitions, replicationFactor, topicConfig)
  }
  // Deletes `topic` via AdminUtils.
  def deleteTopic(topic: String) = AdminUtils.deleteTopic(zkClient, topic)
  // True if `topic` exists.
  def topicExists(topic: String) = AdminUtils.topicExists(zkClient, topic)
}
| MarianoGappa/kafka-examples | src/main/scala/utils/KafkaAdminUtils.scala | Scala | mit | 768 |
package org.jetbrains.plugins.scala.lang.scaladoc.psi.impl
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocInnerCodeElement
/**
* User: Dmitry Naidanov
* Date: 11/14/11
*/
/** PSI implementation of a ScalaDoc inner code element. */
class ScDocInnerCodeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScDocInnerCodeElement {
  // Fixed debug name for this node type.
  override def toString = "InnerCodeElement"
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/scaladoc/psi/impl/ScDocInnerCodeElementImpl.scala | Scala | apache-2.0 | 445 |
/*
* Copyright (C) 2014 Lymia Aluysia <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package moe.lymia.joustext
/** Extended AST node types layered on top of the core [[ast]] definitions:
  * arithmetic over values, compile-time predicates, and "synthetic"
  * instructions that carry nested blocks.
  */
object astextension {
  import ast._
  // value extensions
  final case class Variable(s: String) extends Value
  final case class Add(a: Value, b: Value) extends Value
  final case class Sub(a: Value, b: Value) extends Value
  final case class Mul(a: Value, b: Value) extends Value
  final case class Div(a: Value, b: Value) extends Value
  final case class Mod(a: Value, b: Value) extends Value
  // comparisons for compile time if/else
  trait Predicate
  final case class Equals     (a: Value, b: Value) extends Predicate
  final case class LessThan   (a: Value, b: Value) extends Predicate
  final case class GreaterThan(a: Value, b: Value) extends Predicate
  final case class Not        (v: Predicate) extends Predicate
  final case class And        (a: Predicate, b: Predicate) extends Predicate
  final case class Or         (a: Predicate, b: Predicate) extends Predicate
  // comments, etc
  final case class Abort(reason : String) extends SimpleInstruction
  final case class Raw  (comment: String) extends SimpleInstruction
  final case object Terminate extends SimpleInstruction
  // synthetic instructions
  // NOTE(review): mapContents is (re)defined per case class without `override`;
  // presumably it implements a member declared on Instruction — confirm in ast.
  trait SyntheticInstruction extends Instruction
  final case class Assign(vars: Map[String, Value], block: Block) extends SyntheticInstruction {
    def mapContents(f: Block => Block) = copy(block = f(block))
  }
  final case class IfElse(predicate: Predicate, ifClause: Block, elseClause: Block) extends SyntheticInstruction {
    def mapContents(f: Block => Block) =
      copy(ifClause   = f(ifClause  ),
           elseClause = f(elseClause))
  }
  final case class FromTo(name: String, from: Value, to: Value, block: Block) extends SyntheticInstruction {
    def mapContents(f: Block => Block) = copy(block = f(block))
  }
  final case class Splice(block: Block) extends SyntheticInstruction with SimpleBlock
  final case class Invert(block: Block) extends SyntheticInstruction with SimpleBlock
  final case class CallCC(name: String, block: Block) extends SyntheticInstruction {
    def mapContents(f: Block => Block) = copy(block = f(block))
  }
  final case class Reset (block: Block) extends SyntheticInstruction with SimpleBlock
  // functions
  case class Function(params: Seq[String], body: Block)
  final case class LetIn(definitions: Map[String, Function], block: Block) extends SyntheticInstruction {
    def mapContents(f: Block => Block) = copy(block = f(block))
  }
  final case class FunctionInvocation(name: String, params: Seq[Value]) extends SimpleInstruction
} | Lymia/JoustExt | src/main/scala/moe/lymia/joustext/astextension.scala | Scala | mit | 3,674 |
package io.finch.endpoint
import com.twitter.finagle.http.{Method => FinagleMethod}
import io.finch._
/** Wraps an endpoint so it only matches requests issued with HTTP method `m`.
  * Note that `e(input)` is evaluated exactly once per call: either directly
  * (method matches) or inside the else-branch, where it is used to decide
  * between 405 (route exists, wrong method) and a plain non-match. */
private[finch] class Method[F[_], A](m: FinagleMethod, e: Endpoint[F, A]) extends Endpoint.Mappable[F, A] { self =>
  final def apply(input: Input): EndpointResult[F, A] =
    if (input.request.method == m) e(input)
    else
      // Wrong method: still run the underlying endpoint so a path that WOULD
      // have matched reports MethodNotAllowed instead of NotMatched.
      e(input) match {
        case EndpointResult.Matched(_, _, _) => EndpointResult.NotMatched.MethodNotAllowed(m :: Nil)
        case skipped => skipped
      }
  final override def toString: String = s"${m.toString.toUpperCase} /${e.toString}"
}
| finagle/finch | core/src/main/scala/io/finch/endpoint/method.scala | Scala | apache-2.0 | 605 |
package es.weso.rbe
/** Bookkeeping information accumulated during a matching run.
  *
  * @param evidences evidences that are tried: each candidate (node, label)
  *                  pair together with a human-readable description
  * @param visited   the (subject, edge, object) triples already visited
  */
case class OutputInfo[Edge, Node, Label, Err, Evidence](
    evidences: List[((Node, Label), String)], // Evidences that are tried
    visited: Set[(Node, Edge, Node)] // Visited triples
)
| labra/rbe | src/main/scala/es/weso/rbe/OutputInfo.scala | Scala | mit | 208 |
package scala.slick.ast
import scala.language.{implicitConversions, higherKinds}
import scala.slick.SlickException
import scala.collection.generic.CanBuild
import scala.collection.mutable.{Builder, ArrayBuilder}
import scala.reflect.{ClassTag, classTag => mkClassTag}
import Util._
import scala.collection.mutable.ArrayBuffer
import scala.annotation.implicitNotFound
import scala.slick.util.TupleSupport
/** Super-trait for all types */
trait Type {
  /** All children of this Type. */
  def children: Seq[Type]
  /** Apply a transformation to all type children and reconstruct this
    * type with the new children, or return the original object if no
    * child is changed. */
  def mapChildren(f: Type => Type): Type
  /** Select the type of the member identified by `sym`. The default
    * implementation fails; types that actually have members override it. */
  def select(sym: Symbol): Type =
    throw new SlickException("No type for symbol "+sym+" found in "+this)
  /** The structural view of this type */
  def structural: Type = this
  /** A ClassTag for the erased type of this type's Scala values */
  def classTag: ClassTag[_]
}
/** An atomic type (i.e. a type which does not contain other types) */
trait AtomicType extends Type {
  // No children, so mapping over them is always the identity.
  final def mapChildren(f: Type => Type): this.type = this
  def children: Seq[Type] = Seq.empty
}
/** A record-like type of named fields (symbol/type pairs). */
final case class StructType(elements: IndexedSeq[(Symbol, Type)]) extends Type {
  override def toString = "{" + elements.iterator.map{ case (s, t) => s + ": " + t }.mkString(", ") + "}"
  /** Maps each field symbol to its 0-based position in `elements`.
    * `collection.breakOut` builds the Map directly, without an
    * intermediate IndexedSeq. */
  lazy val symbolToIndex: Map[Symbol, Int] =
    elements.zipWithIndex.map { case ((sym, _), idx) => (sym, idx) }(collection.breakOut)
  def children: IndexedSeq[Type] = elements.map(_._2)
  def mapChildren(f: Type => Type): StructType =
    mapOrNone(elements.map(_._2))(f) match {
      case Some(types2) => StructType((elements, types2).zipped.map((e, t) => (e._1, t)))
      case None => this
    }
  override def select(sym: Symbol) = sym match {
    // ElementSymbol indices are 1-based, hence the -1 adjustment.
    case ElementSymbol(idx) => elements(idx-1)._2
    case _ => elements.find(x => x._1 == sym).map(_._2).getOrElse(super.select(sym))
  }
  def classTag = TupleSupport.classTagForArity(elements.size)
}
/** The type of an optional value, wrapping an element type. */
trait OptionType extends Type {
  override def toString = "Option[" + elementType + "]"
  def elementType: Type
  def children: Seq[Type] = Seq(elementType)
  def classTag = OptionType.classTag
}
object OptionType {
  /** Create an anonymous OptionType wrapping `tpe`. */
  def apply(tpe: Type): OptionType = new OptionType {
    def elementType = tpe
    def mapChildren(f: Type => Type): OptionType = {
      val e2 = f(elementType)
      // Preserve reference identity when the child is unchanged.
      if(e2 eq elementType) this
      else OptionType(e2)
    }
  }
  // Shared erased ClassTag for all OptionTypes.
  private val classTag = mkClassTag[Option[_]]
}
/** A positional product (tuple) type. */
final case class ProductType(elements: IndexedSeq[Type]) extends Type {
  override def toString = "(" + elements.mkString(", ") + ")"
  def mapChildren(f: Type => Type): ProductType =
    mapOrNone(elements)(f) match {
      case Some(e2) => ProductType(e2)
      case None => this
    }
  override def select(sym: Symbol) = sym match {
    // ElementSymbol indices are 1-based; only the upper bound is guarded
    // here (presumably ElementSymbol is always >= 1 — TODO confirm).
    case ElementSymbol(i) if i <= elements.length => elements(i-1)
    case _ => super.select(sym)
  }
  def children: Seq[Type] = elements
  /** The element types paired with their 1-based ElementSymbols. */
  def numberedElements: Iterator[(ElementSymbol, Type)] =
    elements.iterator.zipWithIndex.map { case (t, i) => (new ElementSymbol(i+1), t) }
  def classTag = TupleSupport.classTagForArity(elements.size)
}
/** A collection type: a collection type constructor applied to an
  * element type (e.g. Seq[Int]). */
final case class CollectionType(cons: CollectionTypeConstructor, elementType: Type) extends Type {
  override def toString = s"$cons[$elementType]"
  /** Rebuild this type with a transformed element type, keeping reference
    * identity when the element is unchanged. */
  def mapChildren(f: Type => Type): CollectionType = f(elementType) match {
    case same if same eq elementType => this
    case changed => CollectionType(cons, changed)
  }
  def children: Seq[Type] = elementType :: Nil
  /** The erased ClassTag comes from the type constructor. */
  def classTag = cons.classTag
}
/** Represents a type constructor that can be used for a collection-valued query.
  * The relevant information for Slick is whether the elements of the collection
  * keep their insertion order (isSequential) and whether only distinct elements
  * are allowed (isUnique). */
trait CollectionTypeConstructor {
  /** The ClassTag for the type constructor */
  def classTag: ClassTag[_]
  /** Determines if order is relevant */
  def isSequential: Boolean
  /** Determines if only distinct elements are allowed */
  def isUnique: Boolean
  /** Create a `Builder` for the collection type, given a ClassTag for the element type */
  def createBuilder[E : ClassTag]: Builder[E, Any]
  /** Return a CollectionTypeConstructor which builds a subtype of Iterable
    * but has the same properties otherwise. */
  def iterableSubstitute: CollectionTypeConstructor =
    if(isUnique && !isSequential) TypedCollectionTypeConstructor.set
    else TypedCollectionTypeConstructor.seq
  //TODO We should have a better substitute for (isUnique && isSequential)
}
@implicitNotFound("Cannot use collection in a query\\n collection type: ${C}[_]\\n requires implicit of type: scala.slick.ast.TypedCollectionTypeConstructor[${C}]")
abstract class TypedCollectionTypeConstructor[C[_]](val classTag: ClassTag[C[_]]) extends CollectionTypeConstructor {
  override def toString = s"Coll[$classTag]"
  /** Create a `Builder` that produces the statically typed collection `C[E]`. */
  def createBuilder[E : ClassTag]: Builder[E, C[E]]
}
/** A constructor backed by a `CanBuild`; sequential/unique are derived
  * from the runtime class of the collection. */
class ErasedCollectionTypeConstructor[C[_]](canBuildFrom: CanBuild[Any, C[Any]], classTag: ClassTag[C[_]]) extends TypedCollectionTypeConstructor[C](classTag) {
  val isSequential = classOf[scala.collection.Seq[_]].isAssignableFrom(classTag.runtimeClass)
  val isUnique = classOf[scala.collection.Set[_]].isAssignableFrom(classTag.runtimeClass)
  def createBuilder[E : ClassTag] = canBuildFrom().asInstanceOf[Builder[E, C[E]]]
}
object TypedCollectionTypeConstructor {
  private[this] val arrayClassTag = mkClassTag[Array[_]]
  /** The standard TypedCollectionTypeConstructor for Seq */
  def seq = forColl[Vector]
  /** The standard TypedCollectionTypeConstructor for Set */
  def set = forColl[Set]
  /** Get a TypedCollectionTypeConstructor for an Iterable type */
  implicit def forColl[C[X] <: Iterable[X]](implicit cbf: CanBuild[Any, C[Any]], tag: ClassTag[C[_]]): TypedCollectionTypeConstructor[C] =
    new ErasedCollectionTypeConstructor[C](cbf, tag)
  /** Get a TypedCollectionTypeConstructor for an Array type */
  implicit val forArray: TypedCollectionTypeConstructor[Array] = new TypedCollectionTypeConstructor[Array](arrayClassTag) {
    def isSequential = true
    def isUnique = false
    def createBuilder[E : ClassTag]: Builder[E, Array[E]] = ArrayBuilder.make[E]
  }
}
/** A type that maps a base type to a different Scala representation via
  * the conversion functions in [[MappedScalaType.Mapper]]. */
final class MappedScalaType(val baseType: Type, val mapper: MappedScalaType.Mapper, val classTag: ClassTag[_]) extends Type {
  override def toString = s"Mapped[$baseType]"
  def mapChildren(f: Type => Type): MappedScalaType = {
    val e2 = f(baseType)
    if(e2 eq baseType) this
    else new MappedScalaType(e2, mapper, classTag)
  }
  def children: Seq[Type] = Seq(baseType)
  // Member selection is delegated to the underlying base type.
  override def select(sym: Symbol) = baseType.select(sym)
}
object MappedScalaType {
  /** Conversions between base and mapped values; `fastPath` is an optional
    * shortcut partial function. */
  case class Mapper(toBase: Any => Any, toMapped: Any => Any, fastPath: Option[PartialFunction[Any, Any]])
}
/** The standard type for freshly constructed nodes without an explicit type. */
case object UnassignedType extends AtomicType {
  def classTag = throw new SlickException("UnassignedType does not have a ClassTag")
}
/** The type of a structural view of a NominalType before computing the
  * proper type in the `inferTypes` phase. */
final case class UnassignedStructuralType(sym: TypeSymbol) extends AtomicType {
  def classTag = throw new SlickException("UnassignedStructuralType does not have a ClassTag")
}
/** A type with a name, as used by tables.
  *
  * Compiler phases which change types may keep their own representation
  * of the structural view but must update the AST at the end of the phase
  * so that all NominalTypes with the same symbol have the same structural
  * view.
  *
  * Note: only `sym` takes part in case-class equality; `structuralView`
  * lives in a second parameter list and is excluded. */
final case class NominalType(sym: TypeSymbol)(val structuralView: Type) extends Type {
  def toShortString = s"NominalType($sym)"
  override def toString = s"$toShortString($structuralView)"
  def withStructuralView(t: Type): NominalType =
    if(t == structuralView) this else copy()(t)
  override def structural: Type = structuralView.structural
  override def select(sym: Symbol): Type = structuralView.select(sym)
  def mapChildren(f: Type => Type): NominalType = {
    val struct2 = f(structuralView)
    if(struct2 eq structuralView) this
    else new NominalType(sym)(struct2)
  }
  def children: Seq[Type] = Seq(structuralView)
  /** Follow nested NominalTypes down to the innermost one. */
  def sourceNominalType: NominalType = structuralView match {
    case n: NominalType => n.sourceNominalType
    case _ => this
  }
  def classTag = structuralView.classTag
}
/** Something that has a type */
trait Typed {
  def tpe: Type
}
object Typed {
  /** Extractor: matches anything Typed, yielding its type. */
  def unapply(t: Typed) = Some(t.tpe)
}
/** A Type that carries a Scala type argument */
trait TypedType[T] extends Type { self =>
  /** The Option-lifted version of this type. */
  def optionType: OptionTypedType[T] = new OptionTypedType[T] {
    val elementType = self
    def scalaType = new ScalaOptionType[T](self.scalaType)
    def mapChildren(f: Type => Type): OptionTypedType[T] = {
      val e2 = f(elementType)
      if(e2 eq elementType) this
      else e2.asInstanceOf[TypedType[T]].optionType
    }
  }
  def scalaType: ScalaType[T]
}
trait BaseTypedType[T] extends TypedType[T] with AtomicType
trait OptionTypedType[T] extends TypedType[Option[T]] with OptionType {
  val elementType: TypedType[T]
}
/** Mark a TypedType as eligible for numeric operators. */
trait NumericTypedType
object TypedType {
  // Lift any implicit TypedType[T] to an OptionTypedType[T] on demand.
  @inline implicit def typedTypeToOptionTypedType[T](implicit t: TypedType[T]): OptionTypedType[T] = t.optionType
}
/** Extension methods for `Type`, added as a zero-allocation value class
  * (see the implicit conversion in the companion object). */
class TypeUtil(val tpe: Type) extends AnyVal {
  /** Cast to CollectionType or fail with a SlickException. */
  def asCollectionType: CollectionType = tpe match {
    case c: CollectionType => c
    case _ => throw new SlickException("Expected a collection type, found "+tpe)
  }
  /** Cast to OptionType or fail with a SlickException. */
  def asOptionType: OptionType = tpe match {
    case o: OptionType => o
    case _ => throw new SlickException("Expected an option type, found "+tpe)
  }
  /** Apply `f` to this type and, recursively, to all of its children
    * (pre-order traversal). */
  def foreach[U](f: (Type => U)) {
    def g(n: Type) {
      f(n)
      n.children.foreach(g)
    }
    g(tpe)
  }
  @inline def replace(f: PartialFunction[Type, Type]): Type = TypeUtilOps.replace(tpe, f)
  @inline def collect[T](pf: PartialFunction[Type, T]): Iterable[T] = TypeUtilOps.collect(tpe, pf)
  @inline def collectAll[T](pf: PartialFunction[Type, Seq[T]]): Iterable[T] = collect[Seq[T]](pf).flatten
}
object TypeUtil {
  /** Enrich a `Type` with the [[TypeUtil]] extension methods.
    * The result type is stated explicitly: public implicit definitions
    * should always have an explicit type to keep implicit resolution
    * predictable (and to avoid compiler warnings on newer Scala versions). */
  implicit def typeToTypeUtil(tpe: Type): TypeUtil = new TypeUtil(tpe)
  /** An extractor for node types: `case n :@ tpe =>` matches a node
    * together with its assigned type. */
  object :@ {
    def unapply(n: Node): Option[(Node, Type)] = Some((n, n.nodeType))
  }
}
object TypeUtilOps {
  import TypeUtil.typeToTypeUtil
  /** Rewrite a type tree: where `f` is defined it replaces the node,
    * otherwise the node's children are rewritten recursively. */
  def replace(tpe: Type, f: PartialFunction[Type, Type]): Type =
    f.applyOrElse(tpe, { case t: Type => t.mapChildren(_.replace(f)) }: PartialFunction[Type, Type])
  /** Collect the results of `pf` over the whole type tree (pre-order). */
  def collect[T](tpe: Type, pf: PartialFunction[Type, T]): Iterable[T] = {
    val b = new ArrayBuffer[T]
    tpe.foreach(pf.andThen[Unit]{ case t => b += t }.orElse[Type, Unit]{ case _ => () })
    b
  }
}
/** An immutable mapping from symbols to types. */
trait SymbolScope {
  def + (entry: (Symbol, Type)): SymbolScope
  def get(sym: Symbol): Option[Type]
  def withDefault(f: (Symbol => Type)): SymbolScope
}
object SymbolScope {
  val empty = new DefaultSymbolScope(Map.empty)
}
/** The default SymbolScope, backed by an immutable Map. */
class DefaultSymbolScope(val m: Map[Symbol, Type]) extends SymbolScope {
  def + (entry: (Symbol, Type)) = new DefaultSymbolScope(m + entry)
  def get(sym: Symbol): Option[Type] = m.get(sym)
  def withDefault(f: (Symbol => Type)) = new DefaultSymbolScope(m.withDefault(f))
}
/** A Slick Type encoding of plain Scala types.
  *
  * This is used by QueryInterpreter and MemoryDriver. Values stored in
  * HeapBackend columns are also expected to use these types.
  *
  * All drivers should support the following types which are used internally
  * by the lifted embedding and the query compiler: Boolean, Char, Int, Long,
  * Null, String. */
trait ScalaType[T] extends TypedType[T] {
  override def optionType: ScalaOptionType[T] = new ScalaOptionType[T](this)
  def nullable: Boolean
  def ordered: Boolean
  def scalaOrderingFor(ord: Ordering): scala.math.Ordering[T]
  final def scalaType = this
  final def isPrimitive = classTag.runtimeClass.isPrimitive
}
/** A ScalaType for a base (atomic) type. A `null` ordering is used as a
  * sentinel for "no ordering available" (see `ordered`). */
class ScalaBaseType[T](implicit val classTag: ClassTag[T], val ordering: scala.math.Ordering[T]) extends ScalaType[T] with BaseTypedType[T] {
  override def toString = "ScalaType[" + classTag.runtimeClass.getName + "]"
  def nullable = false
  def ordered = ordering ne null
  /** Build a runtime Ordering honoring direction and null placement. */
  def scalaOrderingFor(ord: Ordering) = {
    if(ordering eq null) throw new SlickException("No ordering defined for "+this)
    val base = if(ord.direction == Ordering.Desc) ordering.reverse else ordering
    val nullsFirst = if(ord.nulls == Ordering.NullsFirst) -1 else 1
    new scala.math.Ordering[T] {
      def compare(x: T, y: T): Int = {
        // Nulls are compared before delegating to the base ordering.
        if((x.asInstanceOf[AnyRef] eq null) && (y.asInstanceOf[AnyRef] eq null)) 0
        else if(x.asInstanceOf[AnyRef] eq null) nullsFirst
        else if(y.asInstanceOf[AnyRef] eq null) -nullsFirst
        else base.compare(x, y)
      }
    }
  }
  // Equality is by erased element class only (the ordering is ignored).
  override def hashCode = classTag.hashCode
  override def equals(o: Any) = o match {
    case t: ScalaBaseType[_] => classTag == t.classTag
    case _ => false
  }
}
object ScalaBaseType {
  implicit val booleanType = new ScalaBaseType[Boolean]
  implicit val bigDecimalType = new ScalaNumericType[BigDecimal](BigDecimal.apply _)
  implicit val byteType = new ScalaNumericType[Byte](_.toByte)
  implicit val charType = new ScalaBaseType[Char]
  implicit val doubleType = new ScalaNumericType[Double](identity)
  implicit val floatType = new ScalaNumericType[Float](_.toFloat)
  implicit val intType = new ScalaNumericType[Int](_.toInt)
  implicit val longType = new ScalaNumericType[Long](_.toLong)
  implicit val nullType = new ScalaBaseType[Null]
  implicit val shortType = new ScalaNumericType[Short](_.toShort)
  implicit val stringType = new ScalaBaseType[String]
  // Lookup table from erased element class to the canonical instance above.
  private[this] val all: Map[ClassTag[_], ScalaBaseType[_]] =
    Seq(booleanType, bigDecimalType, byteType, charType, doubleType,
      floatType, intType, longType, nullType, shortType, stringType).map(s => (s.classTag, s)).toMap
  /** Return the canonical instance for `tag` if one exists, otherwise create
    * a fresh one (using `ord`, which may be the `null` sentinel). */
  def apply[T](implicit tag: ClassTag[T], ord: scala.math.Ordering[T] = null): ScalaBaseType[T] =
    all.getOrElse(tag, new ScalaBaseType[T]).asInstanceOf[ScalaBaseType[T]]
}
/** A numeric ScalaBaseType; `fromDouble` converts back from Double. */
class ScalaNumericType[T](val fromDouble: Double => T)(implicit tag: ClassTag[T], val numeric: Numeric[T])
  extends ScalaBaseType[T]()(tag, numeric) with NumericTypedType {
  def toDouble(v: T) = numeric.toDouble(v)
}
/** The ScalaType for Option-lifted element types. */
class ScalaOptionType[T](val elementType: ScalaType[T]) extends ScalaType[Option[T]] with OptionTypedType[T] {
  override def toString = "ScalaOptionType[" + elementType + "]"
  def nullable = true
  def ordered = elementType.ordered
  /** Build an Ordering over Option values: None is placed according to
    * `ord.nulls`, Some values delegate to the element ordering. */
  def scalaOrderingFor(ord: Ordering) = {
    val nullsFirst = if(ord.nulls == Ordering.NullsFirst) -1 else 1
    val base = elementType.scalaOrderingFor(ord)
    new scala.math.Ordering[Option[T]] {
      def compare(x: Option[T], y: Option[T]): Int = {
        if(x == None && y == None) 0
        else if(x == None) nullsFirst
        else if(y == None) -nullsFirst
        // Both are Some here, so .get is safe.
        else base.compare(x.get, y.get)
      }
    }
  }
  def mapChildren(f: Type => Type): ScalaOptionType[T] = {
    val e2 = f(elementType)
    if(e2 eq elementType) this
    else e2.asInstanceOf[ScalaType[T]].optionType
  }
}
| dvinokurov/slick | src/main/scala/scala/slick/ast/Type.scala | Scala | bsd-2-clause | 15,300 |
/**
* Copyright 2010-2012 Alex Jones
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package update
import java.io.IOException
import java.time.Clock
import com.typesafe.scalalogging.StrictLogging
import dao.GameDao
import html.{DatePlayedLocator, GameKeyLocator, GameLocator, GameUpdateCommand}
import logging.RemoteStream
import model.Game
import models.Location.HOME
import monads.{FE, FO}
import monads.FE.FutureEitherNel
import update.fixtures.FixturesGameScanner
import update.tickets.TicketsGameScanner
import scala.concurrent.{ExecutionContext, Future}
import cats.instances.future._
import dates.ZonedDateTimeFactory
import monads.FO.FutureOption
/**
 * The main service that scans remote sources for game (fixture and ticket)
 * updates and persists the results through the {@link GameDao}.
 *
 * @author alex
 */
class MainUpdateServiceImpl @javax.inject.Inject() (
  /**
   * The {@link GameDao} for getting persisted {@link Game} information.
   */
  gameDao: GameDao,
  /**
   * The {@link GamesScanner} for getting game information.
   */
  fixturesGameScanner: FixturesGameScanner,
  /**
   * The {@link GamesScanner} for getting game information.
   */
  ticketsGameScanner: TicketsGameScanner,
  /**
   * The {@link LastUpdated} used to notify the application when calendars were last updated.
   */
  lastUpdated: LastUpdated
  /**
   * The implicit parameters supply the execution context and the
   * {@link ZonedDateTimeFactory} used to get the current date and time.
   */
  )(implicit ec: ExecutionContext,
    zonedDateTimeFactory: ZonedDateTimeFactory) extends MainUpdateService with StrictLogging {
  /**
   * Process all updates required in the database: scan fixtures, then
   * tickets (over both the pre-existing and newly created games), record
   * the update time and return the total number of known games.
   */
  override def processDatabaseUpdates(remoteStream: RemoteStream): FutureEitherNel[String, Int] = {
    implicit val _remoteStream = remoteStream
    for {
      allGames <- FE(gameDao.getAll)
      latestSeason <- FE(gameDao.getLatestSeason.value.map(Right(_))) // Do not short circuit if there is no latest season.
      newGames <- processUpdates("fixture", fixturesGameScanner, latestSeason, allGames)
      _ <- processUpdates("ticket", ticketsGameScanner, latestSeason, allGames ++ newGames)
    } yield {
      lastUpdated at zonedDateTimeFactory.now
      (allGames ++ newGames).size
    }
  }
  /**
   * Process updates from a single scanner and persist any changes.
   * Returns only the games that were newly created by this pass.
   *
   * @param updatesType
   *          the updates type
   * @param scanner
   *          the scanner
   * @throws IOException
   *           Signals that an I/O exception has occurred.
   */
  def processUpdates(updatesType: String, scanner: GameScanner, latestSeason: Option[Int], allGames: Seq[Game])(implicit remoteStream: RemoteStream):
  FutureEitherNel[String, List[Game]] = {
    logger info s"Scanning for $updatesType changes."
    for {
      allGameUpdateCommands <- scanner.scan(latestSeason)
      games <- FE(updateAndStoreGames(allGames, allGameUpdateCommands))
    } yield {
      games
    }
  }
  /**
   * Group update commands by the game they locate and apply each group.
   * The stores are chained sequentially via flatMap; the returned list
   * contains only newly created games (see updateAndStoreGame).
   */
  def updateAndStoreGames(allGames: Seq[Game], allGameUpdateCommands: Seq[GameUpdateCommand]) = {
    val updatesByGameLocator = allGameUpdateCommands.groupBy(_.gameLocator)
    updatesByGameLocator.foldRight(Future.successful(List.empty[Game])) { (gl, fGames) =>
      val (gameLocator, updates) = gl
      fGames.flatMap { games =>
        updateAndStoreGame(allGames, gameLocator, updates).map {
          case Some(game) => game :: games
          case _ => games
        }
      }
    }
  }
  /**
   * Apply updates to an existing game or to a newly created one. The result
   * is Some(game) only when a brand new game was persisted; updates to
   * existing games are stored but yield None.
   */
  def updateAndStoreGame(allGames: Seq[Game], gameLocator: GameLocator, updates: Seq[GameUpdateCommand]): Future[Option[Game]] = {
    val (isNew, oGame) = findGame(allGames, gameLocator) match {
      case Some(game) => (false, Some(game))
      case _ => (true, createNewGame(gameLocator))
    }
    val oUpdate = for {
      game <- oGame
      updatedGame <- updateGame(game, updates)
    } yield {
      gameDao.store(updatedGame).map { persistedGame =>
        if (isNew) Some(persistedGame) else None
      }
    }
    // No game to update (or no effective changes): nothing was stored.
    oUpdate.getOrElse(Future.successful(None))
  }
  /**
   * Find a game from its game locator.
   */
  def findGame(allGames: Seq[Game], gameLocator: GameLocator): Option[Game] = {
    allGames.find(gameLocator.matches)
  }
  /**
   * Create a new game if the game locator permits it (i.e. is a gameKey locator) or return None otherwise.
   */
  def createNewGame(gameLocator: GameLocator): Option[Game] = {
    gameLocator match {
      case GameKeyLocator(gameKey) =>
        logger info s"Creating new game $gameKey"
        Some(Game.gameKey(gameKey))
      case DatePlayedLocator(datePlayed) =>
        logger info s"Tickets were found for a non-existent game played at $datePlayed. Ignoring."
        None
    }
  }
  /**
   * Update a game with a list of updates. Returns Some(updatedGame) only if
   * at least one command actually changed the game, None otherwise.
   */
  def updateGame(game: Game, updates: Traversable[GameUpdateCommand]): Option[Game] = {
    // Local accumulator tracking whether any command changed the game.
    case class UpdatedGame(game: Game, updated: Boolean = false) {
      def update(newGame: Game) = UpdatedGame(newGame, updated = true)
    }
    val updatedGame: UpdatedGame = updates.foldLeft(UpdatedGame(game)) { (updatedGame, gameUpdateCommand) =>
      gameUpdateCommand.update(updatedGame.game) match {
        case Some(newGame) => updatedGame.update(newGame)
        case _ => updatedGame
      }
    }
    if (updatedGame.updated) {
      Some(updatedGame.game)
    }
    else {
      logger info s"Ignoring game ${game.gameKey}"
      None
    }
  }
  def attendGame(gameId: Long): FutureOption[Game] = attendOrUnattendGame(gameId, attend = true)
  def unattendGame(gameId: Long): FutureOption[Game] = attendOrUnattendGame(gameId, attend = false)
  def attendOrUnattendGame(gameId: Long, attend: Boolean): FutureOption[Game] = FO {
    attendOrUnattendGames(gameDao.findById(gameId).value.map(_.toList), attend).map(_.headOption)
  }
  def attendAllHomeGamesForSeason(season: Int): Future[List[Game]] =
    attendOrUnattendGames(gameDao.getAllForSeasonAndLocation(season, HOME), attend = true)
  /**
   * Set the attended flag on each game and persist it; the stores are
   * chained sequentially and the updated games returned in order.
   */
  def attendOrUnattendGames(fGames: Future[List[Game]], attend: Boolean): Future[List[Game]] = {
    fGames.flatMap { games =>
      games.foldRight(Future.successful(List.empty[Game])) { (game, fGames) =>
        for {
          newGame <- gameDao.store(game.copy(attended = attend))
          games <- fGames
        } yield newGame :: games
      }
    }
  }
}
| unclealex72/west-ham-calendar | app/update/MainUpdateServiceImpl.scala | Scala | apache-2.0 | 6,880 |
package com.ponkotuy.value
import scala.collection.JavaConversions._
import com.typesafe.config.{Config, ConfigFactory}
import org.slf4j.{Logger, LoggerFactory}
/**
* Date: 14/06/10
*/
/** A single game server entry loaded from the `kc-server` config resource. */
case class KCServer(number: Int, ip: String, name: String)
object KCServer {
  // Name of the config resource (kc-server.conf) read by ConfigFactory.
  val configName = "kc-server"
  lazy val logger: Logger = LoggerFactory.getLogger(getClass)
  lazy val config: Config = {
    logger.info(s"config file loading. resource: ${configName}")
    ConfigFactory.load(configName);
  }
  /** All servers parsed from the `servers` config list (loaded once, lazily).
    * NOTE(review): `.toList` on the Java list relies on the deprecated
    * `scala.collection.JavaConversions._` import at the top of this file. */
  lazy val values = {
    logger.info("load servers list")
    val configList = config.getConfigList("servers").toList
    val serverList = configList.map { config =>
      val number = config.getInt("number")
      val ip = config.getString("ip")
      val name = config.getString("name")
      logger.debug(s"add server. number: ${number}, ip: ${ip}, name: ${name}")
      KCServer(number,ip,name)
    }
    serverList
  }
  // All known server IPs, for fast membership checks.
  lazy val ips = values.map(_.ip).toSet
  def list: List[KCServer] = values
  def fromNumber(number: Int): Option[KCServer] = values.find(_.number == number)
  def fromIP(ip: String): Option[KCServer] = values.find(_.ip == ip)
}
| ponkotuy/MyFleetGirls | library/src/main/scala/com/ponkotuy/value/KCServer.scala | Scala | mit | 1,153 |
package com.ibm.watson.developer_cloud.service
import spray.json._
/**
* Created by Martin Harvan ([email protected]) on 06/04/16.
*/
/** Base class for generated models: two models are considered equal exactly
  * when their string representations (supplied by the concrete subclass via
  * the abstract `toString`) are equal. */
abstract class GenericModel {
  /** Equality via the subclass-provided string form. `null` and values that
    * are not a GenericModel never compare equal. */
  override def equals(o: Any): Boolean = o match {
    case other: GenericModel => this.toString == other.toString
    case _                   => false
  }

  /** Kept consistent with `equals`: both delegate to `toString`. */
  override def hashCode: Int = toString.hashCode

  /** Concrete models must render themselves; drives equals/hashCode. */
  override def toString: String
}
| kane77/watson-scala-wrapper | src/main/scala/com/ibm/watson/developer_cloud/service/GenericModel.scala | Scala | apache-2.0 | 446 |
package reactivemongo.api
import scala.concurrent.{ ExecutionContext, Future }
import reactivemongo.core.protocol.Response
/** Internal cursor operations. */
trait CursorOps[T] { cursor: Cursor[T] =>
  /** Sends the initial request. */
  private[reactivemongo] def makeRequest(maxDocs: Int)(implicit ec: ExecutionContext): Future[Response]
  /**
   * Returns a function that can be used to get the next response,
   * if allowed according to the `maxDocs` and the cursor options
   * (cursor not exhausted, tailable, ...)
   */
  private[reactivemongo] def nextResponse(maxDocs: Int): (ExecutionContext, Response) => Future[Option[Response]]
  /**
   * Returns an iterator to read the response documents,
   * according the provided read for the element type `T`.
   */
  private[reactivemongo] def documentIterator(response: Response): Iterator[T]
  /**
   * Kills the server resources associated with the specified cursor.
   *
   * @param id the cursor ID
   */
  private[reactivemongo] def killCursor(id: Long)(implicit ec: ExecutionContext): Unit
  /** Indicates whether the underlying cursor is [[https://docs.mongodb.com/manual/core/tailable-cursors/ tailable]]. */
  def tailable: Boolean
  /** The underlying connection */
  def connection: MongoConnection
  /** The strategy to failover the cursor operations */
  def failoverStrategy: FailoverStrategy
}
object CursorOps {
  /**
   * Wraps an exception that has already been passed to the current error
   * handler and should not be recovered.
   */
  private[reactivemongo] case class UnrecoverableException(cause: Throwable)
    extends scala.RuntimeException(cause)
    with scala.util.control.NoStackTrace
}
| ReactiveMongo/ReactiveMongo | driver/src/main/scala/api/CursorOps.scala | Scala | apache-2.0 | 1,679 |
package no.finn.repoindexer.flows
import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{BasicHttpCredentials, Authorization}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Source}
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus._
import no.finn.repoindexer._
import org.apache.logging.log4j.LogManager
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import akka.stream.scaladsl._
import akka.http.scaladsl.Http
import HttpMethods._
import scala.concurrent.ExecutionContext.Implicits.global
/** Akka-streams pipeline that lists Stash/Bitbucket-Server projects and
  * repositories and turns them into clone candidates. */
object Stash {
  val config = ConfigFactory.load()
  val stashUsername = config.getString("stash.username")
  val stashPassword = config.getString("stash.password")
  val stashBaseUrl = config.getString("stash.url")
  val apiPath = s"${stashBaseUrl}/rest/api/1.0"
  val projectsUrl = s"${apiPath}/projects"
  val log = LogManager.getLogger()
  implicit val formats = DefaultFormats
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  /** GET `url` with basic-auth credentials and return the full response
    * body as a String.
    * NOTE(review): the body is decoded as ISO8859-1 even though the Stash
    * REST API returns JSON — confirm the intended charset (UTF-8?). */
  def stashAuthenticatedRequest(url: String) = {
    val authorization = BasicHttpCredentials(stashUsername, stashPassword)
    val req = HttpRequest(GET, uri = url, headers = List(headers.Authorization(authorization)))
    Http().singleRequest(req)
      .map(res => res.entity.dataBytes.runFold(ByteString(""))(_ ++ _))
      .flatMap(bs => bs.map(_.decodeString("ISO8859-1")))
  }
  /** Performs the initial query against the base url **/
  // NOTE(review): because this is a val holding Source.fromFuture(r), the
  // HTTP request fires once at object initialization and every
  // materialization replays the same result.
  val projectListSource: Source[List[Project], akka.NotUsed] = {
    val r = stashAuthenticatedRequest(projectsUrl)
      .map(parse(_))
      .map(data => data.extract[ProjectResponse].values)
    Source.fromFuture(r)
  }
  // Flatten the single project list into individual projects.
  val projectUrlFlow:Flow[List[Project], Project, akka.NotUsed] = Flow[List[Project]].mapConcat { identity }
  // Build the repository-listing URL for each project.
  val repoReqFlow : Flow[Project, String, akka.NotUsed] = Flow[Project].map { project =>
    s"${apiPath}/projects/${project.key}/repos?limit=500"
  }
  // Fetch repo lists with up to 2 requests in flight, order not preserved.
  val repoListFlow : Flow[String, List[StashRepo], akka.NotUsed] = Flow[String].mapAsyncUnordered(2) { r =>
    stashAuthenticatedRequest(r)
      .map(parse(_))
      .map(data => data.extract[RepoResponse].values)
  }
  val repoFlow : Flow[List[StashRepo], StashRepo, akka.NotUsed] = Flow[List[StashRepo]].mapConcat { identity }
  // Pick the repo's "ssh" clone link if present; None otherwise.
  val cloneFlow : Flow[StashRepo, CloneRepo, akka.NotUsed] = Flow[StashRepo].map { repo =>
    val url = repo.links("clone").find(l => l.name match {
      case Some(name) => name == "ssh"
      case None => false
    })
    url match {
      case Some(link) => CloneRepo(Some(link), repo.slug)
      case None => CloneRepo(None, repo.slug)
    }
  }
  val repositoriesFlow = projectListSource
    .via(projectUrlFlow)
    .via(repoReqFlow)
    .via(repoListFlow)
    .via(repoFlow)
  val cloneCandidatesFlow = repositoriesFlow
    .via(cloneFlow)
}
| chriswk/repoindexer | indexer/src/main/scala/no/finn/repoindexer/flows/Stash.scala | Scala | mit | 2,945 |
/* _____ _ __ ________ ___ *\\
** / ___/(_) |/ / ___/ |__ \\ Simple Mechanics Simulator 2 **
** \\__ \\/ / /|_/ /\\__ \\ __/ / copyright (c) 2011 Jakob Odersky **
** ___/ / / / / /___/ / / __/ **
** /____/_/_/ /_//____/ /____/ **
\\* */
package sims.collision.broadphase
import sims.collision._
import scala.collection.mutable.ArrayBuffer
/** Base class for broad-phase collision detectors: maintains the set of
  * managed collidables and exposes the potentially colliding pairs. */
abstract class BroadPhaseDetector[A <: Collidable: ClassManifest] {

  /** Backing store for the managed collidables (mutable, subclass-visible). */
  protected var _items = new ArrayBuffer[A]

  /** Collidable items managed by this collision detector. */
  def items: Seq[A] = _items

  /** Adds a single item to this collision detector. */
  def +=(item: A) = _items += item

  /** Adds every item of the given collection to this collision detector. */
  def ++=(items: Iterable[A]) = items.foreach(this += _)

  /** Removes a single item from this collision detector. */
  def -=(item: A) = _items -= item

  /** Removes every item of the given collection from this collision detector. */
  def --=(items: Iterable[A]) = items.foreach(this -= _)

  /** Removes all items from this collision detector. */
  def clear() = _items.clear()

  /** Applies a given function to every potentially colliding pair.
   *  @param f function applied to every potentially colliding pair */
  def foreach(f: ((A, A)) => Unit)
} | jodersky/sims2 | src/main/scala/sims/collision/broadphase/BroadPhaseDetector.scala | Scala | bsd-3-clause | 1,437 |
/*
* Copyright 2013 TeamNexus
*
* TeamNexus Licenses this file to you under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.nexus.network
import scala.collection.mutable
import io.netty.channel.ChannelHandlerContext
import com.nexus.network.handlers.{DummyNetworkHandler, NetworkHandlerWebsocket, NetworkHandler}
/**
* No description given
*
* @author jk-5
*/
/** Registry that maps decoder names to handler classes and live channel
  * contexts to their network handlers. */
object NetworkRegistry {
  /** Handler implementation to instantiate for each known decoder name. */
  private final val decoderToHandlerClass = mutable.HashMap[String, Class[_ <: NetworkHandler]](
    "websocket" -> classOf[NetworkHandlerWebsocket]
  )
  /** Live handler for each connected channel context. */
  private final val ctxToHandlerMap = mutable.HashMap[ChannelHandlerContext, NetworkHandler]()
  @inline def getHandlerClass(decoder: String) = this.decoderToHandlerClass.get(decoder)
  @inline def addHandler(handler: NetworkHandler) = this.ctxToHandlerMap.put(handler.getChannelContext, handler)
  @inline def getHandler(ctx: ChannelHandlerContext): Option[NetworkHandler] = this.ctxToHandlerMap.get(ctx)
  /** Returns the registered handler for `ctx`, or a fresh (unregistered)
    * DummyNetworkHandler when none exists. */
  @inline def getOrCreateHandler(ctx: ChannelHandlerContext): NetworkHandler = this.ctxToHandlerMap.get(ctx).getOrElse(new DummyNetworkHandler(ctx))
  /** Replaces `handler` with `newHandler` in the registry. */
  def upgradeHandler(handler: NetworkHandler, newHandler: NetworkHandler){
    // Remove the old handler's mapping only if it is actually registered.
    // The previous implementation called .get on the Option returned by
    // find, which threw NoSuchElementException when `handler` had never
    // been added to the registry.
    this.ctxToHandlerMap.find(_._2 == handler).foreach { case (ctx, _) => this.ctxToHandlerMap.remove(ctx) }
    this.addHandler(newHandler)
  }
}
| crvidya/nexus-scala | src/main/scala/com/nexus/network/NetworkRegistry.scala | Scala | mit | 1,793 |
package io.github.mandar2812.dynaml.modelpipe
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.stats.distributions.{ContinuousDistr, Moments}
import io.github.mandar2812.dynaml.algebra.{PartitionedPSDMatrix, PartitionedVector}
import io.github.mandar2812.dynaml.models.gp.AbstractGPRegressionModel
import io.github.mandar2812.dynaml.models.stp.{AbstractSTPRegressionModel, MVStudentsTModel}
import io.github.mandar2812.dynaml.models.{
ContinuousProcessModel, GenContinuousMixtureModel,
SecondOrderProcessModel, StochasticProcessMixtureModel}
import io.github.mandar2812.dynaml.optimization.GloballyOptimizable
import io.github.mandar2812.dynaml.pipes.DataPipe2
import io.github.mandar2812.dynaml.probability.{ContinuousRVWithDistr, MatrixTRV, MultGaussianPRV, MultStudentsTPRV}
import io.github.mandar2812.dynaml.probability.distributions.{
BlockedMultiVariateGaussian, BlockedMultivariateStudentsT,
HasErrorBars, MatrixT}
import scala.reflect.ClassTag
/**
  * Mixture Pipe takes a sequence of stochastic process models
  * and associated probability weights and returns a mixture model.
  *
  * The type-parameter bounds encode the requirements on the components:
  * `BaseDistr` must be a continuous distribution over `YDomain` with moments
  * and error bars, `W1` the corresponding random variable, and `BaseProcess`
  * a continuous, second-order, globally-optimizable process model.
  * `T`/`I`/`Y` presumably follow the library's (training data, index, output)
  * convention — confirm against the model hierarchy.
  *
  * @author mandar2812 date 22/06/2017.
  * */
abstract class MixturePipe[
  T, I: ClassTag, Y, YDomain, YDomainVar,
  BaseDistr <: ContinuousDistr[YDomain]
    with Moments[YDomain, YDomainVar]
    with HasErrorBars[YDomain],
  W1 <: ContinuousRVWithDistr[YDomain, BaseDistr],
  BaseProcess <: ContinuousProcessModel[T, I, Y, W1]
    with SecondOrderProcessModel[T, I, Y, Double, DenseMatrix[Double], W1]
    with GloballyOptimizable] extends
  DataPipe2[Seq[BaseProcess], DenseVector[Double],
    GenContinuousMixtureModel[
      T, I, Y, YDomain, YDomainVar,
      BaseDistr, W1, BaseProcess]]
/** Builds a Gaussian process mixture model from component GP regression models
  * and their mixture weights. */
class GPMixturePipe[T, I: ClassTag] extends
  MixturePipe[T, I, Double, PartitionedVector, PartitionedPSDMatrix,
    BlockedMultiVariateGaussian, MultGaussianPRV,
    AbstractGPRegressionModel[T, I]] {

  override def run(
    models: Seq[AbstractGPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
/** Builds a Student's T process mixture model from component STP regression models
  * and their mixture weights. */
class StudentTMixturePipe[T, I: ClassTag] extends
  MixturePipe[T, I, Double, PartitionedVector, PartitionedPSDMatrix,
    BlockedMultivariateStudentsT, MultStudentsTPRV,
    AbstractSTPRegressionModel[T, I]] {

  override def run(
    models: Seq[AbstractSTPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
/** Builds a mixture of multi-output (matrix-variate) Student's T models
  * from component models and their mixture weights. */
class MVStudentsTMixturePipe[T, I: ClassTag] extends
  MixturePipe[
    T, I, DenseVector[Double], DenseMatrix[Double],
    (DenseMatrix[Double], DenseMatrix[Double]),
    MatrixT, MatrixTRV,
    MVStudentsTModel[T, I]] {

  override def run(
    models: Seq[MVStudentsTModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
| transcendent-ai-labs/DynaML | dynaml-core/src/main/scala/io/github/mandar2812/dynaml/modelpipe/MixturePipe.scala | Scala | apache-2.0 | 2,796 |
package com.datastax.spark.connector.rdd.partitioner
import java.net.InetAddress
import org.apache.spark.Partition
import com.datastax.spark.connector.rdd.partitioner.dht.{Token, TokenFactory, TokenRange}
/** Stores a CQL `WHERE` predicate matching a range of tokens.
  *
  * `tf.minToken` appears to be used as a sentinel marking an unbounded end of
  * the range — TODO confirm against `TokenFactory` semantics.
  */
case class CqlTokenRange[V, T <: Token[V]](range: TokenRange[V, T])(implicit tf: TokenFactory[V, T]) {

  // Wrap-around ranges must be split into non-wrapping ranges before reaching here.
  require(!range.isWrappedAround)

  /** Returns the predicate text over `token(pk)` together with its bind values. */
  def cql(pk: String): (String, Seq[Any]) =
    if (range.start == tf.minToken && range.end == tf.minToken)
      // Both ends are minToken: match the whole token ring.
      (s"token($pk) >= ?", Seq(range.start.value))
    else if (range.start == tf.minToken)
      // Unbounded on the left: everything up to and including the end token.
      (s"token($pk) <= ?", Seq(range.end.value))
    else if (range.end == tf.minToken)
      // Unbounded on the right: everything strictly after the start token.
      (s"token($pk) > ?", Seq(range.start.value))
    else
      // Half-open interval (start, end].
      (s"token($pk) > ? AND token($pk) <= ?", Seq(range.start.value, range.end.value))
}
/** A Spark partition that additionally knows the node addresses able to serve it. */
trait EndpointPartition extends Partition {
  /** Addresses of the nodes on which this partition's data is located. */
  def endpoints: Iterable[InetAddress]
}
/** Metadata describing Cassandra table partition processed by a single Spark task.
 * Beware the term "partition" is overloaded. Here, in the context of Spark,
 * it means an arbitrary collection of rows that can be processed locally on a single Cassandra cluster node.
 * A `CassandraPartition` typically contains multiple CQL partitions, i.e. rows identified by different values of
 * the CQL partitioning key.
 *
 * @tparam V token value type
 * @tparam T token type
 * @param index identifier of the partition, used internally by Spark
 * @param endpoints which nodes the data partition is located on
 * @param tokenRanges token ranges determining the row set to be fetched
 * @param dataSize estimated amount of data in the partition
 */
case class CassandraPartition[V, T <: Token[V]](
  index: Int,
  endpoints: Iterable[InetAddress],
  tokenRanges: Iterable[CqlTokenRange[V, T]],
  dataSize: Long) extends EndpointPartition
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/partitioner/CassandraPartition.scala | Scala | apache-2.0 | 1,827 |
package io.questions.model.questionnaire
import cats.syntax.show._
import io.questions.QuestionsSpec
import io.questions.model.questionnaire.Element._
import io.questions.model.questionnaire.NodeMetadata._
import io.questions.model.questionnaire.nodekey.NodeKey
/** Specs for `QuestionnaireNodeMetadataUpdater`: replacing a node's metadata by key
  * while leaving the rest of the questionnaire tree untouched. */
class QuestionnaireNodeMetadataUpdaterSpec extends QuestionsSpec {

  // Fixture tree: root -> (parent1 -> (child1, child2), parent2 -> child3)
  private val children1 = qn("child1", "answer1")
  private val children2 = qn("child2", "answer2")
  private val children3 = qn("child3", "answer3")
  private val parent1 = qn("parent1", children1, children2)
  private val parent2 = qn("parent2", children3)
  private val root = qn("root", parent1, parent2)

  "update metadata" - {
    "doesn't update metadata if the given id doesn't exist in the tree" in {
      val badKey = NodeKey.random
      QuestionnaireNodeMetadataUpdater(root, badKey, Seq.empty[NodeMetadata]).left.value must startWith(s"Key ${badKey.show} is unknown")
    }

    "updates metadata" - {
      val newMetadata = Seq(SectionTag, PageTag, MoneyQuestion)

      // Leaf node: only child1's metadata changes; siblings and other branches are preserved.
      "if node exists and the new metadata fully replaces the old one, not modifying the rest of the tree" in {
        val newChildren1 = children1.copy(metadata = newMetadata)
        val newParent1 = parent1.copy(element = NonRepeatingParent(newChildren1, children2))
        val expected = root.copy(element = NonRepeatingParent(newParent1, parent2))
        QuestionnaireNodeMetadataUpdater(root, children1.key, newMetadata).right.value mustBe expected
      }

      "updates metadata for root node" in {
        val expected = root.copy(metadata = newMetadata)
        QuestionnaireNodeMetadataUpdater(root, root.key, newMetadata).right.value mustBe expected
      }

      "updates metadata for intermediate node" in {
        val newParent2 = parent2.copy(metadata = newMetadata)
        val expected = root.copy(element = NonRepeatingParent(parent1, newParent2))
        QuestionnaireNodeMetadataUpdater(root, parent2.key, newMetadata).right.value mustBe expected
      }
    }
  }
}
| channingwalton/qanda | questionnaire/src/test/scala/io/questions/model/questionnaire/QuestionnaireNodeMetadataUpdaterSpec.scala | Scala | mit | 2,025 |
package org.gbougeard.model.changes
/** Commit metadata: SHA, parent commits, author/committer identities, and message.
  *
  * NOTE(review): `commiter` is misspelled (should be `committer`), but renaming it
  * would change the JSON field name produced by `Json.format` — confirm against the
  * wire format before fixing.
  */
case class CommitInfo(commit: String,
                      parent: List[CommitInfo],
                      author: GitPersonInfo,
                      commiter: GitPersonInfo,
                      subject: String,
                      message: String)
object CommitInfo {
  import play.api.libs.json.Json
  import play.api.libs.functional.syntax._
  // Brings the implicit GitPersonInfo JSON format into scope for the macro below.
  import GitPersonInfo._

  // NOTE(review): CommitInfo is recursive via `parent`; verify that the Json.format
  // macro resolves the self-referential format here (recursive types may require a
  // lazy format in some Play JSON versions).
  implicit val format = Json.format[CommitInfo]
}
| gbougeard/gas | src/main/scala/org/gbougeard/model/changes/CommitInfo.scala | Scala | apache-2.0 | 624 |
package me.yingrui.segment.word2vec
import java.io._
import java.lang.Math.sqrt
import java.lang.System.currentTimeMillis
import scala.collection.mutable.ListBuffer
/** Streams word indices (big-endian ints, as written by the indexing step) and
  * provides word2vec-style helpers: fixed-size context-window extraction and
  * random sub-sampling of frequent words.
  *
  * @param inputFile path of the binary word-index file
  * @param vocab     vocabulary used to look up word counts
  * @param window    half-width of the context window (full window is 2*window+1)
  */
class WordIndexReader(val inputFile: String, val vocab: Vocabulary, val window: Int = 5) {

  private val stream = new DataInputStream(new FileInputStream(inputFile))
  private val rng = new scala.util.Random(currentTimeMillis())

  // Kept from the original implementation; simply forwards to readWord().
  private def read(): Int = readWord()

  /** Closes the underlying input stream. */
  def close(): Unit = stream.close()

  /** Returns the window of size `2 * window + 1` centred on `index`,
    * padding with 0 wherever the window extends beyond `words`. */
  def readWindow(words: List[Int], index: Int): List[Int] = {
    val leftPad =
      if (index < window) List.fill(window - index)(0)
      else Nil
    val end = index + window + 1
    val core = words.slice(index - window, if (end > words.size) words.size else end)
    val combined = leftPad ++ core
    val missing = 2 * window + 1 - combined.size
    if (missing > 0) combined ++ List.fill(missing)(0) else combined
  }

  /** Reads up to `length` word indices, randomly discarding frequent words using
    * the word2vec sub-sampling formula parameterised by `sample`.
    *
    * @return (number of words consumed from the stream, kept word indices)
    */
  def readWordListAndRandomlyDiscardFrequentWords(length: Int, sample: Double): (Long, List[Int]) = {
    assert(length > 0)
    val kept = new ListBuffer[Int]()
    var consumed = 0L
    var current = readWord()
    while (current > 0 && kept.size < length) {
      consumed += 1
      val wordCount = vocab.getCountByIndex(current).toDouble
      val total = vocab.getTotalWordCount.toDouble
      // Keep probability; rare words yield values > 1 and are always kept.
      val keepProbability = (sqrt(wordCount / (sample * total)) + 1D) * (sample * total) / wordCount
      val draw = rng.nextDouble()
      if (keepProbability > draw) kept += current
      current = readWord()
    }
    (consumed, kept.toList)
  }

  // Returns -1 once the end of the stream is reached.
  private def readWord(): Int =
    try {
      stream.readInt()
    } catch {
      case _: EOFException => -1
    }
}
| yingrui/mahjong | lib-segment/src/main/scala/me/yingrui/segment/word2vec/WordIndexReader.scala | Scala | gpl-3.0 | 1,687 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.processor
import java.io._
import java.util.{Properties ⇒ JProperties}
import javax.activation.{DataHandler, DataSource}
import javax.mail.Message.RecipientType
import javax.mail._
import javax.mail.internet._
import javax.xml.transform.OutputKeys
import javax.xml.transform.stream.StreamResult
import org.orbeon.dom.{Document, DocumentFactory, Element}
import org.orbeon.oxf.common.{OXFException, ValidationException}
import org.orbeon.oxf.http.Headers
import org.orbeon.oxf.pipeline.api.PipelineContext
import org.orbeon.oxf.processor.EmailProcessor._
import org.orbeon.oxf.properties.PropertySet
import org.orbeon.oxf.util.IOUtils._
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.util._
import org.orbeon.oxf.xml._
import org.orbeon.oxf.xml.dom4j._
import scala.collection.JavaConverters._
/**
* This processor allows sending emails. It supports multipart messages and inline as well as out-of-line attachments.
*
* For some useful JavaMail information: http://java.sun.com/products/javamail/FAQ.html
*
* TODO:
*
* - built-in support for HTML could handle src="cid:*" with part/message ids
* - build message with SAX, not DOM, so streaming of input is possible [not necessarily a big win]
*/
class EmailProcessor extends ProcessorImpl {

  import Private._

  addInputInfo(new ProcessorInputOutputInfo(ProcessorImpl.INPUT_DATA, ConfigNamespaceURI))

  /** Reads the message description from the `data` input, builds a JavaMail
    * message (recipients, headers, subject, single- or multi-part body) and
    * sends it over SMTP. */
  override def start(pipelineContext: PipelineContext): Unit = {

    val dataDocument = readInputAsOrbeonDom(pipelineContext, ProcessorImpl.INPUT_DATA)
    val messageElement = dataDocument.getRootElement

    // Get system id (will likely be null if document is generated dynamically)
    val dataInputSystemId = Option(messageElement.getData) map (_.asInstanceOf[LocationData].file) orNull

    implicit val propertySet = getPropertySet

    // Set SMTP host: test property wins, then element/property lookup.
    val properties = new JProperties
    val host =
      propertySet.getNonBlankString(TestSMTPHost) orElse
        valueFromElementOrProperty(messageElement, SMTPHost) getOrElse
        (throw new OXFException("Could not find SMTP host in configuration or in properties"))

    properties.setProperty("mail.smtp.host", host)

    // Create session
    val session = {

      // Get credentials if any, from the `credentials` element or from properties.
      val (usernameOption, passwordOption) = {
        Option(messageElement.element("credentials")) match {
          case Some(credentials) ⇒
            val usernameElement = credentials.element(Username)
            val passwordElement = credentials.element(Password)
            (optionalValueTrim(usernameElement), optionalValueTrim(passwordElement))
          case None ⇒
            (propertySet.getNonBlankString(Username), propertySet.getNonBlankString(Password))
        }
      }

      // NOTE(review): the message text below contains a typo ("Credentails");
      // kept as-is here since it is a runtime string.
      def ensureCredentials(encryption: String) =
        if (usernameOption.isEmpty)
          throw new OXFException("Credentails are required when using " + encryption.toUpperCase)

      val defaultUpdatePort: String ⇒ Unit =
        properties.setProperty("mail.smtp.port", _)

      // SSL and TLS: pick a default port and a port-setter per encryption mode.
      val (defaultPort, updatePort) =
        valueFromElementOrProperty(messageElement, Encryption) match {
          case Some("ssl") ⇒
            ensureCredentials("ssl") // partly enforced by the schema, but could have been blank

            properties.setProperty("mail.smtp.auth", "true")
            properties.setProperty("mail.smtp.socketFactory.class", "javax.net.ssl.SSLSocketFactory")

            val updatePort: String ⇒ Unit = { port ⇒
              properties.setProperty("mail.smtp.socketFactory.port", port)
              defaultUpdatePort(port)
            }

            // Should we change the default to 587?
            // "Although some servers support port 465 for legacy secure SMTP in violation of the
            // specifications" http://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol#Ports
            (Some("465"), updatePort)
          case Some("tls") ⇒
            ensureCredentials("tls") // partly enforced by the schema, but could have been blank

            properties.setProperty("mail.smtp.auth", "true")
            properties.setProperty("mail.smtp.starttls.enable", "true")
            (Some("587"), defaultUpdatePort)
          case _ ⇒
            (None, defaultUpdatePort)
        }

      // Set or override port depending on the encryption settings
      valueFromElementOrProperty(messageElement, SMTPPort) orElse defaultPort foreach updatePort

      usernameOption match {
        case Some(username) ⇒
          if (Logger.isInfoEnabled) Logger.info("Authentication")

          properties.setProperty("mail.smtp.auth", "true")

          if (Logger.isInfoEnabled) Logger.info("Username: " + usernameOption)

          Session.getInstance(properties, new Authenticator {
            override def getPasswordAuthentication: PasswordAuthentication = {
              new PasswordAuthentication(username, passwordOption getOrElse "")
            }
          })
        case None ⇒
          if (Logger.isInfoEnabled) Logger.info("No Authentication")
          Session.getInstance(properties)
      }
    }

    // Create message
    val message = new MimeMessage(session)

    // Builds addresses from an element with a required `email` child and optional `name` child.
    def createAddresses(addressElement: Element): Array[Address] = {
      val email = addressElement.element("email").getTextTrim // required

      val result = Option(addressElement.element("name")) match {
        case Some(nameElement) ⇒ Seq(new InternetAddress(email, nameElement.getTextTrim))
        case None ⇒ InternetAddress.parse(email).toList
      }

      result.toArray
    }

    def addRecipients(elementName: String, recipientType: RecipientType) =
      for (element ← messageElement.elements(elementName).asScala) {
        val addresses = createAddresses(element)
        message.addRecipients(recipientType, addresses)
      }

    // Set From
    message.addFrom(createAddresses(messageElement.element("from")))

    // Set To: the test-to property redirects all mail to a single test address.
    propertySet.getNonBlankString(TestTo) match {
      case Some(testTo) ⇒ message.addRecipient(Message.RecipientType.TO, new InternetAddress(testTo))
      case None ⇒ addRecipients("to", Message.RecipientType.TO)
    }

    addRecipients("cc", Message.RecipientType.CC)
    addRecipients("bcc", Message.RecipientType.BCC)

    // Set headers if any
    for (headerElement ← messageElement.elements("header").asScala) {
      val headerName = headerElement.element("name").getTextTrim // required
      val headerValue = headerElement.element("value").getTextTrim // required

      // NOTE: Use encodeText() in case there are non-ASCII characters
      message.addHeader(headerName, MimeUtility.encodeText(headerValue, DefaultCharacterEncoding, null))
    }

    // Set the email subject
    // The JavaMail spec is badly written and is not clear about whether this needs to be done here. But it
    // seems to use the platform's default charset, which we don't want to deal with. So we preemptively encode.
    // The result is pure ASCII so that setSubject() will not attempt to re-encode it.
    message.setSubject(MimeUtility.encodeText(messageElement.element("subject").getStringValue, DefaultCharacterEncoding, null))

    // Handle body
    val textElement = messageElement.element("text")
    val bodyElement = messageElement.element("body")

    if (textElement ne null)
      // Old deprecated mechanism (simple text body)
      message.setText(textElement.getStringValue)
    else if (bodyElement ne null)
      // New mechanism with body and parts
      handleBody(pipelineContext, dataInputSystemId, message, bodyElement)
    else
      throw new OXFException("Main text or body element not found")

    // Send message
    useAndClose(session.getTransport("smtp")) { _ ⇒
      Transport.send(message)
    }
  }

  private object Private {

    /** Recursively builds the message body: creates a MimeMultipart when a
      * multipart is requested (explicitly or via nested `part` elements),
      * otherwise delegates to `handlePart`. */
    def handleBody(pipelineContext: PipelineContext, dataInputSystemId: String, parentPart: Part, bodyElement: Element): Unit = {

      // Find out if there are embedded parts
      val parts = bodyElement.elementIterator("part")
      val multipartOption =
        if (bodyElement.getName == "body") {
          val bodyMultipart = bodyElement.attributeValueOpt("mime-multipart")

          if (parts.hasNext)
            bodyMultipart orElse Some("mixed")
          else if (bodyMultipart.isDefined)
            throw new OXFException("mime-multipart attribute on body element requires part children elements")
          else
            None
        } else {
          // Nested part: the multipart subtype comes from the content-type attribute.
          ContentTypes.getContentTypeMediaType(bodyElement.attributeValue(Headers.ContentTypeLower)) filter
            (_.startsWith("multipart/")) map
            (_.substring("multipart/".length))
        }

      multipartOption match {
        case Some(multipart) ⇒
          // Multipart content is requested
          val mimeMultipart = new MimeMultipart(multipart)
          while (parts.hasNext) {
            val partElement = parts.next()

            val mimeBodyPart = new MimeBodyPart
            handleBody(pipelineContext, dataInputSystemId, mimeBodyPart, partElement)
            mimeMultipart.addBodyPart(mimeBodyPart)
          }

          // Set content on parent part
          parentPart.setContent(mimeMultipart)
        case None ⇒
          // No multipart, just use the content of the element and add to the current part (which can be the main message)
          handlePart(pipelineContext, dataInputSystemId, parentPart, bodyElement)
      }
    }

    /** Fills a single part: resolves its content (streamed from `src` or inline),
      * wraps it in a DataHandler as text or binary, and sets the optional
      * content-disposition/content-id headers. */
    def handlePart(pipelineContext: PipelineContext, dataInputSystemId: String, parentPart: Part, partOrBodyElement: Element): Unit = {
      val name = partOrBodyElement.attributeValue("name")
      val contentTypeAttribute = partOrBodyElement.attributeValue(Headers.ContentTypeLower)
      val mediatype = ContentTypes.getContentTypeMediaType(contentTypeAttribute) getOrElse (throw new IllegalArgumentException)
      val charset = ContentTypes.getContentTypeCharset(contentTypeAttribute) getOrElse DefaultCharacterEncoding

      val contentTypeWithCharset = mediatype + "; charset=" + charset

      // Either a String or a FileItem
      val content =
        partOrBodyElement.attributeValueOpt("src") match {
          case Some(src) ⇒
            // Content of the part is not inline

            // Generate a FileItem from the source
            val source = PartUtils.getSAXSource(EmailProcessor.this, pipelineContext, src, dataInputSystemId, mediatype)
            Left(PartUtils.handleStreamedPartContent(pipelineContext, source))
          case None ⇒
            // Content of the part is inline

            // For HTML, we support inline HTML or inline XHTML for backward compatibility
            val needsRootElement = mediatype == ContentTypes.XhtmlContentType
            val mayHaveRootElement = mediatype == ContentTypes.HtmlContentType

            if (needsRootElement && partOrBodyElement.elements.size != 1)
              throw new ValidationException(
                s"The `<body>` or `<part>` element must contain exactly one element for ${ContentTypes.XhtmlContentType}",
                partOrBodyElement.getData.asInstanceOf[LocationData]
              )

            val hasRootElement = needsRootElement || mayHaveRootElement && ! partOrBodyElement.elements.isEmpty

            // Create Document and convert it into a String
            val rootElement = if (hasRootElement) partOrBodyElement.elements.get(0) else partOrBodyElement
            val partDocument = DocumentFactory.createDocument
            partDocument.setRootElement(rootElement.deepCopy.asInstanceOf[Element])
            Right(handleInlinePartContent(partDocument, mediatype, hasRootElement))
        }

      if (! ContentTypes.isTextOrJSONContentType(mediatype)) {
        // This is binary content (including application/xml)
        content match {
          case Left(fileItem) ⇒
            parentPart.setDataHandler(new DataHandler(new ReadonlyDataSource {
              def getContentType = mediatype
              def getInputStream = fileItem.getInputStream
              def getName = name
            }))
          case Right(inline) ⇒
            // Inline binary content is expected to be base64-encoded text.
            val data = NetUtils.base64StringToByteArray(inline)
            parentPart.setDataHandler(new DataHandler(new SimpleBinaryDataSource(name, mediatype, data)))
        }
      } else {
        // This is text content (including text/xml)
        content match {
          case Left(fileItem) ⇒
            parentPart.setDataHandler(new DataHandler(new ReadonlyDataSource {
              // This always contains a charset
              def getContentType = contentTypeWithCharset
              // This is encoded with the appropriate charset (user-defined, or the default)
              def getInputStream = fileItem.getInputStream
              def getName = name
            }))
          case Right(inline) ⇒
            parentPart.setDataHandler(new DataHandler(new SimpleTextDataSource(name, contentTypeWithCharset, inline)))
        }
      }

      // Set content-disposition header
      partOrBodyElement.attributeValueOpt("content-disposition") foreach
        (contentDisposition ⇒ parentPart.setDisposition(contentDisposition))

      // Set content-id header
      partOrBodyElement.attributeValueOpt("content-id") foreach
        (contentId ⇒ parentPart.setHeader("content-id", "<" + contentId + ">"))
      //part.setContentID(contentId);
    }

    /** Serializes inline part content: XHTML/HTML with a root element is
      * serialized as HTML text; everything else returns the text nodes. */
    def handleInlinePartContent(document: Document, contentType: String, hasRootElement: Boolean) =
      if (hasRootElement) {
        // Convert nested XHTML into an HTML String
        val writer = new StringBuilderWriter
        val identity = TransformerUtils.getIdentityTransformerHandler
        identity.getTransformer.setOutputProperty(OutputKeys.METHOD, "html")
        identity.setResult(new StreamResult(writer))

        val locationSAXWriter = new LocationSAXWriter
        locationSAXWriter.setContentHandler(identity)
        locationSAXWriter.write(document)

        writer.toString
      } else
        // For other types, just return the text nodes
        document.getStringValue
  }
}
private object EmailProcessor {

  val Logger = LoggerFactory.createLogger(classOf[EmailProcessor])

  // Names of the configuration elements / properties looked up by the processor.
  val SMTPHost = "smtp-host"
  val SMTPPort = "smtp-port"
  val Username = "username"
  val Password = "password"
  val Encryption = "encryption"
  val TestTo = "test-to"
  val TestSMTPHost = "test-smtp-host"

  val ConfigNamespaceURI = "http://www.orbeon.com/oxf/email"

  // Use utf-8 as most email clients support it. This allows us not to have to pick an inferior encoding.
  val DefaultCharacterEncoding = "utf-8"

  // Get Some(trimmed value of the element) or None if the element is null
  def optionalValueTrim(e: Element) = (Option(e) map(_.getStringValue) orNull).trimAllToOpt

  // First try to get the value from a child element, then from the properties
  def valueFromElementOrProperty(e: Element, name: String)(implicit propertySet: PropertySet) =
    optionalValueTrim(e.element(name)) orElse propertySet.getNonBlankString(name)

  // DataSource that rejects writes; all sources used here are read-only.
  trait ReadonlyDataSource extends DataSource {
    def getOutputStream = throw new IOException("Write operation not supported")
  }

  // In-memory text part source; content is encoded as utf-8 bytes.
  class SimpleTextDataSource(val getName: String, val getContentType: String, text: String) extends ReadonlyDataSource {
    def getInputStream = new ByteArrayInputStream(text.getBytes("utf-8"))
  }

  // In-memory binary part source.
  class SimpleBinaryDataSource(val getName: String, val getContentType: String, data: Array[Byte]) extends ReadonlyDataSource {
    def getInputStream = new ByteArrayInputStream(data)
  }
}
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/processor/EmailProcessor.scala | Scala | lgpl-2.1 | 16,301 |
package breeze.numerics
import org.scalatest.funsuite.AnyFunSuite
import breeze.numerics.constants._
/**
* @author ktakagaki
* @date 3/13/14.
*/
class constantsTest extends AnyFunSuite {

  test("constants test") {
    // Unit lookup by exact constant name.
    assert(Database.unit("atomic mass constant energy equivalent") == "J")
    // Regex lookup returns an entry per matching constant name; 12 expected for Planck-related entries.
    assert(Database.unit(""".*Planck.*""".r).size == 12)
  }
}
| scalanlp/breeze | math/src/test/scala/breeze/numerics/constantsTest.scala | Scala | apache-2.0 | 359 |
/*
* #%L
* Active OCR Utilities
* %%
* Copyright (C) 2011 - 2012 Maryland Institute for Technology in the Humanities
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package edu.umd.mith.activeocr.util.model
import java.net.URI
import javax.imageio.ImageIO
import scala.util.control.Breaks._
import scala.xml.MetaData
import scala.xml.pull._
/** Pull-parser for Tesseract-produced hOCR documents. Walks the XML event stream
  * and builds the Page / Zone / Line / Word model; hOCR layout levels are
  * recognised through the `class` attribute (`ocr_page`, `ocr_carea`,
  * `ocr_line`, `ocrx_word`). */
object TessReader extends HocrReader {

  /** Parses every `ocr_page` div in the document. Page dimensions are taken
    * from the facsimile image itself rather than from the hOCR bbox. */
  def parsePage(reader: XMLEventReader, facsimileUri: URI): Seq[Page] = {
    var pages = Seq[Page]()
    val image = ImageIO.read(facsimileUri.toURL)
    while (reader.hasNext) {
      reader.next match {
        case EvElemStart(_, "div", attrs, _) =>
          val clss = attrs.asAttrMap.getOrElse("class", "")
          if (clss == "ocr_page") {
            val page = makeNewPage(
              reader, attrs, facsimileUri, image.getWidth, image.getHeight
            )
            pages = pages :+ page
          }
          else assert(false, "Unexpected <div>.")
        case EvElemStart(_, "title", _, _) => eatTitle(reader)
        case EvElemStart(_, "body"|"head"|"html"|"meta", _, _) => ()
        case EvElemEnd(_, "body"|"head"|"html"|"meta") => ()
        case EvText(text) => assume(text.trim.isEmpty)
        case _: EvComment => ()
        case _ => assert(false, "Unexpected XML event.")
      }
    }
    pages
  }

  /** Consumes events up to the page's closing `</div>`, collecting each
    * `ocr_carea` div as a zone. */
  override def makeNewPage(reader: XMLEventReader, attributes: MetaData,
    uri: URI, imageW: Int, imageH: Int): Page = {
    var page = new Page(IndexedSeq[Zone](), uri.toString, imageW, imageH)
    breakable {
      while (reader.hasNext) {
        reader.next match {
          case EvElemEnd(_, "div") => break
          case EvElemStart(_, "div", attrs, _) =>
            val clss = attrs.asAttrMap.getOrElse("class", "")
            if (clss == "ocr_carea") {
              page = page.addChild(makeNewZone(reader, attrs))
            }
            else assert(false, "Unexpected <div>.")
          case EvText(text) => assume(text.trim.isEmpty)
          case _ => assert(false, "Unexpected XML event.")
        }
      }
    }
    page
  }

  /** Consumes a zone (`ocr_carea`), collecting its `ocr_line` spans.
    * Paragraph tags inside the zone are skipped. */
  def makeNewZone(reader: XMLEventReader, attributes: MetaData): Zone = {
    var zone = new Zone(IndexedSeq[Line]())
    breakable {
      while (reader.hasNext) {
        reader.next match {
          case EvElemEnd(_, "div") => break
          case EvElemStart(_, "span", attrs, _) =>
            val clss = attrs.asAttrMap.getOrElse("class", "")
            if (clss == "ocr_line") {
              zone = zone.addChild(makeNewLine(reader, attrs))
            }
            else assert(false, "Unexpected <span>.")
          case EvElemStart(_, "p", attrs, _) => () // do nothing
          case EvElemEnd(_, "p") => () // do nothing
          case EvText(text) => assume(text.trim.isEmpty)
          case _ => assert(false, "Unexpected XML event.")
        }
      }
    }
    zone
  }

  /** Consumes a line (`ocr_line`), collecting its `ocrx_word` spans. */
  def makeNewLine(reader: XMLEventReader, attributes: MetaData): Line = {
    var line = new ContLine(IndexedSeq[Word]())
    breakable {
      while (reader.hasNext) {
        reader.next match {
          case EvElemEnd(_, "span") => break
          case EvElemStart(_, "span", attrs, _) =>
            val clss = attrs.asAttrMap.getOrElse("class", "")
            if (clss == "ocrx_word") {
              line = line.addChild(makeNewWord(reader, attrs))
            }
            else assert(false, "Unexpected <span>.")
          case EvText(text) => assume(text.trim.isEmpty)
          case _ => assert(false, "Unexpected XML event.")
        }
      }
    }
    line
  }

  /** Consumes one `ocrx_word` span, accumulating its (entity-unescaped) text,
    * then builds the Word from the bbox in the `title` attribute. */
  def makeNewWord(reader: XMLEventReader, attributes: MetaData): Word = {
    val title = attributes.asAttrMap.getOrElse("title", "")
    // NOTE(review): x/y/w/h are computed but never used below — the bbox is
    // re-parsed from `title` via HocrBboxParser instead. Candidate for removal.
    val (x, y, w, h) = unpackDimensions(title)
    var tmpWord = ""
    breakable {
      while (reader.hasNext) {
        val event = reader.next
        event match {
          case EvElemEnd(_, "span") => break
          case EvElemStart(_, "em"|"strong", _, _) => ()
          case EvElemEnd(_, "em"|"strong") => ()
          case EvEntityRef("amp") => tmpWord += "&"
          case EvEntityRef("gt") => tmpWord += ">"
          case EvEntityRef("lt") => tmpWord += "<"
          case EvEntityRef("quot") => tmpWord += "\""
          case EvText(text) => tmpWord += text
          case _ => assert(false, "Unexpected XML event.")
        }
      }
    }
    // A title of the exact form "bbox … ; cuts" is reduced to the bbox portion
    // before parsing; otherwise the full title is handed to the parser.
    val bboxAndCuts = title
    val matchString = """(bbox \d+ \d+ \d+ \d+); cuts$"""
    val Re = matchString.r
    val word =
      if (bboxAndCuts.matches(matchString)) {
        val Re(bboxOnly) = title
        HocrBboxParser(bboxOnly).get.toWord(tmpWord)
      }
      else {
        HocrBboxParser(bboxAndCuts).get.toWord(tmpWord)
      }
    word
  }

  /** Extracts (x, y, width, height) from a `bbox x0 y0 x1 y1` title attribute. */
  def unpackDimensions(title: String): (Int, Int, Int, Int) = {
    val Re = ".*bbox (\\d+) (\\d+) (\\d+) (\\d+).*".r
    val Re(x0, y0, x1, y1) = title
    val x = x0.toInt; val y = y0.toInt
    val w = x1.toInt - x0.toInt
    val h = y1.toInt - y0.toInt
    (x, y, w, h)
  }
}
| umd-mith/activeocr | util/src/main/scala/edu/umd/mith/activeocr/util/model/TessReader.scala | Scala | apache-2.0 | 5,502 |
package sgl.geometry
/** Pairwise intersection tests between the basic 2D shapes. */
object Collisions {

  /** True if the circles overlap or touch: squared distance between centers
    * is at most the squared sum of radii (avoids a square root). */
  def circleWithCircle(c1: Circle, c2: Circle): Boolean = {
    val dist = (c1.center - c2.center)
    val n2 = dist.x*dist.x + dist.y*dist.y
    n2 <= (c1.radius+c2.radius)*(c1.radius+c2.radius)
  }

  /** Axis-aligned rectangle overlap test. Note the strict comparisons: two
    * rectangles that merely share an edge do NOT count as colliding. */
  def aabbWithAabb(r1: Rect, r2: Rect): Boolean = {
    val noCollision = r2.left >= r1.left + r1.width ||
      r2.left + r2.width <= r1.left ||
      r2.top >= r1.top + r1.height ||
      r2.top + r2.height <= r1.top
    !noCollision
  }

  /** Circle vs axis-aligned rectangle: quick bounding-box rejection first,
    * then vertex-in-circle, center-in-rect, and finally edge-projection tests. */
  def circleWithAabb(c: Circle, r: Rect): Boolean = {
    val circleBoundingBox = c.boundingBox
    if(!aabbWithAabb(circleBoundingBox, r)) {
      //no collision with overapproximation rect means for sure no collision
      false
    } else if(r.vertices.exists(p => c.intersect(p))) {
      //if one of the vertices of rect is in circle, we found a real collision
      true
    } else if(r.intersect(c.center)) {
      true
    } else {
      /* finally, there are two remaining cases. Either the circle intersects the
       * rectangle from one of the side, or it does not intersect at all.
       */
      val verticalProj = projectsOnSegment(c.center, r.topLeft, r.bottomLeft)
      val horizontalProj = projectsOnSegment(c.center, r.topLeft, r.topRight)
      if(verticalProj || horizontalProj)
        true
      else
        false
    }
  }

  /** Check collision of convex polygon versus polygon using Separated Axis Theorem.
    *
    * The SAT technique compares the shadows of each polygon along all axis
    * defined by all lines of both polygons, and if they don't overlap in any
    * of them, then the polygons do not intersect. This only works with convex
    * polygons.
    *
    * Axis are created by taking the normal of each line of each polygons.
    */
  def polygonWithPolygonSat(p1: Polygon, p2: Polygon): Boolean = {
    // Check for all axis of p1, that the shadows overlap.
    def check(p1: Polygon, p2: Polygon): Boolean = {
      for(i <- 0 until p1.nbEdges) {
        val a = p1.edgeStart(i)
        val b = p1.edgeEnd(i)
        val n = (b-a).normal
        // We consider the axis defined by the normal to the line segment a->b.
        // We project all points to it to get a range for the shadow.
        var p1min = Float.MaxValue
        var p1max = Float.MinValue
        for(v <- p1.vertices) {
          val dp = v*n
          p1min = p1min min dp
          p1max = p1max max dp
        }
        var p2min = Float.MaxValue
        var p2max = Float.MinValue
        for(v <- p2.vertices) {
          val dp = v*n
          p2min = p2min min dp
          p2max = p2max max dp
        }
        // Finally, check if they overlap.
        if(p1min > p2max || p2min > p1max)
          return false
      }
      return true
    }
    // Both orientations must be checked: each polygon contributes its own axes.
    check(p1, p2) && check(p2, p1)
  }

  // True when the perpendicular projection of c onto the (ss, se) line falls
  // strictly between the segment endpoints (the two dot products have opposite signs).
  private def projectsOnSegment(c: Point, ss: Point, se: Point): Boolean = {
    val s1 = (c - ss) * (se - ss)
    val s2 = (c - se) * (se - ss)
    s1*s2 < 0
  }
}
| regb/scala-game-library | core/src/main/scala/sgl/geometry/Collisions.scala | Scala | mit | 3,022 |
package com.monovore.example.coast
import com.monovore.coast.flow._
import com.monovore.coast.wire.BinaryFormat
import scala.collection.immutable.SortedSet
/**
* An implementation of connected components: given a stream of new edges, we
* incrementally maintain a mapping of node id to component id -- where the id
* for the component is the smallest id of any node in that component.
*
* This cribs heavily off the MR-based implementation presented here:
*
* http://mmds-data.org/presentations/2014_/vassilvitskii_mmds14.pdf
*/
object ConnectedComponents extends ExampleMain {

  import com.monovore.coast.wire.pretty._

  type NodeID = Long

  // Neighbour sets are shipped between stages as serialized sorted sets.
  implicit val eventFormat = BinaryFormat.javaSerialization[SortedSet[NodeID]]

  val Edges = Topic[Long, Long]("edges")
  val Components = Topic[Long, Long]("components")

  // Emits both directions of an undirected edge.
  def connect(a: NodeID, b: NodeID) = Seq(a -> b, b -> a)

  implicit val graph = Flow.context()

  // Normalizes the raw edge stream into (node -> neighbour) events in both directions.
  val connected =
    Edges.asSource
      .zipWithKey
      .flatMap { case (one, other) => connect(one, other) }
      .groupByKey
      .addStream("connected-input")

  // Iterates small-star / large-star steps (per the MR-style algorithm cited
  // above) through a feedback cycle until edges stabilize.
  val largeStar = graph.addCycle[NodeID, NodeID]("large-star") { largeStar =>

    // Small-star: keyed per node, maintaining the node's known neighbour set;
    // rewires edges so larger ids point at the minimum of the neighbourhood.
    val smallStar =
      Flow.merge("large" -> largeStar, "input" -> connected)
        .withKeys.transform(SortedSet.empty[NodeID]) { node => (neighbours, newEdge) =>

          val all = (neighbours + node)
          val least = all.min

          if (node < newEdge || all.contains(newEdge)) neighbours -> Nil
          else if (least < newEdge) SortedSet(newEdge) -> connect(newEdge, least)
          else SortedSet(newEdge) -> all.toSeq.flatMap(connect(_, newEdge))
        }
        .groupByKey
        .addStream("small-star")

    // Large-star step feeding back into the cycle.
    smallStar
      .withKeys.transform(SortedSet.empty[NodeID]) { node => (neighbours, newEdge) =>

        val all = neighbours + node
        val least = all.min
        val newNeigbours = neighbours + newEdge

        if (newEdge < least) {
          val larger = neighbours.toSeq.filter {_ > node}
          newNeigbours -> larger.flatMap {connect(_, newEdge)}
        }
        else if (newEdge < node || all.contains(newEdge)) newNeigbours -> Nil
        else newNeigbours -> connect(newEdge, least)
      }
      .groupByKey
  }

  // Publishes, per node, every strictly-decreasing component id observed;
  // the latest value is the node's current component (minimum reachable id).
  largeStar
    .withKeys.transform(Long.MaxValue) { node => (currentOrMax, next) =>

      val current = currentOrMax min node
      val min = current min next

      if (min < current) min -> Seq(min)
      else current -> Nil
    }
    .addSink(Components)
}
| milinda/coast | core/src/main/scala/com/monovore/example/coast/ConnectedComponents.scala | Scala | apache-2.0 | 2,510 |
/* NSC -- new scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Martin Odersky
*/
package scala
package reflect
package internal
import scala.annotation.elidable
import scala.collection.mutable
import util._
import java.util.concurrent.TimeUnit
import scala.reflect.internal.{TreeGen => InternalTreeGen}
abstract class SymbolTable extends macros.Universe
                              with Collections
                              with Names
                              with Symbols
                              with Types
                              with Variances
                              with Kinds
                              with ExistentialsAndSkolems
                              with FlagSets
                              with Scopes
                              with Mirrors
                              with Definitions
                              with Constants
                              with BaseTypeSeqs
                              with InfoTransformers
                              with transform.Transforms
                              with StdNames
                              with AnnotationInfos
                              with AnnotationCheckers
                              with Trees
                              with Printers
                              with Positions
                              with TypeDebugging
                              with Importers
                              with Required
                              with CapturedVariables
                              with StdAttachments
                              with StdCreators
                              with ReificationSupport
                              with PrivateWithin
                              with pickling.Translations
                              with FreshNames
                              with Internals
                              with Reporting
{
  // Tree factory whose `global` is pinned to this symbol table instance.
  val gen = new InternalTreeGen { val global: SymbolTable.this.type = SymbolTable.this }
  var lastKnownPosition: Position = NoPosition
  /** Log a message; the concrete destination is subclass-defined. */
  def log(msg: => AnyRef): Unit
  // Renders "<msg> in <elapsed>ms"; `start` is a millisecond stamp derived
  // from System.nanoTime (the same clock is sampled here for the delta).
  protected def elapsedMessage(msg: String, start: Long) =
    msg + " in " + (TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - start) + "ms"
  def informProgress(msg: String) = if (settings.verbose) inform("[" + msg + "]")
  def informTime(msg: String, start: Long) = informProgress(elapsedMessage(msg, start))
  def shouldLogAtThisPhase = false
  def isPastTyper = false
  protected def isDeveloper: Boolean = settings.debug
  @deprecated("use devWarning if this is really a warning; otherwise use log", "2.11.0")
  def debugwarn(msg: => String): Unit = devWarning(msg)
  /** Override with final implementation for inlining. */
  def debuglog(msg: => String): Unit = if (settings.debug) log(msg)
  def devWarning(msg: => String): Unit = if (isDeveloper) Console.err.println(msg)
  def throwableAsString(t: Throwable): String = "" + t
  def throwableAsString(t: Throwable, maxFrames: Int): String = t.getStackTrace take maxFrames mkString "\n at "
  @inline final def devWarningDumpStack(msg: => String, maxFrames: Int): Unit =
    devWarning(msg + "\n" + throwableAsString(new Throwable, maxFrames))
  /** Prints a stack trace if -Ydebug or equivalent was given, otherwise does nothing. */
  def debugStack(t: Throwable): Unit = devWarning(throwableAsString(t))
  // Debug helper: prints `result` plus the caller's stack, returning `result` unchanged.
  private[scala] def printCaller[T](msg: String)(result: T) = {
    Console.err.println("%s: %s\nCalled from: %s".format(msg, result,
      (new Throwable).getStackTrace.drop(2).take(50).mkString("\n")))
    result
  }
  // The *Result helpers below log/print a value and pass it through unchanged,
  // so they can be inserted into expressions without altering behavior.
  private[scala] def printResult[T](msg: String)(result: T) = {
    Console.err.println(msg + ": " + result)
    result
  }
  @inline
  final private[scala] def logResult[T](msg: => String)(result: T): T = {
    log(msg + ": " + result)
    result
  }
  @inline
  final private[scala] def debuglogResult[T](msg: => String)(result: T): T = {
    debuglog(msg + ": " + result)
    result
  }
  @inline
  final private[scala] def devWarningResult[T](msg: => String)(result: T): T = {
    devWarning(msg + ": " + result)
    result
  }
  @inline
  final private[scala] def logResultIf[T](msg: => String, cond: T => Boolean)(result: T): T = {
    if (cond(result))
      log(msg + ": " + result)
    result
  }
  @inline
  final private[scala] def debuglogResultIf[T](msg: => String, cond: T => Boolean)(result: T): T = {
    if (cond(result))
      debuglog(msg + ": " + result)
    result
  }
  /** First symbol in `xs` satisfying `p`, or NoSymbol if none does. */
  @inline final def findSymbol(xs: TraversableOnce[Symbol])(p: Symbol => Boolean): Symbol = {
    xs find p getOrElse NoSymbol
  }
  // For too long have we suffered in order to sort NAMES.
  // I'm pretty sure there's a reasonable default for that.
  // Notice challenge created by Ordering's invariance.
  implicit def lowPriorityNameOrdering[T <: Names#Name]: Ordering[T] =
    SimpleNameOrdering.asInstanceOf[Ordering[T]]
  private object SimpleNameOrdering extends Ordering[Names#Name] {
    def compare(n1: Names#Name, n2: Names#Name) = (
      if (n1 eq n2) 0
      else n1.toString compareTo n2.toString
    )
  }
  /** Dump each symbol to stdout after shutdown.
   */
  final val traceSymbolActivity = sys.props contains "scalac.debug.syms"
  object traceSymbols extends {
    val global: SymbolTable.this.type = SymbolTable.this
  } with util.TraceSymbolActivity
  val treeInfo: TreeInfo { val global: SymbolTable.this.type }
  /** Check that the executing thread is the compiler thread. No-op here,
   * overridden in interactive.Global. */
  @elidable(elidable.WARNING)
  def assertCorrectThread() {}
  /** A last effort if symbol in a select <owner>.<name> is not found.
   * This is overridden by the reflection compiler to make up a package
   * when it makes sense (i.e. <owner> is a package and <name> is a term name).
   */
  def missingHook(owner: Symbol, name: Name): Symbol = NoSymbol
  /** Returns the mirror that loaded given symbol */
  def mirrorThatLoaded(sym: Symbol): Mirror
  /** A period is an ordinal number for a phase in a run.
   * Phases in later runs have higher periods than phases in earlier runs.
   * Later phases have higher periods than earlier phases in the same run.
   */
  // A Period packs a run id and a phase id into one Int:
  // (runId << 8) | phaseId -- see runId/phaseId/period below.
  type Period = Int
  final val NoPeriod = 0
  /** An ordinal number for compiler runs. First run has number 1. */
  type RunId = Int
  final val NoRunId = 0
  // sigh, this has to be public or enteringPhase doesn't inline.
  var phStack: List[Phase] = Nil
  private[this] var ph: Phase = NoPhase
  private[this] var per = NoPeriod
  final def atPhaseStack: List[Phase] = phStack
  final def phase: Phase = {
    if (Statistics.hotEnabled)
      Statistics.incCounter(SymbolTableStats.phaseCounter)
    ph
  }
  def atPhaseStackMessage = atPhaseStack match {
    case Nil => ""
    case ps => ps.reverseMap("->" + _).mkString("(", " ", ")")
  }
  final def phase_=(p: Phase) {
    //System.out.println("setting phase to " + p)
    assert((p ne null) && p != NoPhase, p)
    ph = p
    per = period(currentRunId, p.id)
  }
  /** Makes `ph` the current phase, pushes it on the phase stack, and
   *  returns the previous current phase. */
  final def pushPhase(ph: Phase): Phase = {
    val current = phase
    phase = ph
    phStack ::= ph
    current
  }
  /** Drops the top of the phase stack and restores `ph` as the current phase. */
  final def popPhase(ph: Phase) {
    phStack = phStack.tail
    phase = ph
  }
  /** The current compiler run identifier. */
  def currentRunId: RunId
  /** The run identifier of the given period. */
  final def runId(period: Period): RunId = period >> 8
  /** The phase identifier of the given period. */
  final def phaseId(period: Period): Phase#Id = period & 0xFF
  /** The current period. */
  final def currentPeriod: Period = {
    //assert(per == (currentRunId << 8) + phase.id)
    per
  }
  /** The phase associated with given period. */
  final def phaseOf(period: Period): Phase = phaseWithId(phaseId(period))
  final def period(rid: RunId, pid: Phase#Id): Period =
    (rid << 8) + pid
  /** Are we later than given phase in compilation? */
  final def isAtPhaseAfter(p: Phase) =
    p != NoPhase && phase.id > p.id
  /** Perform given operation at given phase. */
  @inline final def enteringPhase[T](ph: Phase)(op: => T): T = {
    val saved = pushPhase(ph)
    try op
    finally popPhase(saved)
  }
  // Walks backwards from the current phase; yields the current phase
  // unchanged when no phase of that name is found.
  final def findPhaseWithName(phaseName: String): Phase = {
    var ph = phase
    while (ph != NoPhase && ph.name != phaseName) {
      ph = ph.prev
    }
    if (ph eq NoPhase) phase else ph
  }
  final def enteringPhaseWithName[T](phaseName: String)(body: => T): T = {
    val phase = findPhaseWithName(phaseName)
    enteringPhase(phase)(body)
  }
  // Outside a compiler universe, phase mutation is skipped entirely.
  def slowButSafeEnteringPhase[T](ph: Phase)(op: => T): T = {
    if (isCompilerUniverse) enteringPhase(ph)(op)
    else op
  }
  @inline final def exitingPhase[T](ph: Phase)(op: => T): T = enteringPhase(ph.next)(op)
  @inline final def enteringPrevPhase[T](op: => T): T = enteringPhase(phase.prev)(op)
  @inline final def enteringPhaseNotLaterThan[T](target: Phase)(op: => T): T =
    if (isAtPhaseAfter(target)) enteringPhase(target)(op) else op
  def slowButSafeEnteringPhaseNotLaterThan[T](target: Phase)(op: => T): T =
    if (isCompilerUniverse) enteringPhaseNotLaterThan(target)(op) else op
  // A period is valid for the current phase if it belongs to the current run
  // and no info transformer is scheduled between the period's phase and now.
  final def isValid(period: Period): Boolean =
    period != 0 && runId(period) == currentRunId && {
      val pid = phaseId(period)
      if (phase.id > pid) infoTransformers.nextFrom(pid).pid >= phase.id
      else infoTransformers.nextFrom(phase.id).pid >= pid
    }
  // Like isValid, but only transformers that change base classes
  // invalidate the period.
  final def isValidForBaseClasses(period: Period): Boolean = {
    def noChangeInBaseClasses(it: InfoTransformer, limit: Phase#Id): Boolean = (
      it.pid >= limit ||
      !it.changesBaseClasses && noChangeInBaseClasses(it.next, limit)
    )
    period != 0 && runId(period) == currentRunId && {
      val pid = phaseId(period)
      if (phase.id > pid) noChangeInBaseClasses(infoTransformers.nextFrom(pid), phase.id)
      else noChangeInBaseClasses(infoTransformers.nextFrom(phase.id), pid)
    }
  }
  /** Copies the non-private, non-constructor declarations of `container`
   *  into `dest`, replacing any same-named declarations already in `dest`,
   *  then recurses into `container`'s parents (except Object). */
  def openPackageModule(container: Symbol, dest: Symbol) {
    // unlink existing symbols in the package
    for (member <- container.info.decls.iterator) {
      if (!member.isPrivate && !member.isConstructor) {
        // todo: handle overlapping definitions in some way: mark as errors
        // or treat as abstractions. For now the symbol in the package module takes precedence.
        for (existing <- dest.info.decl(member.name).alternatives)
          dest.info.decls.unlink(existing)
      }
    }
    // enter non-private decls the class
    for (member <- container.info.decls.iterator) {
      if (!member.isPrivate && !member.isConstructor) {
        dest.info.decls.enter(member)
      }
    }
    // enter decls of parent classes
    for (p <- container.parentSymbols) {
      if (p != definitions.ObjectClass) {
        openPackageModule(p, dest)
      }
    }
  }
  /** Convert array parameters denoting a repeated parameter of a Java method
   * to `JavaRepeatedParamClass` types.
   */
  // NOTE(review): the match only covers MethodType and PolyType; any other
  // Type passed here throws a MatchError -- callers appear to guarantee this.
  def arrayToRepeated(tp: Type): Type = tp match {
    case MethodType(params, rtpe) =>
      val formals = tp.paramTypes
      assert(formals.last.typeSymbol == definitions.ArrayClass, formals)
      val method = params.last.owner
      val elemtp = formals.last.typeArgs.head match {
        case RefinedType(List(t1, t2), _) if (t1.typeSymbol.isAbstractType && t2.typeSymbol == definitions.ObjectClass) =>
          t1 // drop intersection with Object for abstract types in varargs. UnCurry can handle them.
        case t =>
          t
      }
      val newParams = method.newSyntheticValueParams(formals.init :+ definitions.javaRepeatedType(elemtp))
      MethodType(newParams, rtpe)
    case PolyType(tparams, rtpe) =>
      PolyType(tparams, arrayToRepeated(rtpe))
  }
  /** Lazy type used by symbol loaders; `fromSource` reports whether the
   *  symbol comes from source (false by default). */
  abstract class SymLoader extends LazyType {
    def fromSource = false
  }
  /** if there's a `package` member object in `pkgClass`, enter its members into it. */
  def openPackageModule(pkgClass: Symbol) {
    val pkgModule = pkgClass.packageObject
    def fromSource = pkgModule.rawInfo match {
      case ltp: SymLoader => ltp.fromSource
      case _ => false
    }
    if (pkgModule.isModule && !fromSource) {
      openPackageModule(pkgModule, pkgClass)
    }
  }
  /** Registry of clearable caches, held via weak references and cleared
   *  en masse through `clearAll`. */
  object perRunCaches {
    import scala.collection.generic.Clearable
    // Weak references so the garbage collector will take care of
    // letting us know when a cache is really out of commission.
    import java.lang.ref.WeakReference
    private var caches = List[WeakReference[Clearable]]()
    def recordCache[T <: Clearable](cache: T): T = {
      caches ::= new WeakReference(cache)
      cache
    }
    /**
     * Removes a cache from the per-run caches. This is useful for testing: it allows running the
     * compiler and then inspect the state of a cache.
     */
    def unrecordCache[T <: Clearable](cache: T): Unit = {
      caches = caches.filterNot(_.get eq cache)
    }
    // Clears every live cache and drops entries whose referent was collected.
    def clearAll() = {
      debuglog("Clearing " + caches.size + " caches.")
      caches foreach (ref => Option(ref.get).foreach(_.clear))
      caches = caches.filterNot(_.get == null)
    }
    def newWeakMap[K, V]() = recordCache(mutable.WeakHashMap[K, V]())
    def newMap[K, V]() = recordCache(mutable.HashMap[K, V]())
    def newSet[K]() = recordCache(mutable.HashSet[K]())
    def newWeakSet[K <: AnyRef]() = recordCache(new WeakHashSet[K]())
    def newAnyRefMap[K <: AnyRef, V]() = recordCache(mutable.AnyRefMap[K, V]())
    /**
     * Register a cache specified by a factory function and (optionally) a cleanup function.
     *
     * @return A function that will return cached value, or create a fresh value when a new run is started.
     */
    def newGeneric[T](f: => T, cleanup: T => Unit = (x: Any) => ()): () => T = {
      val NoCached: T = null.asInstanceOf[T]
      var cached: T = NoCached
      var cachedRunId = NoRunId
      val clearable = new Clearable with (() => T) {
        def clear(): Unit = {
          if (cached != NoCached)
            cleanup(cached)
          cached = NoCached
        }
        // Recomputes via `f` on the first call of each new run.
        def apply(): T = {
          if (currentRunId != cachedRunId || cached == NoCached) {
            cached = f
            cachedRunId = currentRunId
          }
          cached
        }
      }
      recordCache(clearable)
    }
  }
  /** The set of all installed infotransformers. */
  // Sentinel head of the transformer chain: identity transform at NoPhase.
  var infoTransformers = new InfoTransformer {
    val pid = NoPhase.id
    val changesBaseClasses = true
    def transform(sym: Symbol, tpe: Type): Type = tpe
  }
  /** The phase which has given index as identifier. */
  val phaseWithId: Array[Phase]
  /** Is this symbol table a part of a compiler universe?
   */
  def isCompilerUniverse = false
  @deprecated("use enteringPhase", "2.10.0") // Used in SBT 0.12.4
  @inline final def atPhase[T](ph: Phase)(op: => T): T = enteringPhase(ph)(op)
  /**
   * Adds the `sm` String interpolator to a [[scala.StringContext]].
   */
  implicit val StringContextStripMarginOps: StringContext => StringContextStripMarginOps = util.StringContextStripMarginOps
}
/** Statistics counters used by [[SymbolTable]]. */
object SymbolTableStats {
  // Incremented by SymbolTable.phase when hot statistics are enabled.
  val phaseCounter = Statistics.newCounter("#phase calls")
}
| jvican/scala | src/reflect/scala/reflect/internal/SymbolTable.scala | Scala | bsd-3-clause | 15,158 |
package app.adapters.database.support
import scala.slick.driver.JdbcProfile
/** Cake-pattern slice exposing the Slick driver profile together with the
  * database configuration; concrete implementations supply both values.
  */
trait DbProfile {
  // Slick JDBC driver profile for the concrete database in use.
  val profile: JdbcProfile
  // Project-defined database configuration (see DbConfig elsewhere in the project).
  val dbConfig: DbConfig
}
| mericano1/spray-akka-slick-postgres | src/main/scala/app/adapters/database/support/DbProfile.scala | Scala | mit | 150 |
package com.socrata.balboa.impl
import com.socrata.balboa.metrics.Message
import com.socrata.metrics.components.{EmergencyFileWriterComponent, MessageQueueComponent}
import org.slf4j.LoggerFactory
/**
* Component that contains a collection of other [[MessageQueueComponent]]s. When a metrics
* message is sent through the dispatcher the metrics message is dispatched to each one of the other components.
* This dispatcher does not preform any Component Validation. If an internal component fails then it writes to its
* internal emergence handling procedure.
*/
trait BalboaDispatcherComponent extends MessageQueueComponent {
  self: DispatcherInformation with EmergencyFileWriterComponent =>
  /** Queue implementation that fans every operation out to the queues
    * produced by all configured components.
    */
  class MessageDispatcher extends MessageQueueLike {
    private val Log = LoggerFactory.getLogger(classOf[MessageDispatcher])
    /** One underlying queue per configured component, built eagerly at
      * dispatcher-creation time.
      */
    val queues = components.map(_.MessageQueue())
    /** Starts every underlying queue so the dispatcher can accept messages. */
    override def start(): Unit = for (queue <- queues) queue.start()
    /** Stops and tears down every underlying queue. */
    override def stop(): Unit = for (queue <- queues) queue.stop()
    /** Fans `msg` out to every underlying queue.
      * @param msg Messsage to send. Should not be null.
      */
    override def send(msg: Message): Unit = for (queue <- queues) queue.send(msg)
  }
  // scalastyle:off method.name
  override def MessageQueue(): MessageQueueLike = new MessageDispatcher
}
| socrata-platform/balboa | balboa-client-dispatcher/src/main/scala/com/socrata/balboa/impl/BalboaDispatcherComponent.scala | Scala | apache-2.0 | 1,537 |
import io.gatling.core.Predef._ // 2
import io.gatling.http.Predef._
import scala.concurrent.duration._
// NOTE(review): the class name says "Customer" but the scenario only hits
// /products -- consider renaming for clarity.
class CustomerResourceTest extends Simulation {
  // HTTP protocol configuration: every request targets the local server.
  val conf = http.baseURL("http://localhost:8080")
  // Scenario: each virtual user repeatedly GETs /products for one minute.
  val scn = scenario("Gatling")
    .during(1 minute) {
    exec(
      http("json").get("/products")
    )
  }
  // Inject 5 virtual users ramped linearly over 30 seconds.
  setUp(scn.inject(rampUsers(5) over (30 seconds)))
    .protocols(conf)
}
/*
* GNU GENERAL PUBLIC LICENSE
* Version 2, June 1991
*
* Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* Everyone is permitted to copy and distribute verbatim copies
* of this license document, but changing it is not allowed.
*
* Preamble
*
* The licenses for most software are designed to take away your
* freedom to share and change it. By contrast, the GNU General Public
* License is intended to guarantee your freedom to share and change free
* software--to make sure the software is free for all its users. This
* General Public License applies to most of the Free Software
* Foundation's software and to any other program whose authors commit to
* using it. (Some other Free Software Foundation software is covered by
* the GNU Lesser General Public License instead.) You can apply it to
* your programs, too.
*
* When we speak of free software, we are referring to freedom, not
* price. Our General Public Licenses are designed to make sure that you
* have the freedom to distribute copies of free software (and charge for
* this service if you wish), that you receive source code or can get it
* if you want it, that you can change the software or use pieces of it
* in new free programs; and that you know you can do these things.
*
* To protect your rights, we need to make restrictions that forbid
* anyone to deny you these rights or to ask you to surrender the rights.
* These restrictions translate to certain responsibilities for you if you
* distribute copies of the software, or if you modify it.
*
* For example, if you distribute copies of such a program, whether
* gratis or for a fee, you must give the recipients all the rights that
* you have. You must make sure that they, too, receive or can get the
* source code. And you must show them these terms so they know their
* rights.
*
* We protect your rights with two steps: (1) copyright the software, and
* (2) offer you this license which gives you legal permission to copy,
* distribute and/or modify the software.
*
* Also, for each author's protection and ours, we want to make certain
* that everyone understands that there is no warranty for this free
* software. If the software is modified by someone else and passed on, we
* want its recipients to know that what they have is not the original, so
* that any problems introduced by others will not reflect on the original
* authors' reputations.
*
* Finally, any free program is threatened constantly by software
* patents. We wish to avoid the danger that redistributors of a free
* program will individually obtain patent licenses, in effect making the
* program proprietary. To prevent this, we have made it clear that any
* patent must be licensed for everyone's free use or not licensed at all.
*
* The precise terms and conditions for copying, distribution and
* modification follow.
*
* GNU GENERAL PUBLIC LICENSE
* TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
*
* 0. This License applies to any program or other work which contains
* a notice placed by the copyright holder saying it may be distributed
* under the terms of this General Public License. The "Program", below,
* refers to any such program or work, and a "work based on the Program"
* means either the Program or any derivative work under copyright law:
* that is to say, a work containing the Program or a portion of it,
* either verbatim or with modifications and/or translated into another
* language. (Hereinafter, translation is included without limitation in
* the term "modification".) Each licensee is addressed as "you".
*
* Activities other than copying, distribution and modification are not
* covered by this License; they are outside its scope. The act of
* running the Program is not restricted, and the output from the Program
* is covered only if its contents constitute a work based on the
* Program (independent of having been made by running the Program).
* Whether that is true depends on what the Program does.
*
* 1. You may copy and distribute verbatim copies of the Program's
* source code as you receive it, in any medium, provided that you
* conspicuously and appropriately publish on each copy an appropriate
* copyright notice and disclaimer of warranty; keep intact all the
* notices that refer to this License and to the absence of any warranty;
* and give any other recipients of the Program a copy of this License
* along with the Program.
*
* You may charge a fee for the physical act of transferring a copy, and
* you may at your option offer warranty protection in exchange for a fee.
*
* 2. You may modify your copy or copies of the Program or any portion
* of it, thus forming a work based on the Program, and copy and
* distribute such modifications or work under the terms of Section 1
* above, provided that you also meet all of these conditions:
*
* a) You must cause the modified files to carry prominent notices
* stating that you changed the files and the date of any change.
*
* b) You must cause any work that you distribute or publish, that in
* whole or in part contains or is derived from the Program or any
* part thereof, to be licensed as a whole at no charge to all third
* parties under the terms of this License.
*
* c) If the modified program normally reads commands interactively
* when run, you must cause it, when started running for such
* interactive use in the most ordinary way, to print or display an
* announcement including an appropriate copyright notice and a
* notice that there is no warranty (or else, saying that you provide
* a warranty) and that users may redistribute the program under
* these conditions, and telling the user how to view a copy of this
* License. (Exception: if the Program itself is interactive but
* does not normally print such an announcement, your work based on
* the Program is not required to print an announcement.)
*
* These requirements apply to the modified work as a whole. If
* identifiable sections of that work are not derived from the Program,
* and can be reasonably considered independent and separate works in
* themselves, then this License, and its terms, do not apply to those
* sections when you distribute them as separate works. But when you
* distribute the same sections as part of a whole which is a work based
* on the Program, the distribution of the whole must be on the terms of
* this License, whose permissions for other licensees extend to the
* entire whole, and thus to each and every part regardless of who wrote it.
*
* Thus, it is not the intent of this section to claim rights or contest
* your rights to work written entirely by you; rather, the intent is to
* exercise the right to control the distribution of derivative or
* collective works based on the Program.
*
* In addition, mere aggregation of another work not based on the Program
* with the Program (or with a work based on the Program) on a volume of
* a storage or distribution medium does not bring the other work under
* the scope of this License.
*
* 3. You may copy and distribute the Program (or a work based on it,
* under Section 2) in object code or executable form under the terms of
* Sections 1 and 2 above provided that you also do one of the following:
*
* a) Accompany it with the complete corresponding machine-readable
* source code, which must be distributed under the terms of Sections
* 1 and 2 above on a medium customarily used for software interchange; or,
*
* b) Accompany it with a written offer, valid for at least three
* years, to give any third party, for a charge no more than your
* cost of physically performing source distribution, a complete
* machine-readable copy of the corresponding source code, to be
* distributed under the terms of Sections 1 and 2 above on a medium
* customarily used for software interchange; or,
*
* c) Accompany it with the information you received as to the offer
* to distribute corresponding source code. (This alternative is
* allowed only for noncommercial distribution and only if you
* received the program in object code or executable form with such
* an offer, in accord with Subsection b above.)
*
* The source code for a work means the preferred form of the work for
* making modifications to it. For an executable work, complete source
* code means all the source code for all modules it contains, plus any
* associated interface definition files, plus the scripts used to
* control compilation and installation of the executable. However, as a
* special exception, the source code distributed need not include
* anything that is normally distributed (in either source or binary
* form) with the major components (compiler, kernel, and so on) of the
* operating system on which the executable runs, unless that component
* itself accompanies the executable.
*
* If distribution of executable or object code is made by offering
* access to copy from a designated place, then offering equivalent
* access to copy the source code from the same place counts as
* distribution of the source code, even though third parties are not
* compelled to copy the source along with the object code.
*
* 4. You may not copy, modify, sublicense, or distribute the Program
* except as expressly provided under this License. Any attempt
* otherwise to copy, modify, sublicense or distribute the Program is
* void, and will automatically terminate your rights under this License.
* However, parties who have received copies, or rights, from you under
* this License will not have their licenses terminated so long as such
* parties remain in full compliance.
*
* 5. You are not required to accept this License, since you have not
* signed it. However, nothing else grants you permission to modify or
* distribute the Program or its derivative works. These actions are
* prohibited by law if you do not accept this License. Therefore, by
* modifying or distributing the Program (or any work based on the
* Program), you indicate your acceptance of this License to do so, and
* all its terms and conditions for copying, distributing or modifying
* the Program or works based on it.
*
* 6. Each time you redistribute the Program (or any work based on the
* Program), the recipient automatically receives a license from the
* original licensor to copy, distribute or modify the Program subject to
* these terms and conditions. You may not impose any further
* restrictions on the recipients' exercise of the rights granted herein.
* You are not responsible for enforcing compliance by third parties to
* this License.
*
* 7. If, as a consequence of a court judgment or allegation of patent
* infringement or for any other reason (not limited to patent issues),
* conditions are imposed on you (whether by court order, agreement or
* otherwise) that contradict the conditions of this License, they do not
* excuse you from the conditions of this License. If you cannot
* distribute so as to satisfy simultaneously your obligations under this
* License and any other pertinent obligations, then as a consequence you
* may not distribute the Program at all. For example, if a patent
* license would not permit royalty-free redistribution of the Program by
* all those who receive copies directly or indirectly through you, then
* the only way you could satisfy both it and this License would be to
* refrain entirely from distribution of the Program.
*
* If any portion of this section is held invalid or unenforceable under
* any particular circumstance, the balance of the section is intended to
* apply and the section as a whole is intended to apply in other
* circumstances.
*
* It is not the purpose of this section to induce you to infringe any
* patents or other property right claims or to contest validity of any
* such claims; this section has the sole purpose of protecting the
* integrity of the free software distribution system, which is
* implemented by public license practices. Many people have made
* generous contributions to the wide range of software distributed
* through that system in reliance on consistent application of that
* system; it is up to the author/donor to decide if he or she is willing
* to distribute software through any other system and a licensee cannot
* impose that choice.
*
* This section is intended to make thoroughly clear what is believed to
* be a consequence of the rest of this License.
*
* 8. If the distribution and/or use of the Program is restricted in
* certain countries either by patents or by copyrighted interfaces, the
* original copyright holder who places the Program under this License
* may add an explicit geographical distribution limitation excluding
* those countries, so that distribution is permitted only in or among
* countries not thus excluded. In such case, this License incorporates
* the limitation as if written in the body of this License.
*
* 9. The Free Software Foundation may publish revised and/or new versions
* of the General Public License from time to time. Such new versions will
* be similar in spirit to the present version, but may differ in detail to
* address new problems or concerns.
*
* Each version is given a distinguishing version number. If the Program
* specifies a version number of this License which applies to it and "any
* later version", you have the option of following the terms and conditions
* either of that version or of any later version published by the Free
* Software Foundation. If the Program does not specify a version number of
* this License, you may choose any version ever published by the Free Software
* Foundation.
*
* 10. If you wish to incorporate parts of the Program into other free
* programs whose distribution conditions are different, write to the author
* to ask for permission. For software which is copyrighted by the Free
* Software Foundation, write to the Free Software Foundation; we sometimes
* make exceptions for this. Our decision will be guided by the two goals
* of preserving the free status of all derivatives of our free software and
* of promoting the sharing and reuse of software generally.
*
* NO WARRANTY
*
* 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
* FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
* OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
* PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
* OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
* TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
* PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
* REPAIR OR CORRECTION.
*
* 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
* WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
* REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
* INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
* OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
* TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
* YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
* PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* END OF TERMS AND CONDITIONS
*
* How to Apply These Terms to Your New Programs
*
* If you develop a new program, and you want it to be of the greatest
* possible use to the public, the best way to achieve this is to make it
* free software which everyone can redistribute and change under these terms.
*
* To do so, attach the following notices to the program. It is safest
* to attach them to the start of each source file to most effectively
* convey the exclusion of warranty; and each file should have at least
* the "copyright" line and a pointer to where the full notice is found.
*
* {description}
* Copyright (C) {year} {fullname}
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Also add information on how to contact you by electronic and paper mail.
*
* If the program is interactive, make it output a short notice like this
* when it starts in an interactive mode:
*
* Gnomovision version 69, Copyright (C) year name of author
* Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
* This is free software, and you are welcome to redistribute it
* under certain conditions; type `show c' for details.
*
* The hypothetical commands `show w' and `show c' should show the appropriate
* parts of the General Public License. Of course, the commands you use may
* be called something other than `show w' and `show c'; they could even be
* mouse-clicks or menu items--whatever suits your program.
*
* You should also get your employer (if you work as a programmer) or your
* school, if any, to sign a "copyright disclaimer" for the program, if
* necessary. Here is a sample; alter the names:
*
* Yoyodyne, Inc., hereby disclaims all copyright interest in the program
* `Gnomovision' (which makes passes at compilers) written by James Hacker.
*
* {signature of Ty Coon}, 1 April 1989
* Ty Coon, President of Vice
*
* This General Public License does not permit incorporating your program into
* proprietary programs. If your program is a subroutine library, you may
* consider it more useful to permit linking proprietary applications with the
* library. If this is what you want to do, use the GNU Lesser General
* Public License instead of this License.
*/
package models.fhs.pages.blaimport
import models.Transactions
import models.fhs.pages.JavaList
import models.persistence.participants.{Student, Course, Group}
import models.persistence.subject.AbstractSubject
import org.hibernate.FetchMode
import org.hibernate.criterion.Restrictions
import scala.collection.JavaConversions._
import play.api.Logger
/**
* @author fabian
* on 27.12.14.
*/
/**
 * Helper operations backing the BLA file-import admin page: renaming course
 * short names for a new term, listing courses, and purging obsolete courses
 * together with their dependent groups and students.
 */
object MBLAFileUpload {

  /**
   * Bumps the trailing character of every course's short name by one code
   * point (e.g. "BAI1" becomes "BAI2") and persists the change.
   *
   * Fix: the previous implementation used `String.replace(oldChar, newChar)`,
   * which replaces *every* occurrence of the character in the name, not just
   * the last one (e.g. "1BAI1" would have become "2BAI2"). Only the final
   * character is rewritten now.
   */
  def renameCourses() = {
    Transactions.hibernateAction {
      implicit s =>
        s.createCriteria(classOf[Course]).list().asInstanceOf[JavaList[Course]].foreach {
          c =>
            val oldName = c.getShortName
            // Defensive: skip entities with a missing or empty short name
            // instead of aborting the whole transaction with an exception.
            if (oldName != null && oldName.nonEmpty) {
              val newChar = (oldName.last.toInt + 1).toChar
              c.setShortName(oldName.dropRight(1) + newChar)
              s.saveOrUpdate(c)
            }
        }
    }
  }

  /** Loads all courses (students fetched lazily) sorted by short name. */
  def findCourses() = {
    Transactions.hibernateAction {
      implicit s =>
        s.createCriteria(classOf[Course]).setFetchMode("students", FetchMode.SELECT)
          .list().asInstanceOf[JavaList[Course]].toList.sortBy(_.getShortName)
    }
  }

  /**
   * Deletes the courses with the given ids, including their groups and
   * students, after detaching them from any subject that still references
   * them (so no foreign key points at a course when it is deleted).
   *
   * @param courseIds ids of the courses to remove
   */
  def removeOldCourses(courseIds: List[Long]) = {
    Transactions.hibernateAction {
      implicit s =>
        val courses = s.createCriteria(classOf[Course])
          .add(Restrictions.in("id", courseIds)).list()
          .asInstanceOf[JavaList[Course]].toList

        // First detach the doomed courses from every subject referencing them.
        val subjectCriteria = s.createCriteria(classOf[AbstractSubject])
        subjectCriteria.createCriteria("courses").add(Restrictions.in("id", courseIds))
        subjectCriteria.list().asInstanceOf[JavaList[AbstractSubject]].foreach {
          sub =>
            sub.setCourses(sub.getCourses -- courses)
            s.saveOrUpdate(sub)
        }
        // NOTE: a second criteria over Group that only fed commented-out code
        // was removed here; the groups are deleted per-course below instead.

        courses.foreach {
          c =>
            // Orphan and delete the groups before the owning course goes away.
            c.getGroups.foreach {
              g =>
                g.setCourse(null)
                s.saveOrUpdate(g)
                s.delete(g)
            }
            c.getStudents.foreach {
              stud =>
                s.delete(stud)
            }
            c.setStudents(Set[Student]())
            c.setGroups(List[Group]())
            s.saveOrUpdate(c)
            s.delete(c)
            s.flush()
        }
    }
  }
}
case class OldCourses(courseIds:List[Long])
| P1tt187/fhs-schedule-generator | app/models/fhs/pages/blaimport/MBLAFileUpload.scala | Scala | gpl-2.0 | 21,644 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
package mutable
import java.util.ConcurrentModificationException
/**
 * Utilities for detecting that a mutation-counting collection has been
 * modified after a snapshot of its mutation count was taken.
 *
 * Wrap an `Iterator` in a [[MutationTracker.CheckedIterator]] to have the
 * check run automatically on every `hasNext`, or invoke
 * [[MutationTracker.checkMutations]] /
 * [[MutationTracker.checkMutationsForIteration]] by hand.
 */
private object MutationTracker {

  /**
   * Throws if the observed mutation count no longer matches the expected one.
   *
   * @param expectedCount the mutation count captured earlier
   * @param actualCount   the collection's current mutation count
   * @param message       message used for the thrown exception
   * @throws ConcurrentModificationException when the counts differ
   */
  @throws[ConcurrentModificationException]
  def checkMutations(expectedCount: Int, actualCount: Int, message: String): Unit = {
    if (expectedCount != actualCount) throw new ConcurrentModificationException(message)
  }

  /**
   * Like [[checkMutations]], but with a canned message stating that the
   * backing collection was mutated while it was being iterated.
   *
   * @param expectedCount the mutation count captured earlier
   * @param actualCount   the collection's current mutation count
   * @throws ConcurrentModificationException when the counts differ
   */
  @throws[ConcurrentModificationException]
  @inline def checkMutationsForIteration(expectedCount: Int, actualCount: Int): Unit =
    checkMutations(expectedCount, actualCount, "mutation occurred during iteration")

  /**
   * An iterator that validates, before every `hasNext`, that the underlying
   * collection's mutation count still equals the count captured when this
   * iterator was created.
   *
   * @param underlying    the iterator being wrapped
   * @param mutationCount by-name accessor for the current mutation count
   * @tparam A the element type
   */
  final class CheckedIterator[A](underlying: Iterator[A], mutationCount: => Int) extends AbstractIterator[A] {
    // Snapshot taken once at construction; compared on every hasNext.
    private[this] val initialCount = mutationCount

    def hasNext: Boolean = {
      checkMutationsForIteration(initialCount, mutationCount)
      underlying.hasNext
    }

    def next(): A = underlying.next()
  }
}
| lrytz/scala | src/library/scala/collection/mutable/MutationTracker.scala | Scala | apache-2.0 | 2,944 |
package io.buoyant
package admin.names
import com.fasterxml.jackson.annotation._
import com.fasterxml.jackson.core.{io => _, _}
import com.fasterxml.jackson.databind._
import com.fasterxml.jackson.databind.annotation.JsonSerialize.Inclusion
import com.fasterxml.jackson.databind.module.SimpleModule
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.twitter.conversions.time._
import com.twitter.finagle.http._
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.finagle.util.DefaultTimer
import com.twitter.finagle.{Path, Addr => FAddr, Address => FAddress, Status => _, TimeoutException => _, _}
import com.twitter.io.Buf
import com.twitter.util._
import io.buoyant.namer._
import java.net.InetSocketAddress
/**
 * Companion holding the JSON model and (de)serialization machinery for the
 * delegation API: JSON-friendly mirrors of Finagle's `Addr`, `Name.Bound` and
 * `DelegateTree`, a Jackson `Codec`, and the shared `getDelegateRsp` renderer.
 */
object DelegateApiHandler {

  // Shorthand helpers for building error responses.
  private def err(status: Status) = Future.value(Response(status))

  private def err(status: Status, content: String) = {
    val resp = Response(status)
    resp.contentString = content
    Future.value(resp)
  }

  /** JSON-friendly view of a concrete inet address plus its metadata. */
  case class Address(ip: String, port: Int, meta: Map[String, Any])

  object Address {
    // Only inet addresses can be rendered; any other address kind is dropped.
    def mk(addr: FAddress): Option[Address] = addr match {
      case FAddress.Inet(isa, meta) => Some(Address(isa.getAddress.getHostAddress, isa.getPort, meta))
      case _ => None
    }

    def toFinagle(addr: Address): FAddress =
      FAddress.Inet(new InetSocketAddress(addr.ip, addr.port), addr.meta)
  }

  // Jackson renders the concrete subtype into a "type" discriminator property.
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
  @JsonSubTypes(Array(
    new JsonSubTypes.Type(value = classOf[Addr.Bound], name = "bound"),
    new JsonSubTypes.Type(value = classOf[Addr.Failed], name = "failed"),
    new JsonSubTypes.Type(value = classOf[Addr.Neg], name = "neg"),
    new JsonSubTypes.Type(value = classOf[Addr.Pending], name = "pending")
  ))
  sealed trait Addr

  /** JSON-serializable mirror of Finagle's `Addr` states (here aliased `FAddr`). */
  object Addr {
    case class Pending() extends Addr
    case class Neg() extends Addr
    case class Failed(cause: String) extends Addr
    case class Bound(addrs: Set[Address], meta: Map[String, Any]) extends Addr

    def mk(fa: FAddr): Addr = fa match {
      case FAddr.Bound(addrs, meta) => Bound(addrs.flatMap(Address.mk), meta)
      case FAddr.Failed(e) => Failed(e.getMessage)
      case FAddr.Neg => Neg()
      case FAddr.Pending => Pending()
    }

    def toFinagle(addr: Addr): FAddr = addr match {
      case Pending() => FAddr.Pending
      case Neg() => FAddr.Neg
      case Failed(cause) => FAddr.Failed(cause)
      case Bound(addrs, meta) => FAddr.Bound(addrs.map(Address.toFinagle), meta)
    }
  }

  /** JSON-serializable mirror of a bound name: resolved address, id and residual path. */
  case class Bound(addr: Addr, id: Path, path: Path)

  object Bound {
    def mk(path: Path, name: Name.Bound): Future[Bound] = {
      // Fall back to the requested path when the bound id is not itself a Path.
      val id = name.id match {
        case id: Path => id
        case _ => path
      }
      // Wait for the first non-pending address before rendering the result.
      name.addr.changes.filter(_ != FAddr.Pending).toFuture.map { addr =>
        Bound(Addr.mk(addr), id, name.path)
      }
    }
  }

  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
  @JsonSubTypes(Array(
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Empty], name = "empty"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Fail], name = "fail"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Neg], name = "neg"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Exception], name = "exception"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Delegate], name = "delegate"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Leaf], name = "leaf"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Alt], name = "alt"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Union], name = "union"),
    new JsonSubTypes.Type(value = classOf[JsonDelegateTree.Transformation], name = "transformation")
  ))
  sealed trait JsonDelegateTree

  /**
   * JSON-serializable mirror of `DelegateTree[Name.Bound]`, with conversions
   * in both directions and a projection to `NameTree`.
   */
  object JsonDelegateTree {
    case class Empty(path: Path, dentry: Option[Dentry]) extends JsonDelegateTree
    case class Fail(path: String, dentry: Option[Dentry]) extends JsonDelegateTree
    case class Neg(path: String, dentry: Option[Dentry]) extends JsonDelegateTree
    case class Exception(path: Path, dentry: Option[Dentry], message: String) extends JsonDelegateTree
    case class Delegate(path: Path, dentry: Option[Dentry], delegate: JsonDelegateTree) extends JsonDelegateTree
    case class Leaf(path: Path, dentry: Option[Dentry], bound: Bound) extends JsonDelegateTree
    case class Alt(path: Path, dentry: Option[Dentry], alt: Seq[JsonDelegateTree]) extends JsonDelegateTree
    case class Union(path: Path, dentry: Option[Dentry], union: Seq[Weighted]) extends JsonDelegateTree
    case class Weighted(weight: Double, tree: JsonDelegateTree)
    case class Transformation(path: Path, name: String, bound: Bound, tree: JsonDelegateTree) extends JsonDelegateTree

    private[this] val fail = Path.read("/$/fail")

    /** Converts a delegation result into its JSON mirror, resolving bound addresses. */
    def mk(d: DelegateTree[Name.Bound]): Future[JsonDelegateTree] = d match {
      case DelegateTree.Exception(p, d, e) =>
        Future.value(JsonDelegateTree.Exception(p, mkDentry(d), e.getMessage))
      case DelegateTree.Empty(p, d) =>
        Future.value(JsonDelegateTree.Empty(p, mkDentry(d)))
      case DelegateTree.Fail(p, d) =>
        // Render explicit /$/fail paths as-is; any other failure path as "!".
        val path = if (p == null) null
        else if (p.startsWith(fail)) p.show
        else "!"
        Future.value(JsonDelegateTree.Fail(path, mkDentry(d)))
      case DelegateTree.Neg(p, d) =>
        // "~" denotes a negative result with an empty path.
        val path = if (p == null) null
        else if (p.isEmpty) "~"
        else p.show
        Future.value(JsonDelegateTree.Neg(path, mkDentry(d)))
      case DelegateTree.Delegate(p, d, t) =>
        mk(t).map(JsonDelegateTree.Delegate(p, mkDentry(d), _))
      case DelegateTree.Alt(p, d, ts@_*) =>
        Future.collect(ts.map(mk)).map(JsonDelegateTree.Alt(p, mkDentry(d), _))
      case DelegateTree.Union(p, d, ts@_*) =>
        val weights = ts.map { case DelegateTree.Weighted(w, t) => mk(t).map(Weighted(w, _)) }
        Future.collect(weights).map(JsonDelegateTree.Union(p, mkDentry(d), _))
      case DelegateTree.Leaf(p, d, b) =>
        Bound.mk(p, b).map(JsonDelegateTree.Leaf(p, mkDentry(d), _))
      case DelegateTree.Transformation(p, n, b, t) =>
        mk(t).join(Bound.mk(p, b)).map {
          case (tree, bound) =>
            JsonDelegateTree.Transformation(p, n, bound, tree)
        }
    }

    /** Rebuilds a Finagle `Name.Bound` from its JSON mirror. */
    def parseBound(bound: Bound): Name.Bound = Name.Bound(Var(Addr.toFinagle(bound.addr)), bound.id, bound.path)

    /** Inverse of [[mk]]: rebuilds a `DelegateTree[Name.Bound]` from the JSON mirror. */
    def toDelegateTree(jdt: JsonDelegateTree): DelegateTree[Name.Bound] = jdt match {
      case Empty(p, d) =>
        DelegateTree.Empty(p, d.getOrElse(Dentry.nop))
      case Fail(p, d) =>
        DelegateTree.Fail(Path.read(p), d.getOrElse(Dentry.nop))
      case Neg(p, d) =>
        DelegateTree.Neg(Path.read(p), d.getOrElse(Dentry.nop))
      case Exception(p, d, msg) =>
        DelegateTree.Exception(p, d.getOrElse(Dentry.nop), new java.lang.Exception(msg))
      case Delegate(p, d, delegate) =>
        DelegateTree.Delegate(p, d.getOrElse(Dentry.nop), toDelegateTree(delegate))
      case Leaf(p, d, bound) =>
        DelegateTree.Leaf(p, d.getOrElse(Dentry.nop), parseBound(bound))
      case Alt(p, d, alts) =>
        DelegateTree.Alt(p, d.getOrElse(Dentry.nop), alts.map(toDelegateTree): _*)
      case Union(p, d, weighteds) =>
        val delegateTreeWeighteds = weighteds.map {
          case Weighted(w, tree) =>
            DelegateTree.Weighted(w, toDelegateTree(tree))
        }
        DelegateTree.Union(p, d.getOrElse(Dentry.nop), delegateTreeWeighteds: _*)
      case Transformation(p, name, bound, tree) =>
        DelegateTree.Transformation(p, name, parseBound(bound), toDelegateTree(tree))
    }

    /** Projects the JSON mirror onto a `NameTree`, discarding delegation steps. */
    def toNameTree(d: JsonDelegateTree): NameTree[Name.Bound] = d match {
      case Empty(_, _) => NameTree.Empty
      case Fail(_, _) => NameTree.Fail
      case Neg(_, _) => NameTree.Neg
      case Exception(_, _, msg) => throw new IllegalStateException(msg)
      case Delegate(_, _, delegate) => toNameTree(delegate)
      case Leaf(_, _, bound) =>
        NameTree.Leaf(Name.Bound(Var(Addr.toFinagle(bound.addr)), bound.id, bound.path))
      case Alt(_, _, alts) => NameTree.Alt(alts.map(toNameTree): _*)
      case Union(_, _, weighteds) =>
        val nameTreeWeighteds = weighteds.map {
          case Weighted(w, tree) =>
            NameTree.Weighted(w, toNameTree(tree))
        }
        NameTree.Union(nameTreeWeighteds: _*)
      case Transformation(_, _, _, tree) => toNameTree(tree)
    }

    // No-op dentries are elided from the JSON output.
    def mkDentry(d: Dentry): Option[Dentry] = Some(d).filterNot(Dentry.equiv.equiv(Dentry.nop, _))
  }

  /**
   * Jackson mapper configured with custom serializers so that `Path`,
   * `Dentry.Prefix` and `NameTree[Path]` are written/read in their
   * human-readable `show` form.
   */
  object Codec {

    private[this] def mkModule() = {
      val module = new SimpleModule

      module.addSerializer(classOf[Path], new JsonSerializer[Path] {
        override def serialize(path: Path, json: JsonGenerator, p: SerializerProvider) {
          json.writeString(path.show)
        }
      })

      module.addDeserializer(classOf[Path], new JsonDeserializer[Path] {
        override def deserialize(json: JsonParser, ctx: DeserializationContext) =
          Path.read(json.getValueAsString)
      })

      module.addSerializer(classOf[Dentry.Prefix], new JsonSerializer[Dentry.Prefix] {
        override def serialize(pfx: Dentry.Prefix, json: JsonGenerator, p: SerializerProvider) {
          json.writeString(pfx.show)
        }
      })

      module.addDeserializer(classOf[Dentry.Prefix], new JsonDeserializer[Dentry.Prefix] {
        override def deserialize(json: JsonParser, ctx: DeserializationContext) =
          Dentry.Prefix.read(json.getValueAsString)
      })

      module.addSerializer(classOf[NameTree[Path]], new JsonSerializer[NameTree[Path]] {
        override def serialize(
          nameTree: NameTree[Path],
          json: JsonGenerator,
          p: SerializerProvider
        ) {
          json.writeString(nameTree.show)
        }
      })

      module.addDeserializer(classOf[NameTree[Path]], new JsonDeserializer[NameTree[Path]] {
        override def deserialize(json: JsonParser, ctx: DeserializationContext) =
          NameTree.read(json.getValueAsString)
      })

      module
    }

    val mapper = new ObjectMapper with ScalaObjectMapper
    mapper.registerModule(DefaultScalaModule)
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
    mapper.setSerializationInclusion(JsonInclude.Include.NON_ABSENT)
    mapper.registerModule(mkModule())

    def writeStr[T](t: T): String = mapper.writeValueAsString(t)
    def writeBuf[T](t: T): Buf = Buf.ByteArray.Owned(mapper.writeValueAsBytes(t))

    def readBuf[T: Manifest](buf: Buf): Try[T] = {
      val Buf.ByteBuffer.Owned(bb) = Buf.ByteBuffer.coerce(buf)
      Try { mapper.readValue[T](bb.array) }
    }
  }

  // Timer backing the 2-second delegation timeout in getDelegateRsp.
  private implicit val timer = DefaultTimer.twitter

  /**
   * Parses the dtab (empty when absent) and path, runs the delegation and
   * renders the resulting tree as JSON; times out after 2 seconds with 503,
   * and answers 400 for an unparsable dtab or path.
   */
  def getDelegateRsp(dtab: String, path: String, delegator: Delegator): Future[Response] = {
    val dtabTry = if (dtab == null) Return(Dtab.empty) else Try(Dtab.read(dtab))
    val pathTry = Try(Path.read(path))
    (dtabTry, pathTry) match {
      case (Return(d), Return(p)) =>
        delegator.delegate(d, p)
          .toFuture
          .flatMap(JsonDelegateTree.mk).map { tree =>
            val rsp = Response()
            rsp.content = Codec.writeBuf(tree)
            rsp.contentType = MediaType.Json
            rsp
          }.within(2.seconds).rescue {
            case e: TimeoutException =>
              err(Status.ServiceUnavailable, "Request timed out.")
          }
      case (Throw(e), _) =>
        err(Status.BadRequest, s"Invalid dtab: ${e.getMessage}")
      case (_, Throw(e)) =>
        err(Status.BadRequest, s"Invalid path: ${e.getMessage}")
    }
  }

  /** POST body of a delegation request; all fields optional at the wire level. */
  case class DelegationRequest(
    namespace: Option[String],
    dtab: Option[String],
    path: Option[String]
  )

  /** Decoder for a delegation request body, selected by Content-Type. */
  sealed trait DelegationRequestCodec {
    def contentTypes: Set[String]
    def read(buf: Buf): Try[DelegationRequest]
  }

  object DelegationRequestCodec {
    object JsonCodec extends DelegationRequestCodec {
      val contentTypes = Set(MediaType.Json)
      def read(buf: Buf): Try[DelegationRequest] = Codec.readBuf[DelegationRequest](buf)
    }

    // JSON is currently the only supported request encoding.
    def byContentType(ct: String): Option[DelegationRequestCodec] =
      if (JsonCodec.contentTypes(ct)) Some(DelegationRequestCodec.JsonCodec) else None
  }
}
/**
 * HTTP service answering delegation queries: GET with query parameters or
 * POST with an encoded [[DelegateApiHandler.DelegationRequest]] body.
 */
class DelegateApiHandler(
  interpreters: String => NameInterpreter,
  namers: Seq[(Path, Namer)] = Nil
) extends Service[Request, Response] {

  import DelegateApiHandler._

  /** Routes GET and POST delegation requests; all other methods are rejected. */
  def apply(req: Request): Future[Response] = req.method match {
    case Method.Get =>
      getResponse(req.params.get("namespace"), req.getParam("dtab"), req.getParam("path"))
    case Method.Post =>
      handlePost(req)
    case _ => err(Status.MethodNotAllowed)
  }

  // Decodes the POST body with a codec chosen from the Content-Type header.
  private[this] def handlePost(req: Request): Future[Response] =
    req.contentType.flatMap(DelegationRequestCodec.byContentType) match {
      case None => err(Status.UnsupportedMediaType)
      case Some(codec) =>
        codec.read(req.content) match {
          case Return(DelegationRequest(ns, Some(dtab), Some(path))) =>
            getResponse(ns, dtab, path)
          case _ =>
            err(Status.BadRequest, s"Malformed delegation request: ${req.getContentString}")
        }
    }

  // Resolves the interpreter for the namespace (when given) and renders the
  // delegation; namespace-less requests fall back to the configured namers.
  private def getResponse(ns: Option[String], dtab: String, path: String) = ns match {
    case Some(namespace) =>
      interpreters(namespace) match {
        case delegator: Delegator =>
          getDelegateRsp(dtab, path, delegator)
        case _ =>
          err(Status.NotImplemented, s"Name Interpreter for $namespace cannot show delegations")
      }
    case None =>
      getDelegateRsp(dtab, path, ConfiguredNamersInterpreter(namers))
  }
}
| pawelprazak/linkerd | admin/src/main/scala/io/buoyant/admin/names/DelegateApiHandler.scala | Scala | apache-2.0 | 13,692 |
// code-examples/AdvOOP/objects/button.scala
package objects
import ui3.Clickable
/** A labeled widget that can be clicked; click/draw bodies are example stubs. */
class Button(val label: String) extends Widget with Clickable {

  /** Handles a click on the button (stubbed in this example). */
  def click() = {
    // Logic to give the appearance of clicking a button...
  }

  /** Renders the button (stubbed in this example). */
  def draw() = {
    // Logic to draw the button on the display, web page, etc.
  }

  /** Includes the label plus the superclass rendering of shared state. */
  override def toString() = "(button: label="+label+", "+super.toString()+")"
}
/** Extractor so buttons can be matched as `case Button(label) => ...`. */
object Button {
  def unapply(button: Button) = Some(button.label)
}
package breeze.collection.mutable
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalacheck._
import org.scalatest._;
import org.scalatest.junit._;
import org.scalatest.prop._;
import org.junit.runner.RunWith
/** Property tests asserting that a Beam never holds more elements than its capacity. */
@RunWith(classOf[JUnitRunner])
class BeamTest extends FunSuite with Checkers {

  // Constructing a beam directly from elements must respect the capacity
  // bound, and the beam must be full whenever enough elements were supplied.
  test("creation doesn't go over size") {
    check( Prop.forAll{ (size: Int, cl: List[Int]) => size <= 0 || {
      val beam = new Beam[Int](size.abs,cl:_*);
      beam.size <= size.abs && (cl.size < size.abs || beam.size == size.abs)
      };
    })
  }

  // Adding elements after construction must also respect the capacity bound.
  test("addition doesn't go over size") {
    check( Prop.forAll{ (size: Int, cl: List[Int]) => (size <= 0) || {
      val beam = new Beam[Int](size.abs);
      beam ++= cl;
      beam.size <= size.abs && (cl.size < size.abs || beam.size == size.abs);
    }
    })
  }
}
| wavelets/breeze | src/test/scala/breeze/collection/mutable/BeamTest.scala | Scala | apache-2.0 | 1,377 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.immutable.Map
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.math.max
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.scheduler.SchedulingMode._
import org.apache.spark.util.{AccumulatorV2, Clock, LongAccumulator, SystemClock, Utils}
import org.apache.spark.util.collection.MedianHeap
/**
* Schedules the tasks within a single TaskSet in the TaskSchedulerImpl. This class keeps track of
* each task, retries tasks if they fail (up to a limited number of times), and
* handles locality-aware scheduling for this TaskSet via delay scheduling. The main interfaces
* to it are resourceOffer, which asks the TaskSet whether it wants to run a task on one node,
* and handleSuccessfulTask/handleFailedTask, which tells it that one of its tasks changed state
* (e.g. finished/failed).
*
* THREADING: This class is designed to only be called from code with a lock on the
* TaskScheduler (e.g. its event handlers). It should not be called from other threads.
*
* @param sched the TaskSchedulerImpl associated with the TaskSetManager
* @param taskSet the TaskSet to manage scheduling for
* @param maxTaskFailures if any particular task fails this number of times, the entire
* task set will be aborted
*/
private[spark] class TaskSetManager(
    sched: TaskSchedulerImpl,
    val taskSet: TaskSet,
    val maxTaskFailures: Int,
    blacklistTracker: Option[BlacklistTracker] = None,
    clock: Clock = new SystemClock()) extends Schedulable with Logging {

  private val conf = sched.sc.conf

  // SPARK-21563 make a copy of the jars/files so they are consistent across the TaskSet
  private val addedJars = HashMap[String, Long](sched.sc.addedJars.toSeq: _*)
  private val addedFiles = HashMap[String, Long](sched.sc.addedFiles.toSeq: _*)

  val maxResultSize = conf.get(config.MAX_RESULT_SIZE)

  // Serializer for closures and tasks.
  val env = SparkEnv.get
  val ser = env.closureSerializer.newInstance()

  val tasks = taskSet.tasks
  // Maps a task's partition id to its index within this task set.
  private[scheduler] val partitionToIndex = tasks.zipWithIndex
    .map { case (t, idx) => t.partitionId -> idx }.toMap
  val numTasks = tasks.length
  // Number of currently running copies of each task (indexed by task index).
  val copiesRunning = new Array[Int](numTasks)

  val speculationEnabled = conf.get(SPECULATION_ENABLED)
  // Quantile of tasks at which to start speculation
  val speculationQuantile = conf.get(SPECULATION_QUANTILE)
  val speculationMultiplier = conf.get(SPECULATION_MULTIPLIER)
  // At least one task must have finished before speculation can kick in.
  val minFinishedForSpeculation = math.max((speculationQuantile * numTasks).floor.toInt, 1)
  // User provided threshold for speculation regardless of whether the quantile has been reached
  val speculationTaskDurationThresOpt = conf.get(SPECULATION_TASK_DURATION_THRESHOLD)
  // SPARK-29976: Only when the total number of tasks in the stage is less than or equal to the
  // number of slots on a single executor, would the task manager speculative run the tasks if
  // their duration is longer than the given threshold. In this way, we wouldn't speculate too
  // aggressively but still handle basic cases.
  // SPARK-30417: #cores per executor might not be set in spark conf for standalone mode, then
  // the value of the conf would 1 by default. However, the executor would use all the cores on
  // the worker. Therefore, CPUS_PER_TASK is okay to be greater than 1 without setting #cores.
  // To handle this case, we assume the minimum number of slots is 1.
  // TODO: use the actual number of slots for standalone mode.
  val speculationTasksLessEqToSlots =
    numTasks <= Math.max(conf.get(EXECUTOR_CORES) / sched.CPUS_PER_TASK, 1)

  // For each task, tracks whether a copy of the task has succeeded. A task will also be
  // marked as "succeeded" if it failed with a fetch failure, in which case it should not
  // be re-run because the missing map data needs to be regenerated first.
  val successful = new Array[Boolean](numTasks)
  // Count of failed attempts per task index (compared against maxTaskFailures).
  private val numFailures = new Array[Int](numTasks)

  // Add the tid of task into this HashSet when the task is killed by other attempt tasks.
  // This happened while we set the `spark.speculation` to true. The task killed by others
  // should not resubmit while executor lost.
  private val killedByOtherAttempt = new HashSet[Long]

  // All attempts (TaskInfo) made for each task index, most recent first.
  val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
  private[scheduler] var tasksSuccessful = 0

  // Schedulable bookkeeping used by the fair/FIFO scheduling tree.
  val weight = 1
  val minShare = 0
  var priority = taskSet.priority
  var stageId = taskSet.stageId
  val name = "TaskSet_" + taskSet.id
  var parent: Pool = null
  private var totalResultSize = 0L
  private var calculatedTasks = 0

  // Present only when blacklisting is enabled for this application.
  private[scheduler] val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = {
    blacklistTracker.map { _ =>
      new TaskSetBlacklist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock)
    }
  }

  // Task ids (tids) of attempts currently running.
  private[scheduler] val runningTasksSet = new HashSet[Long]

  override def runningTasks: Int = runningTasksSet.size

  // Whether any attempt of the task identified by this tid has succeeded.
  def someAttemptSucceeded(tid: Long): Boolean = {
    successful(taskInfos(tid).index)
  }

  // True once no more tasks should be launched for this task set manager. TaskSetManagers enter
  // the zombie state once at least one attempt of each task has completed successfully, or if the
  // task set is aborted (for example, because it was killed). TaskSetManagers remain in the zombie
  // state until all tasks have finished running; we keep TaskSetManagers that are in the zombie
  // state in order to continue to track and account for the running tasks.
  // TODO: We should kill any running task attempts when the task set manager becomes a zombie.
  private[scheduler] var isZombie = false

  // Whether the taskSet run tasks from a barrier stage. Spark must launch all the tasks at the
  // same time for a barrier stage.
  private[scheduler] def isBarrier = taskSet.tasks.nonEmpty && taskSet.tasks(0).isBarrier

  // Store tasks waiting to be scheduled by locality preferences
  private[scheduler] val pendingTasks = new PendingTasksByLocality()

  // Tasks that can be speculated. Since these will be a small fraction of total
  // tasks, we'll just hold them in a HashSet. The HashSet here ensures that we do not add
  // duplicate speculatable tasks.
  private[scheduler] val speculatableTasks = new HashSet[Int]

  // Store speculatable tasks by locality preferences
  private[scheduler] val pendingSpeculatableTasks = new PendingTasksByLocality()

  // Task index, start and finish time for each task attempt (indexed by task ID)
  private[scheduler] val taskInfos = new HashMap[Long, TaskInfo]

  // Use a MedianHeap to record durations of successful tasks so we know when to launch
  // speculative tasks. This is only used when speculation is enabled, to avoid the overhead
  // of inserting into the heap when the heap won't be used.
  val successfulTaskDurations = new MedianHeap()

  // How frequently to reprint duplicate exceptions in full, in milliseconds
  val EXCEPTION_PRINT_INTERVAL =
    conf.getLong("spark.logging.exceptionPrintInterval", 10000)

  // Map of recent exceptions (identified by string representation and top stack frame) to
  // duplicate count (how many times the same exception has appeared) and time the full exception
  // was printed. This should ideally be an LRU map that can drop old exceptions automatically.
  private val recentExceptions = HashMap[String, (Int, Long)]()

  // Figure out the current map output tracker epoch and set it on all tasks
  val epoch = sched.mapOutputTracker.getEpoch
  logDebug("Epoch for " + taskSet + ": " + epoch)
  for (t <- tasks) {
    t.epoch = epoch
  }

  // Add all our tasks to the pending lists. We do this in reverse order
  // of task index so that tasks with low indices get launched first.
  addPendingTasks()
  /**
   * Registers every task index in the pending-task lists, resolving racks
   * for all hosts in one batched call (rack lookup can be slow).
   */
  private def addPendingTasks(): Unit = {
    val (_, duration) = Utils.timeTakenMs {
      // Reverse order so that tasks with low indices get launched first.
      for (i <- (0 until numTasks).reverse) {
        addPendingTask(i, resolveRacks = false)
      }
      // Resolve the rack for each host. This can be slow, so de-dupe the list of hosts,
      // and assign the rack to all relevant task indices.
      val (hosts, indicesForHosts) = pendingTasks.forHost.toSeq.unzip
      val racks = sched.getRacksForHosts(hosts)
      racks.zip(indicesForHosts).foreach {
        case (Some(rack), indices) =>
          pendingTasks.forRack.getOrElseUpdate(rack, new ArrayBuffer) ++= indices
        case (None, _) => // no rack, nothing to do
      }
    }
    logDebug(s"Adding pending tasks took $duration ms")
  }
  /**
   * Track the set of locality levels which are valid given the tasks locality preferences and
   * the set of currently available executors. This is updated as executors are added and removed.
   * This allows a performance optimization, of skipping levels that aren't relevant (eg., skip
   * PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors).
   */
  private[scheduler] var myLocalityLevels = computeValidLocalityLevels()

  // Time to wait at each level
  private[scheduler] var localityWaits = myLocalityLevels.map(getLocalityWait)

  // Delay scheduling variables: we keep track of our current locality level and the time we
  // last launched a task at that level, and move up a level when localityWaits[curLevel] expires.
  // We then move down if we manage to launch a "more local" task.
  private var currentLocalityIndex = 0 // Index of our current locality level in validLocalityLevels
  private var lastLaunchTime = clock.getTimeMillis() // Time we last launched a task at this level

  // A TaskSetManager is a leaf of the scheduling tree: it has no child queue.
  override def schedulableQueue: ConcurrentLinkedQueue[Schedulable] = null

  override def schedulingMode: SchedulingMode = SchedulingMode.NONE

  // Ensures the oversized-task warning is logged at most once per task set.
  private[scheduler] var emittedTaskSizeWarning = false
  /**
   * Add a task to all the pending-task lists that it should be on:
   * per-executor, per-host, per-rack, no-preference, and the "all" list.
   *
   * @param index        task index within this task set
   * @param resolveRacks whether to look up the rack per host (disabled when the
   *                     caller resolves racks in one batch afterwards)
   * @param speculatable whether to register in the speculatable pending lists
   */
  private[spark] def addPendingTask(
      index: Int,
      resolveRacks: Boolean = true,
      speculatable: Boolean = false): Unit = {
    // A zombie TaskSetManager may reach here while handling failed task.
    if (isZombie) return
    val pendingTaskSetToAddTo = if (speculatable) pendingSpeculatableTasks else pendingTasks
    for (loc <- tasks(index).preferredLocations) {
      loc match {
        case e: ExecutorCacheTaskLocation =>
          pendingTaskSetToAddTo.forExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
        case e: HDFSCacheTaskLocation =>
          // HDFS-cached data: any executor alive on that host may serve it.
          val exe = sched.getExecutorsAliveOnHost(loc.host)
          exe match {
            case Some(set) =>
              for (e <- set) {
                pendingTaskSetToAddTo.forExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
              }
              logInfo(s"Pending task $index has a cached location at ${e.host} " +
                ", where there are executors " + set.mkString(","))
            case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
              ", but there are no executors alive there.")
          }
        case _ =>
      }
      // Every preferred location also registers the task at host granularity.
      pendingTaskSetToAddTo.forHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
      if (resolveRacks) {
        sched.getRackForHost(loc.host).foreach { rack =>
          pendingTaskSetToAddTo.forRack.getOrElseUpdate(rack, new ArrayBuffer) += index
        }
      }
    }
    if (tasks(index).preferredLocations == Nil) {
      pendingTaskSetToAddTo.noPrefs += index
    }
    pendingTaskSetToAddTo.all += index
  }
  /**
   * Dequeue a pending task from the given list and return its index.
   * Return None if the list is empty.
   * This method also cleans up any tasks in the list that have already
   * been launched, since we want that to happen lazily.
   *
   * @param execId      executor offering the resources
   * @param host        host the executor runs on
   * @param list        pending-task indices to scan (mutated: entries removed)
   * @param speculative whether we are dequeueing a speculative copy
   */
  private def dequeueTaskFromList(
      execId: String,
      host: String,
      list: ArrayBuffer[Int],
      speculative: Boolean = false): Option[Int] = {
    // Scan from the tail so that removal of the inspected entry is cheap.
    var indexOffset = list.size
    while (indexOffset > 0) {
      indexOffset -= 1
      val index = list(indexOffset)
      // Skip tasks blacklisted on this executor/node, and never run a
      // speculative copy on a host that already ran an attempt of the task.
      if (!isTaskBlacklistedOnExecOrNode(index, execId, host) &&
        !(speculative && hasAttemptOnHost(index, host))) {
        // This should almost always be list.trimEnd(1) to remove tail
        list.remove(indexOffset)
        // Speculatable task should only be launched when at most one copy of the
        // original task is running
        if (!successful(index)) {
          if (copiesRunning(index) == 0) {
            return Some(index)
          } else if (speculative && copiesRunning(index) == 1) {
            return Some(index)
          }
        }
      }
    }
    None
  }
/** Returns true if any attempt (running or finished) of the given task was placed on `host`. */
private def hasAttemptOnHost(taskIndex: Int, host: String): Boolean = {
    val attempts = taskAttempts(taskIndex)
    attempts.exists(attempt => attempt.host == host)
  }
/**
 * True when this task set's blacklist (if one is enabled) forbids running task
 * `index` on the given executor or anywhere on the given node.
 */
private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = {
    taskSetBlacklistHelperOpt match {
      case Some(blacklist) =>
        blacklist.isNodeBlacklistedForTask(host, index) ||
          blacklist.isExecutorBlacklistedForTask(execId, index)
      case None =>
        // Blacklisting disabled: nothing is ever blacklisted.
        false
    }
  }
/**
 * Dequeue a pending task for a given node and return its index and locality level.
 * Only search for tasks matching the given locality constraint.
 *
 * Regular (non-speculative) tasks always take priority; a speculative copy is
 * considered only when no regular task can be scheduled on this offer.
 *
 * @return An option containing (task index within the task set, locality, is speculative?)
 */
private def dequeueTask(
      execId: String,
      host: String,
      maxLocality: TaskLocality.Value): Option[(Int, TaskLocality.Value, Boolean)] = {
    val regular = dequeueTaskHelper(execId, host, maxLocality, speculative = false)
    if (regular.isDefined) {
      regular
    } else {
      dequeueTaskHelper(execId, host, maxLocality, speculative = true)
    }
  }
/**
 * Finds a pending (regular or speculative) task for this executor/host offer,
 * trying locality levels from most to least local, bounded by `maxLocality`.
 *
 * NOTE: the `return` statements inside the `foreach` closures below are
 * non-local returns from this method — the first successful dequeue wins.
 *
 * @param speculative when true, draw from the speculatable-task queues and also
 *                    remove the chosen index from `speculatableTasks`
 * @return (task index within the task set, locality level used, is speculative?)
 */
protected def dequeueTaskHelper(
      execId: String,
      host: String,
      maxLocality: TaskLocality.Value,
      speculative: Boolean): Option[(Int, TaskLocality.Value, Boolean)] = {
    if (speculative && speculatableTasks.isEmpty) {
      return None
    }
    val pendingTaskSetToUse = if (speculative) pendingSpeculatableTasks else pendingTasks
    // Shared dequeue step: on a speculative hit, also drop the index from the
    // speculatable set so it is not re-launched.
    def dequeue(list: ArrayBuffer[Int]): Option[Int] = {
      val task = dequeueTaskFromList(execId, host, list, speculative)
      if (speculative && task.isDefined) {
        speculatableTasks -= task.get
      }
      task
    }
    dequeue(pendingTaskSetToUse.forExecutor.getOrElse(execId, ArrayBuffer())).foreach { index =>
      return Some((index, TaskLocality.PROCESS_LOCAL, speculative))
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NODE_LOCAL)) {
      dequeue(pendingTaskSetToUse.forHost.getOrElse(host, ArrayBuffer())).foreach { index =>
        return Some((index, TaskLocality.NODE_LOCAL, speculative))
      }
    }
    // Look for noPref tasks after NODE_LOCAL for minimize cross-rack traffic
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NO_PREF)) {
      dequeue(pendingTaskSetToUse.noPrefs).foreach { index =>
        // noPref tasks are reported as PROCESS_LOCAL so they don't affect delay scheduling
        return Some((index, TaskLocality.PROCESS_LOCAL, speculative))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.RACK_LOCAL)) {
      for {
        rack <- sched.getRackForHost(host)
        index <- dequeue(pendingTaskSetToUse.forRack.getOrElse(rack, ArrayBuffer()))
      } {
        return Some((index, TaskLocality.RACK_LOCAL, speculative))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
      dequeue(pendingTaskSetToUse.all).foreach { index =>
        return Some((index, TaskLocality.ANY, speculative))
      }
    }
    None
  }
/**
 * Respond to an offer of a single executor from the scheduler by finding a task
 *
 * NOTE: this function is either called with a maxLocality which
 * would be adjusted by delay scheduling algorithm or it will be with a special
 * NO_PREF locality which will be not modified
 *
 * @param execId the executor Id of the offered resource
 * @param host the host Id of the offered resource
 * @param maxLocality the maximum locality we want to schedule the tasks at
 * @param availableResources extra resource addresses (e.g. GPUs) available on this executor,
 *                           keyed by resource name
 * @return a TaskDescription ready to be launched, or None if no suitable task exists,
 *         the offer is blacklisted, or this manager is a zombie
 * @throws TaskNotSerializableException if the chosen task cannot be serialized
 *         (the whole task set is aborted first)
 */
@throws[TaskNotSerializableException]
def resourceOffer(
      execId: String,
      host: String,
      maxLocality: TaskLocality.TaskLocality,
      availableResources: Map[String, Seq[String]] = Map.empty)
    : Option[TaskDescription] =
  {
    val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist =>
      blacklist.isNodeBlacklistedForTaskSet(host) ||
        blacklist.isExecutorBlacklistedForTaskSet(execId)
    }
    if (!isZombie && !offerBlacklisted) {
      val curTime = clock.getTimeMillis()
      var allowedLocality = maxLocality
      if (maxLocality != TaskLocality.NO_PREF) {
        allowedLocality = getAllowedLocalityLevel(curTime)
        if (allowedLocality > maxLocality) {
          // We're not allowed to search for farther-away tasks
          allowedLocality = maxLocality
        }
      }
      dequeueTask(execId, host, allowedLocality).map { case ((index, taskLocality, speculative)) =>
        // Found a task; do some bookkeeping and return a task description
        val task = tasks(index)
        val taskId = sched.newTaskId()
        // Do various bookkeeping
        copiesRunning(index) += 1
        val attemptNum = taskAttempts(index).size
        val info = new TaskInfo(taskId, index, attemptNum, curTime,
          execId, host, taskLocality, speculative)
        taskInfos(taskId) = info
        taskAttempts(index) = info :: taskAttempts(index)
        // Update our locality level for delay scheduling
        // NO_PREF will not affect the variables related to delay scheduling
        if (maxLocality != TaskLocality.NO_PREF) {
          currentLocalityIndex = getLocalityIndex(taskLocality)
          lastLaunchTime = curTime
        }
        // Serialize and return the task
        val serializedTask: ByteBuffer = try {
          ser.serialize(task)
        } catch {
          // If the task cannot be serialized, then there's no point to re-attempt the task,
          // as it will always fail. So just abort the whole task-set.
          case NonFatal(e) =>
            val msg = s"Failed to serialize task $taskId, not attempting to retry it."
            logError(msg, e)
            abort(s"$msg Exception during serialization: $e")
            throw new TaskNotSerializableException(e)
        }
        if (serializedTask.limit() > TaskSetManager.TASK_SIZE_TO_WARN_KIB * 1024 &&
          !emittedTaskSizeWarning) {
          emittedTaskSizeWarning = true
          logWarning(s"Stage ${task.stageId} contains a task of very large size " +
            s"(${serializedTask.limit() / 1024} KiB). The maximum recommended task size is " +
            s"${TaskSetManager.TASK_SIZE_TO_WARN_KIB} KiB.")
        }
        addRunningTask(taskId)
        // We used to log the time it takes to serialize the task, but task size is already
        // a good proxy to task serialization time.
        // val timeTaken = clock.getTime() - startTime
        val taskName = s"task ${info.id} in stage ${taskSet.id}"
        logInfo(s"Starting $taskName (TID $taskId, $host, executor ${info.executorId}, " +
          s"partition ${task.partitionId}, $taskLocality, ${serializedTask.limit()} bytes)")
        // Reserve the per-task custom resources (GPUs etc.) this task needs from
        // what the offer advertised; the scheduler drops them from the pool later.
        val extraResources = sched.resourcesReqsPerTask.map { taskReq =>
          val rName = taskReq.resourceName
          val count = taskReq.amount
          val rAddresses = availableResources.getOrElse(rName, Seq.empty)
          assert(rAddresses.size >= count, s"Required $count $rName addresses, but only " +
            s"${rAddresses.size} available.")
          // We'll drop the allocated addresses later inside TaskSchedulerImpl.
          val allocatedAddresses = rAddresses.take(count)
          (rName, new ResourceInformation(rName, allocatedAddresses.toArray))
        }.toMap
        sched.dagScheduler.taskStarted(task, info)
        new TaskDescription(
          taskId,
          attemptNum,
          execId,
          taskName,
          index,
          task.partitionId,
          addedFiles,
          addedJars,
          task.localProperties,
          extraResources,
          serializedTask)
      }
    } else {
      None
    }
  }
/**
 * Tells the scheduler this task set is finished once it is a zombie with no
 * running tasks left. When every task succeeded, also lets the application
 * blacklist tracker record the per-executor failures from this task set.
 */
private def maybeFinishTaskSet(): Unit = {
    val nothingLeftToRun = isZombie && runningTasks == 0
    if (nothingLeftToRun) {
      sched.taskSetFinished(this)
      val fullySuccessful = tasksSuccessful == numTasks
      if (fullySuccessful) {
        blacklistTracker.foreach { tracker =>
          tracker.updateBlacklistForSuccessfulTaskSet(
            taskSet.stageId,
            taskSet.stageAttemptId,
            taskSetBlacklistHelperOpt.get.execToFailures)
        }
      }
    }
  }
/**
 * Get the level we can launch tasks according to delay scheduling, based on current wait time.
 *
 * Advances `currentLocalityIndex` past levels that have no runnable tasks left
 * (SPARK-4939 optimization) or whose locality wait has expired, mutating
 * `lastLaunchTime` as it goes. Also lazily prunes scheduled/finished tasks
 * from the pending queues it inspects.
 */
private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
    // Remove the scheduled or finished tasks lazily
    def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
      var indexOffset = pendingTaskIds.size
      while (indexOffset > 0) {
        indexOffset -= 1
        val index = pendingTaskIds(indexOffset)
        if (copiesRunning(index) == 0 && !successful(index)) {
          return true
        } else {
          pendingTaskIds.remove(indexOffset)
        }
      }
      false
    }
    // Walk through the list of tasks that can be scheduled at each location and returns true
    // if there are any tasks that still need to be scheduled. Lazily cleans up tasks that have
    // already been scheduled.
    def moreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
      val emptyKeys = new ArrayBuffer[String]
      val hasTasks = pendingTasks.exists {
        case (id: String, tasks: ArrayBuffer[Int]) =>
          if (tasksNeedToBeScheduledFrom(tasks)) {
            true
          } else {
            emptyKeys += id
            false
          }
      }
      // The key could be executorId, host or rackId
      emptyKeys.foreach(id => pendingTasks.remove(id))
      hasTasks
    }
    while (currentLocalityIndex < myLocalityLevels.length - 1) {
      // ANY is always the last level, so it needs no "more tasks?" check here.
      val moreTasks = myLocalityLevels(currentLocalityIndex) match {
        case TaskLocality.PROCESS_LOCAL => moreTasksToRunIn(pendingTasks.forExecutor)
        case TaskLocality.NODE_LOCAL => moreTasksToRunIn(pendingTasks.forHost)
        case TaskLocality.NO_PREF => pendingTasks.noPrefs.nonEmpty
        case TaskLocality.RACK_LOCAL => moreTasksToRunIn(pendingTasks.forRack)
      }
      if (!moreTasks) {
        // This is a performance optimization: if there are no more tasks that can
        // be scheduled at a particular locality level, there is no point in waiting
        // for the locality wait timeout (SPARK-4939).
        lastLaunchTime = curTime
        logDebug(s"No tasks for locality level ${myLocalityLevels(currentLocalityIndex)}, " +
          s"so moving to locality level ${myLocalityLevels(currentLocalityIndex + 1)}")
        currentLocalityIndex += 1
      } else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
        // Jump to the next locality level, and reset lastLaunchTime so that the next locality
        // wait timer doesn't immediately expire
        lastLaunchTime += localityWaits(currentLocalityIndex)
        logDebug(s"Moving to ${myLocalityLevels(currentLocalityIndex + 1)} after waiting for " +
          s"${localityWaits(currentLocalityIndex)}ms")
        currentLocalityIndex += 1
      } else {
        return myLocalityLevels(currentLocalityIndex)
      }
    }
    myLocalityLevels(currentLocalityIndex)
  }
/**
 * Find the index in myLocalityLevels for a given locality. This is also designed to work with
 * localities that are not in myLocalityLevels (in case we somehow get those) by returning the
 * next-biggest level we have. Uses the fact that the last value in myLocalityLevels is ANY.
 */
def getLocalityIndex(locality: TaskLocality.TaskLocality): Int = {
    // Since the final entry is always ANY (the largest level), a match is guaranteed.
    myLocalityLevels.indexWhere(level => locality <= level)
  }
/**
 * Check whether the given task set has been blacklisted to the point that it can't run anywhere.
 *
 * It is possible that this taskset has become impossible to schedule *anywhere* due to the
 * blacklist. The most common scenario would be if there are fewer executors than
 * spark.task.maxFailures. We need to detect this so we can avoid the job from being hung.
 * We try to acquire new executor/s by killing an existing idle blacklisted executor.
 *
 * There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that
 * would add extra time to each iteration of the scheduling loop. Here, we take the approach of
 * making sure at least one of the unscheduled tasks is schedulable. This means we may not detect
 * the hang as quickly as we could have, but we'll always detect the hang eventually, and the
 * method is faster in the typical case. In the worst case, this method can take
 * O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task
 * failures (this is because the method picks one unscheduled task, and then iterates through each
 * executor until it finds one that the task isn't blacklisted on).
 *
 * @param hostToExecutors live executors, grouped by host
 * @return the index of a task that cannot run on any live executor, if one exists
 */
private[scheduler] def getCompletelyBlacklistedTaskIfAny(
      hostToExecutors: HashMap[String, HashSet[String]]): Option[Int] = {
    taskSetBlacklistHelperOpt.flatMap { taskSetBlacklist =>
      // Safe: the task-set blacklist only exists when app-level blacklisting is on.
      val appBlacklist = blacklistTracker.get
      // Only look for unschedulable tasks when at least one executor has registered. Otherwise,
      // task sets will be (unnecessarily) aborted in cases when no executors have registered yet.
      if (hostToExecutors.nonEmpty) {
        // find any task that needs to be scheduled
        val pendingTask: Option[Int] = {
          // usually this will just take the last pending task, but because of the lazy removal
          // from each list, we may need to go deeper in the list.  We poll from the end because
          // failed tasks are put back at the end of allPendingTasks, so we're more likely to find
          // an unschedulable task this way.
          val indexOffset = pendingTasks.all.lastIndexWhere { indexInTaskSet =>
            copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
          }
          if (indexOffset == -1) {
            None
          } else {
            Some(pendingTasks.all(indexOffset))
          }
        }
        pendingTask.find { indexInTaskSet =>
          // try to find some executor this task can run on.  Its possible that some *other*
          // task isn't schedulable anywhere, but we will discover that in some later call,
          // when that unschedulable task is the last task remaining.
          hostToExecutors.forall { case (host, execsOnHost) =>
            // Check if the task can run on the node
            val nodeBlacklisted =
              appBlacklist.isNodeBlacklisted(host) ||
                taskSetBlacklist.isNodeBlacklistedForTaskSet(host) ||
                taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet)
            if (nodeBlacklisted) {
              true
            } else {
              // Check if the task can run on any of the executors
              execsOnHost.forall { exec =>
                appBlacklist.isExecutorBlacklisted(exec) ||
                  taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) ||
                  taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet)
              }
            }
          }
        }
      } else {
        None
      }
    }
  }
/**
 * Aborts the task set with a human-readable message explaining that the given
 * task cannot run anywhere due to blacklisting. No-op when blacklisting is off.
 */
private[scheduler] def abortSinceCompletelyBlacklisted(indexInTaskSet: Int): Unit = {
    taskSetBlacklistHelperOpt match {
      case Some(taskSetBlacklist) =>
        val partition = tasks(indexInTaskSet).partitionId
        val message = s"""
          |Aborting $taskSet because task $indexInTaskSet (partition $partition)
          |cannot run anywhere due to node and executor blacklist.
          |Most recent failure:
          |${taskSetBlacklist.getLatestFailureReason}
          |
          |Blacklisting behavior can be configured via spark.blacklist.*.
          |""".stripMargin
        abort(message)
      case None => // blacklisting disabled; nothing to report
    }
  }
/**
 * Marks the task as getting result and notifies the DAG Scheduler
 */
def handleTaskGettingResult(tid: Long): Unit = {
    val taskInfo = taskInfos(tid)
    val now = clock.getTimeMillis()
    taskInfo.markGettingResult(now)
    sched.dagScheduler.taskGettingResult(taskInfo)
  }
/**
 * Check whether has enough quota to fetch the result with `size` bytes.
 * Always charges `size` against the running total first; if the limit is then
 * exceeded, the whole task set is aborted and false is returned.
 */
def canFetchMoreResults(size: Long): Boolean = sched.synchronized {
    totalResultSize += size
    calculatedTasks += 1
    val overLimit = maxResultSize > 0 && totalResultSize > maxResultSize
    if (overLimit) {
      val msg = s"Total size of serialized results of ${calculatedTasks} tasks " +
        s"(${Utils.bytesToString(totalResultSize)}) is bigger than ${config.MAX_RESULT_SIZE.key} " +
        s"(${Utils.bytesToString(maxResultSize)})"
      logError(msg)
      abort(msg)
    }
    !overLimit
  }
/**
 * Marks a task as successful and notifies the DAGScheduler that the task has ended.
 *
 * If another attempt of the same task already succeeded (and this one was killed for
 * that reason), the event is re-routed through handleFailedTask as a KILLED task.
 * Otherwise this kills the remaining running attempts of the task, updates success
 * counters, and may flip the manager into zombie state when all tasks are done.
 */
def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
    val info = taskInfos(tid)
    val index = info.index
    // Check if any other attempt succeeded before this and this attempt has not been handled
    if (successful(index) && killedByOtherAttempt.contains(tid)) {
      // Undo the effect on calculatedTasks and totalResultSize made earlier when
      // checking if can fetch more results
      calculatedTasks -= 1
      val resultSizeAcc = result.accumUpdates.find(a =>
        a.name == Some(InternalAccumulator.RESULT_SIZE))
      if (resultSizeAcc.isDefined) {
        totalResultSize -= resultSizeAcc.get.asInstanceOf[LongAccumulator].value
      }
      // Handle this task as a killed task
      handleFailedTask(tid, TaskState.KILLED,
        TaskKilled("Finish but did not commit due to another attempt succeeded"))
      return
    }
    info.markFinished(TaskState.FINISHED, clock.getTimeMillis())
    if (speculationEnabled) {
      // Feed the duration into the median-based speculation threshold.
      successfulTaskDurations.insert(info.duration)
    }
    removeRunningTask(tid)
    // Kill any other attempts for the same task (since those are unnecessary now that one
    // attempt completed successfully).
    for (attemptInfo <- taskAttempts(index) if attemptInfo.running) {
      logInfo(s"Killing attempt ${attemptInfo.attemptNumber} for task ${attemptInfo.id} " +
        s"in stage ${taskSet.id} (TID ${attemptInfo.taskId}) on ${attemptInfo.host} " +
        s"as the attempt ${info.attemptNumber} succeeded on ${info.host}")
      killedByOtherAttempt += attemptInfo.taskId
      sched.backend.killTask(
        attemptInfo.taskId,
        attemptInfo.executorId,
        interruptThread = true,
        reason = "another attempt succeeded")
    }
    if (!successful(index)) {
      tasksSuccessful += 1
      logInfo(s"Finished task ${info.id} in stage ${taskSet.id} (TID ${info.taskId}) in" +
        s" ${info.duration} ms on ${info.host} (executor ${info.executorId})" +
        s" ($tasksSuccessful/$numTasks)")
      // Mark successful and stop if all the tasks have succeeded.
      successful(index) = true
      if (tasksSuccessful == numTasks) {
        isZombie = true
      }
    } else {
      logInfo("Ignoring task-finished event for " + info.id + " in stage " + taskSet.id +
        " because task " + index + " has already completed successfully")
    }
    // This method is called by "TaskSchedulerImpl.handleSuccessfulTask" which holds the
    // "TaskSchedulerImpl" lock until exiting. To avoid the SPARK-7655 issue, we should not
    // "deserialize" the value when holding a lock to avoid blocking other threads. So we call
    // "result.value()" in "TaskResultGetter.enqueueSuccessfulTask" before reaching here.
    // Note: "result.value()" only deserializes the value when it's called at the first time, so
    // here "result.value()" just returns the value and won't block other threads.
    sched.dagScheduler.taskEnded(tasks(index), Success, result.value(), result.accumUpdates,
      result.metricPeaks, info)
    maybeFinishTaskSet()
  }
/**
 * Records that a partition was completed (possibly by another stage attempt),
 * updating success counters and potentially finishing this task set.
 * Unknown partitions and already-successful tasks are ignored.
 */
private[scheduler] def markPartitionCompleted(partitionId: Int): Unit = {
    partitionToIndex.get(partitionId) match {
      case Some(index) if !successful(index) =>
        tasksSuccessful += 1
        successful(index) = true
        isZombie = isZombie || tasksSuccessful == numTasks
        maybeFinishTaskSet()
      case _ => // partition not ours, or already counted
    }
  }
/**
 * Marks the task as failed, re-adds it to the list of pending tasks, and notifies the
 * DAG Scheduler.
 *
 * Depending on the failure reason this may also: mark the stage as finished (fetch
 * failures make the manager a zombie), abort the task set (non-serializable results,
 * output-file conflicts, or too many failures of one task), or skip failure counting
 * entirely (executor loss unrelated to the app).
 */
def handleFailedTask(tid: Long, state: TaskState, reason: TaskFailedReason): Unit = {
    val info = taskInfos(tid)
    if (info.failed || info.killed) {
      return
    }
    removeRunningTask(tid)
    info.markFinished(state, clock.getTimeMillis())
    val index = info.index
    copiesRunning(index) -= 1
    var accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty
    var metricPeaks: Array[Long] = Array.empty
    val failureReason = s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid, ${info.host}," +
      s" executor ${info.executorId}): ${reason.toErrorString}"
    val failureException: Option[Throwable] = reason match {
      case fetchFailed: FetchFailed =>
        logWarning(failureReason)
        // A fetch failure means the *previous* stage's output is gone; count this
        // task as done for bookkeeping and let the DAGScheduler rerun that stage.
        if (!successful(index)) {
          successful(index) = true
          tasksSuccessful += 1
        }
        isZombie = true
        if (fetchFailed.bmAddress != null) {
          blacklistTracker.foreach(_.updateBlacklistForFetchFailure(
            fetchFailed.bmAddress.host, fetchFailed.bmAddress.executorId))
        }
        None
      case ef: ExceptionFailure =>
        // ExceptionFailure's might have accumulator updates
        accumUpdates = ef.accums
        metricPeaks = ef.metricPeaks.toArray
        if (ef.className == classOf[NotSerializableException].getName) {
          // If the task result wasn't serializable, there's no point in trying to re-execute it.
          logError("Task %s in stage %s (TID %d) had a not serializable result: %s; not retrying"
            .format(info.id, taskSet.id, tid, ef.description))
          abort("Task %s in stage %s (TID %d) had a not serializable result: %s".format(
            info.id, taskSet.id, tid, ef.description))
          return
        }
        if (ef.className == classOf[TaskOutputFileAlreadyExistException].getName) {
          // If we can not write to output file in the task, there's no point in trying to
          // re-execute it.
          logError("Task %s in stage %s (TID %d) can not write to output file: %s; not retrying"
            .format(info.id, taskSet.id, tid, ef.description))
          abort("Task %s in stage %s (TID %d) can not write to output file: %s".format(
            info.id, taskSet.id, tid, ef.description))
          return
        }
        // Rate-limit full stack traces of recurring exceptions: print in full at most
        // once per EXCEPTION_PRINT_INTERVAL, otherwise log a short duplicate notice.
        val key = ef.description
        val now = clock.getTimeMillis()
        val (printFull, dupCount) = {
          if (recentExceptions.contains(key)) {
            val (dupCount, printTime) = recentExceptions(key)
            if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
              recentExceptions(key) = (0, now)
              (true, 0)
            } else {
              recentExceptions(key) = (dupCount + 1, printTime)
              (false, dupCount + 1)
            }
          } else {
            recentExceptions(key) = (0, now)
            (true, 0)
          }
        }
        if (printFull) {
          logWarning(failureReason)
        } else {
          logInfo(
            s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid) on ${info.host}, executor" +
              s" ${info.executorId}: ${ef.className} (${ef.description}) [duplicate $dupCount]")
        }
        ef.exception
      case tk: TaskKilled =>
        // TaskKilled might have accumulator updates
        accumUpdates = tk.accums
        metricPeaks = tk.metricPeaks.toArray
        logWarning(failureReason)
        None
      case e: ExecutorLostFailure if !e.exitCausedByApp =>
        logInfo(s"Task $tid failed because while it was being computed, its executor " +
          "exited for a reason unrelated to the task. Not counting this failure towards the " +
          "maximum number of failures for the task.")
        None
      case e: TaskFailedReason =>  // TaskResultLost and others
        logWarning(failureReason)
        None
    }
    // Any failure of a barrier task dooms the whole stage attempt.
    if (tasks(index).isBarrier) {
      isZombie = true
    }
    sched.dagScheduler.taskEnded(tasks(index), reason, null, accumUpdates, metricPeaks, info)
    if (!isZombie && reason.countTowardsTaskFailures) {
      assert (null != failureReason)
      taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask(
        info.host, info.executorId, index, failureReason))
      numFailures(index) += 1
      if (numFailures(index) >= maxTaskFailures) {
        logError("Task %d in stage %s failed %d times; aborting job".format(
          index, taskSet.id, maxTaskFailures))
        // FIX: was "%s\\nDriver stacktrace:" which rendered a literal backslash-n
        // in the abort message instead of a newline.
        abort("Task %d in stage %s failed %d times, most recent failure: %s\nDriver stacktrace:"
          .format(index, taskSet.id, maxTaskFailures, failureReason), failureException)
        return
      }
    }
    if (successful(index)) {
      logInfo(s"Task ${info.id} in stage ${taskSet.id} (TID $tid) failed, but the task will not" +
        s" be re-executed (either because the task failed with a shuffle data fetch failure," +
        s" so the previous stage needs to be re-run, or because a different copy of the task" +
        s" has already succeeded).")
    } else {
      addPendingTask(index)
    }
    maybeFinishTaskSet()
  }
/**
 * Aborts the whole task set: notifies the DAGScheduler of the failure, marks this
 * manager as a zombie so no new tasks are launched, and finishes the set if no
 * tasks are still running.
 */
def abort(message: String, exception: Option[Throwable] = None): Unit = sched.synchronized {
    // TODO: Kill running tasks if we were not terminated due to a Mesos error
    sched.dagScheduler.taskSetFailed(taskSet, message, exception)
    isZombie = true
    maybeFinishTaskSet()
  }
/** If the given task ID is not in the set of running tasks, adds it.
 *
 * Used to keep track of the number of running tasks, for enforcing scheduling policies.
 */
def addRunningTask(tid: Long): Unit = {
    val newlyAdded = runningTasksSet.add(tid)
    if (newlyAdded && parent != null) {
      parent.increaseRunningTasks(1)
    }
  }
/** If the given task ID is in the set of running tasks, removes it (and tells the parent pool). */
def removeRunningTask(tid: Long): Unit = {
    val wasPresent = runningTasksSet.remove(tid)
    if (wasPresent && parent != null) {
      parent.decreaseRunningTasks(1)
    }
  }
// A TaskSetManager is a leaf of the scheduling tree: it contains no child
// Schedulables, so name lookup always misses (the Schedulable contract here
// uses null for "not found").
override def getSchedulableByName(name: String): Schedulable = {
    null
  }
// Leaf node: adding/removing children is a no-op by design.
override def addSchedulable(schedulable: Schedulable): Unit = {}
override def removeSchedulable(schedulable: Schedulable): Unit = {}
/** A leaf of the scheduling tree schedules only itself. */
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
    ArrayBuffer(this)
  }
/** Called by TaskScheduler when an executor is lost so we can re-enqueue our tasks.
 *
 * Shuffle-map output that lived only on the dead executor must be recomputed, so
 * completed map tasks from that executor are un-marked and re-queued; any attempt
 * still running there is failed via handleFailedTask. Finally, locality levels
 * are recomputed since the set of live executors changed.
 */
override def executorLost(execId: String, host: String, reason: ExecutorLossReason): Unit = {
    // Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage,
    // and we are not using an external shuffle server which could serve the shuffle outputs.
    // The reason is the next stage wouldn't be able to fetch the data from this dead executor
    // so we would need to rerun these tasks on other executors.
    if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled
        && !isZombie) {
      for ((tid, info) <- taskInfos if info.executorId == execId) {
        val index = taskInfos(tid).index
        // We may have a running task whose partition has been marked as successful,
        // this partition has another task completed in another stage attempt.
        // We treat it as a running task and will call handleFailedTask later.
        if (successful(index) && !info.running && !killedByOtherAttempt.contains(tid)) {
          successful(index) = false
          copiesRunning(index) -= 1
          tasksSuccessful -= 1
          addPendingTask(index)
          // Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
          // stage finishes when a total of tasks.size tasks finish.
          sched.dagScheduler.taskEnded(
            tasks(index), Resubmitted, null, Seq.empty, Array.empty, info)
        }
      }
    }
    for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
      // Only app-caused exits count towards the task-failure limit.
      val exitCausedByApp: Boolean = reason match {
        case exited: ExecutorExited => exited.exitCausedByApp
        case ExecutorKilled => false
        case _ => true
      }
      handleFailedTask(tid, TaskState.FAILED, ExecutorLostFailure(info.executorId, exitCausedByApp,
        Some(reason.toString)))
    }
    // recalculate valid locality levels and waits when executor is lost
    recomputeLocality()
  }
/**
 * Check if the task associated with the given tid has passed the time threshold and should be
 * speculatively run.
 *
 * A task qualifies only when it is not yet successful, exactly one copy is running,
 * it has run longer than `threshold`, and it is not already marked speculatable.
 *
 * @return true if a speculative copy of the task was submitted
 */
private def checkAndSubmitSpeculatableTask(
      tid: Long,
      currentTimeMillis: Long,
      threshold: Double): Boolean = {
    val info = taskInfos(tid)
    val index = info.index
    if (!successful(index) && copiesRunning(index) == 1 &&
        info.timeRunning(currentTimeMillis) > threshold && !speculatableTasks.contains(index)) {
      addPendingTask(index, speculatable = true)
      logInfo(
        ("Marking task %d in stage %s (on %s) as speculatable because it ran more" +
          " than %.0f ms(%d speculatable tasks in this taskset now)")
          .format(index, taskSet.id, info.host, threshold, speculatableTasks.size + 1))
      speculatableTasks += index
      sched.dagScheduler.speculativeTaskSubmitted(tasks(index))
      true
    } else {
      false
    }
  }
/**
 * Check for tasks to be speculated and return true if there are any. This is called periodically
 * by the TaskScheduler.
 *
 * Two triggers exist: (1) enough tasks finished that a median-based duration
 * threshold can be computed, or (2) an explicit duration threshold was configured.
 */
override def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean = {
    // No need to speculate if the task set is zombie or is from a barrier stage. If there is only
    // one task we don't speculate since we don't have metrics to decide whether it's taking too
    // long or not, unless a task duration threshold is explicitly provided.
    if (isZombie || isBarrier || (numTasks == 1 && !speculationTaskDurationThresOpt.isDefined)) {
      return false
    }
    var foundTasks = false
    logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
    // It's possible that a task is marked as completed by the scheduler, then the size of
    // `successfulTaskDurations` may not equal to `tasksSuccessful`. Here we should only count the
    // tasks that are submitted by this `TaskSetManager` and are completed successfully.
    val numSuccessfulTasks = successfulTaskDurations.size()
    if (numSuccessfulTasks >= minFinishedForSpeculation) {
      val time = clock.getTimeMillis()
      val medianDuration = successfulTaskDurations.median
      val threshold = max(speculationMultiplier * medianDuration, minTimeToSpeculation)
      // TODO: Threshold should also look at standard deviation of task durations and have a lower
      // bound based on that.
      logDebug("Task length threshold for speculation: " + threshold)
      for (tid <- runningTasksSet) {
        foundTasks |= checkAndSubmitSpeculatableTask(tid, time, threshold)
      }
    } else if (speculationTaskDurationThresOpt.isDefined && speculationTasksLessEqToSlots) {
      val time = clock.getTimeMillis()
      val threshold = speculationTaskDurationThresOpt.get
      logDebug(s"Tasks taking longer time than provided speculation threshold: $threshold")
      for (tid <- runningTasksSet) {
        foundTasks |= checkAndSubmitSpeculatableTask(tid, time, threshold)
      }
    }
    foundTasks
  }
/**
 * Returns the configured delay-scheduling wait (ms) for the given locality level.
 * Levels without a configurable wait (NO_PREF, ANY) have no delay, i.e. 0.
 *
 * Rewritten to use Option instead of a `null` sentinel for the "no config entry"
 * case; behavior is unchanged.
 */
private def getLocalityWait(level: TaskLocality.TaskLocality): Long = {
    val localityWaitEntry = level match {
      case TaskLocality.PROCESS_LOCAL => Some(config.LOCALITY_WAIT_PROCESS)
      case TaskLocality.NODE_LOCAL => Some(config.LOCALITY_WAIT_NODE)
      case TaskLocality.RACK_LOCAL => Some(config.LOCALITY_WAIT_RACK)
      case _ => None
    }
    localityWaitEntry.map(entry => conf.get(entry)).getOrElse(0L)
  }
/**
 * Compute the locality levels used in this TaskSet. Assumes that all tasks have already been
 * added to queues using addPendingTask.
 *
 * A level is included only if there is at least one pending task for it AND at least
 * one live executor/host/rack that could satisfy it. ANY is always the last level.
 */
private def computeValidLocalityLevels(): Array[TaskLocality.TaskLocality] = {
    import TaskLocality.{PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY}
    val levels = new ArrayBuffer[TaskLocality.TaskLocality]
    val processLocalPossible = pendingTasks.forExecutor.nonEmpty &&
      pendingTasks.forExecutor.keySet.exists(execId => sched.isExecutorAlive(execId))
    if (processLocalPossible) {
      levels += PROCESS_LOCAL
    }
    val nodeLocalPossible = pendingTasks.forHost.nonEmpty &&
      pendingTasks.forHost.keySet.exists(host => sched.hasExecutorsAliveOnHost(host))
    if (nodeLocalPossible) {
      levels += NODE_LOCAL
    }
    if (pendingTasks.noPrefs.nonEmpty) {
      levels += NO_PREF
    }
    val rackLocalPossible = pendingTasks.forRack.nonEmpty &&
      pendingTasks.forRack.keySet.exists(rack => sched.hasHostAliveOnRack(rack))
    if (rackLocalPossible) {
      levels += RACK_LOCAL
    }
    levels += ANY
    logDebug("Valid locality levels for " + taskSet + ": " + levels.mkString(", "))
    levels.toArray
  }
/** Called when an executor is decommissioned; refreshes locality levels/waits. */
def executorDecommission(execId: String): Unit = {
    recomputeLocality()
    // Future consideration: if an executor is decommissioned it may make sense to add the current
    // tasks to the spec exec queue.
  }
/**
 * Recomputes the valid locality levels and their waits after cluster membership
 * changed, then re-points currentLocalityIndex at (the closest level to) the
 * level we were at before, so delay-scheduling progress is preserved.
 */
def recomputeLocality(): Unit = {
    // A zombie TaskSetManager may reach here while executorLost happens
    if (isZombie) return
    val previousLocalityLevel = myLocalityLevels(currentLocalityIndex)
    myLocalityLevels = computeValidLocalityLevels()
    localityWaits = myLocalityLevels.map(getLocalityWait)
    currentLocalityIndex = getLocalityIndex(previousLocalityLevel)
  }
/** Called when a new executor joins; refreshes locality levels/waits. */
def executorAdded(): Unit = {
    recomputeLocality()
  }
}
private[spark] object TaskSetManager {
  // The user will be warned if any stages contain a task that has a serialized size greater than
  // this (in KiB); see the emittedTaskSizeWarning check in resourceOffer.
  val TASK_SIZE_TO_WARN_KIB = 1000
}
/**
 * Set of pending tasks for various levels of locality: executor, host, rack,
 * noPrefs and anyPrefs. These collections are actually
 * treated as stacks, in which new tasks are added to the end of the
 * ArrayBuffer and removed from the end. This makes it faster to detect
 * tasks that repeatedly fail because whenever a task failed, it is put
 * back at the head of the stack. These collections may contain duplicates
 * for two reasons:
 * (1): Tasks are only removed lazily; when a task is launched, it remains
 * in all the pending lists except the one that it was launched from.
 * (2): Tasks may be re-added to these lists multiple times as a result
 * of failures.
 * Duplicates are handled in dequeueTaskFromList, which ensures that a
 * task hasn't already started running before launching it.
 */
private[scheduler] class PendingTasksByLocality {
  // Set of pending tasks for each executor.
  val forExecutor = new HashMap[String, ArrayBuffer[Int]]
  // Set of pending tasks for each host. Similar to pendingTasksForExecutor, but at host level.
  val forHost = new HashMap[String, ArrayBuffer[Int]]
  // Set containing pending tasks with no locality preferences.
  val noPrefs = new ArrayBuffer[Int]
  // Set of pending tasks for each rack -- similar to the above.
  val forRack = new HashMap[String, ArrayBuffer[Int]]
  // Set containing all pending tasks (also used as a stack, as above).
  val all = new ArrayBuffer[Int]
}
| darionyaphet/spark | core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | Scala | apache-2.0 | 48,902 |
package net.azalea.curl
import org.apache.http.impl.client.{DefaultRedirectStrategy, HttpClients}
import org.apache.http.client.methods._
import org.apache.http.protocol.HTTP._
import org.apache.http._
import org.apache.commons.io.IOUtils
import java.nio.charset.Charset
import org.apache.http.client.{CredentialsProvider, RedirectStrategy}
import org.apache.http.client.protocol.HttpClientContext
import scala.concurrent._
/**
 * Thin immutable wrapper around an Apache HttpClient response.
 *
 * The entity stream is read exactly once: `bodyAsBytes` is a lazy val, so the
 * first access drains the stream and all later accesses (including every
 * `bodyAsString` call) reuse the cached byte array.
 */
case class Response(response: HttpResponse) {
  private lazy val statusLine = response.getStatusLine
  // NOTE(review): if the server sends repeated header names, toMap keeps only the
  // last occurrence — confirm this is acceptable for callers.
  lazy val headers:Map[String, String] = response.getAllHeaders.map(h => h.getName -> h.getValue).toMap
  private lazy val body = response.getEntity
  // HTTP status code, e.g. 200.
  def status = statusLine.getStatusCode
  // Reason phrase, e.g. "OK".
  def reasonPhase = statusLine.getReasonPhrase
  // Protocol name, e.g. "HTTP".
  def protocolVersion = statusLine.getProtocolVersion.getProtocol
  // Fully buffered response body; evaluated (and the stream consumed) on first access.
  lazy val bodyAsBytes = IOUtils.toByteArray(body.getContent)
  // Decodes the buffered body with the given charset (default UTF-8).
  def bodyAsString(encoding: String = "UTF-8") = new String(bodyAsBytes, encoding)
}
/**
 * Immutable per-request settings (protocol version, standard headers, redirect
 * strategy and optional credentials) applied to each outgoing request.
 *
 * @param charset name of the charset exposed via `encoding`
 * @param customHeaders extra headers added verbatim to every request
 * @param credentials optional provider attached to the request context by HTTP.withClient
 */
case class RequestOption(protocolVersion: ProtocolVersion = HttpVersion.HTTP_1_1,
                         charset: String = UTF_8,
                         acceptEncoding: String = "gzip, deflate",
                         acceptLanguage: String = "ja, en",
                         agent: String = HTTP.DEFAULT_AGENT,
                         customHeaders: Map[String, String] = Map.empty,
                         redirectStrategy: RedirectStrategy = DefaultRedirectStrategy.INSTANCE,
                         credentials: Option[CredentialsProvider] = None) {

  /** Copies these settings onto the given request (protocol version + headers). */
  def inject(request: HttpRequestBase) {
    request.setProtocolVersion(protocolVersion)
    request.addHeader("Accept-Encoding", acceptEncoding)
    request.addHeader("Accept-Language", acceptLanguage)
    request.addHeader("User-Agent", agent)
    // foreach, not map: headers are added purely for their side effect, so no
    // result collection should be built.
    customHeaders.foreach {
      case (k, v) => request.addHeader(k, v)
    }
  }

  /** Charset object resolved from `charset`; computed once on first use. */
  lazy val encoding = Charset.forName(charset)
}
/**
 * Minimal curl-like HTTP helper built on top of Apache HttpClient.
 *
 * Every call builds a fresh client, executes the request and wraps the raw
 * [[HttpResponse]] in a [[Response]]. The async variants simply run the
 * blocking call inside a [[Future]] on the caller-supplied execution context.
 *
 * NOTE: neither the client nor the response is explicitly closed here; the
 * connection is released once the response entity has been fully consumed.
 */
object HTTP {

  /**
   * Default User-Agent header *value*. The header name is added separately by
   * [[RequestOption.inject]], so it must not be repeated in the value — the
   * previous "User-Agent: ScalaCurl/0.1" produced a doubled header name.
   */
  val DEFAULT_AGENT = "ScalaCurl/0.1"

  /** Default request options, used when the caller supplies no implicit. */
  val options = RequestOption()

  /**
   * Builds a client honouring the option's redirect strategy, then executes
   * `request` with a context carrying the optional credentials provider.
   */
  private def withClient(request: HttpRequestBase, option: RequestOption) = {
    val client = HttpClients.custom()
      .setRedirectStrategy(option.redirectStrategy)
      .build()
    val httpContext = HttpClientContext.create()
    option.credentials.foreach(provider => {
      httpContext.setCredentialsProvider(provider)
    })
    // The context must be passed to execute(): previously it was created and
    // populated but never used, so configured credentials were silently ignored.
    client.execute(request, httpContext)
  }

  /**
   * GET command.
   */
  def get(url: String)(implicit requestOption: RequestOption = options): Response = {
    val setting = new HttpGet(url)
    requestOption.inject(setting)
    Response(withClient(setting, requestOption))
  }

  /**
   * GET command, executed asynchronously on the given execution context.
   */
  def asyncGet(url: String)(implicit execctx: ExecutionContext, requestOption: RequestOption = HTTP.options): Future[Response] = Future { get(url) }

  /**
   * DELETE command.
   * @param url target URL
   * @param requestOption request settings (headers, redirects, credentials)
   * @return wrapped response
   */
  def delete(url: String)(implicit requestOption: RequestOption = options): Response = {
    val setting = new HttpDelete(url)
    requestOption.inject(setting)
    Response(withClient(setting, requestOption))
  }

  /**
   * DELETE command, executed asynchronously on the given execution context.
   */
  def asyncDelete(url: String)(implicit execctx: ExecutionContext, requestOption: RequestOption = HTTP.options): Future[Response] = Future { delete(url) }

  /**
   * PUT command.
   * @param url target URL
   * @param entity request body
   * @param requestOption request settings (headers, redirects, credentials)
   * @return wrapped response
   */
  def put(url: String, entity: HttpEntity)(implicit requestOption: RequestOption = options): Response = {
    val setting = new HttpPut(url)
    requestOption.inject(setting)
    setting.setEntity(entity)
    Response(withClient(setting, requestOption))
  }

  /**
   * PUT command, executed asynchronously on the given execution context.
   */
  def asyncPut(url: String, entity: HttpEntity)(implicit execctx: ExecutionContext, requestOption: RequestOption = HTTP.options): Future[Response] = Future { put(url, entity) }

  /**
   * POST command.
   * @param url target URL
   * @param entity request body
   * @param requestOption request settings (headers, redirects, credentials)
   * @return wrapped response
   */
  def post(url: String, entity: HttpEntity)(implicit requestOption: RequestOption = options): Response = {
    val setting = new HttpPost(url)
    requestOption.inject(setting)
    setting.setEntity(entity)
    Response(withClient(setting, requestOption))
  }

  /**
   * POST command, executed asynchronously on the given execution context.
   */
  def asyncPost(url: String, entity: HttpEntity)(implicit execctx: ExecutionContext, requestOption: RequestOption = HTTP.options): Future[Response] = Future { post(url, entity) }
}
| Sunao-Yoshii/scala_curl | src/main/scala/net/azalea/curl/HTTP.scala | Scala | apache-2.0 | 4,451 |
package scredis.exceptions
/**
 * Wraps any I/O failure (socket, connection loss, protocol-level read/write
 * error) raised while communicating with the Redis server.
 *
 * @param message optional human-readable description of the failure (may be null)
 * @param cause   optional underlying exception (may be null)
 */
final case class RedisIOException(
  message: String = null,
  cause: Throwable = null
) extends RedisException(message, cause) | Livestream/scredis | src/main/scala/scredis/exceptions/RedisIOException.scala | Scala | apache-2.0 | 190 |
package weatherApp.components
import japgolly.scalajs.react.ScalaFnComponent
import japgolly.scalajs.react.vdom.html_<^._
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
/**
 * Scala.js React function component: a bordered link button shown in the
 * page header, optionally decorated with the GitHub logo (login variant).
 */
object HeaderBtn {

  // The bundler resolves this @JSImport to the image asset; at runtime the
  // imported module value is used as the image URL (hence the String cast below).
  @JSImport("../../assets/images/github.png", JSImport.Namespace)
  @js.native
  object GithubLogo extends js.Any

  // text: button label; url: link target; isLogin: show the GitHub logo icon.
  case class Props(text: String, url: String, isLogin: Boolean = false)

  val Component = ScalaFnComponent[Props](props => {
    // Logo only for the login variant; otherwise an empty placeholder div.
    val icon = if (props.isLogin) <.img(^.width := 25.px, ^.marginRight := 5.px, ^.src := GithubLogo.asInstanceOf[String])
    else <.div()
    <.div(
      ^.display := "flex",
      ^.justifyContent := "flex-end",
      <.a(
        ^.cls := "secondary-hover",
        ^.display := "flex",
        ^.justifyContent := "center",
        ^.alignItems := "center",
        ^.border := "1px solid black",
        ^.borderRadius := 3.px,
        ^.padding := 5.px,
        ^.textDecoration := "none",
        ^.href := props.url,
        icon,
        props.text
      )
    )
  })

  def apply(props: Props) = {
    // NOTE(review): this bare reference looks intentional — presumably it forces
    // evaluation of the @JSImport'd module so the asset is bundled/loaded before
    // the component renders. Confirm before removing.
    GithubLogo
    Component(props)
  }
}
| malaman/scala-weather-app | frontend/src/main/scala/weatherApp/components/HeaderBtn.scala | Scala | mit | 1,106 |
// Copyright (c) 2018. Distributed under the MIT License (see included LICENSE file).
package cocoa.foundation
import scalanative.native._
import objc._
/**
 * Scala Native binding for Cocoa's NSMutableDictionary instance API.
 * Method bodies are `extern`; the @ObjC macro presumably wires each stub to
 * the corresponding Objective-C selector (trailing underscores mark selector
 * argument positions).
 */
@ObjC
class NSMutableDictionary[K<:NSObject,V<:NSObject] extends NSDictionary[K,V] {
  @inline def removeObjectForKey_(aKey: K): Unit = extern
  @inline def setObject_aKey_(anObject: V, aKey: K): Unit = extern
  @inline def initWithCapacity_(numItems: NSUInteger): NSMutableDictionary[K, V] = extern
  // NOTE(review): parameter typed V, but ObjC's addEntriesFromDictionary: takes a
  // dictionary — looks like the binding type may be too narrow; confirm.
  @inline def addEntriesFromDictionary_(otherDictionary: V): Unit = extern
  @inline def removeAllObjects(): Unit = extern
  @inline def removeObjectsForKeys_(keyArray: K): Unit = extern
  @inline def setDictionary_(otherDictionary: V): Unit = extern
  @inline def setObject_forKey_(obj: V, key: K): Unit = extern
  // Overrides narrow the inherited return type to the mutable variant.
  @inline override def initWithContentsOfFile_(path: NSString): NSMutableDictionary[K, V] = extern
  @inline override def initWithContentsOfURL_(url: NSURL): NSMutableDictionary[K, V] = extern
}
/**
 * Class-side (static/factory) API of NSMutableDictionary; mirrors the ObjC
 * class methods. All stubs are `extern` and dispatched by the @ObjCClass macro.
 */
@ObjCClass
abstract class NSMutableDictionaryClass extends NSDictionaryClass {
  @inline def dictionaryWithCapacity_[K<:NSObject,V<:NSObject](numItems: NSUInteger): NSMutableDictionary[K, V] = extern
  // Overrides narrow the inherited factory return types to the mutable variant.
  @inline override def dictionaryWithContentsOfFile_[K<:NSObject,V<:NSObject](path: NSString): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithContentsOfURL_[K<:NSObject,V<:NSObject](url: NSURL): NSMutableDictionary[K, V] = extern
  @inline def dictionaryWithSharedKeySet_[K<:NSObject,V<:NSObject](keyset: id): NSMutableDictionary[K, V] = extern
  @inline override def dictionary[K<:NSObject,V<:NSObject](): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithObject_key_[K<:NSObject,V<:NSObject](`object`: V, key: K): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithObjects_forKeys_count_[K<:NSObject,V<:NSObject](objects: Ptr[id], keys: Ptr[id], cnt: NSUInteger): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithObjectsAndKeys_[K<:NSObject,V<:NSObject](firstObject: id): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithDictionary_[K<:NSObject,V<:NSObject](dict: V): NSMutableDictionary[K, V] = extern
  @inline override def dictionaryWithObjects_forKeys_[K<:NSObject,V<:NSObject](objects: Ptr[id], keys: Ptr[id]): NSMutableDictionary[K, V] = extern
  @inline override def sharedKeySetForKeys_[K<:NSObject](keys: K): id = extern
}
/**
 * Companion/singleton giving Scala-friendly factories on top of the bound
 * ObjC class methods.
 */
object NSMutableDictionary extends NSMutableDictionaryClass {
  override type InstanceType = NSMutableDictionary[_,_]

  /** Convenience varargs factory: NSMutableDictionary(k1 -> v1, k2 -> v2, ...). */
  def apply[K<:NSObject, V<:NSObject](kv: (K,V)*): NSDictionary[K,V] = dictionaryWithObjects(kv)

  // TODO: use Iterable instead of Seq?
  /**
   * Builds a dictionary by marshalling the keys and values into two parallel
   * C arrays and calling dictionaryWithObjects:forKeys:count:.
   */
  def dictionaryWithObjects[K<:NSObject, V<:NSObject](objects: Seq[(K,V)]): NSMutableDictionary[K,V] = Zone { implicit z =>
    val count = objects.size
    // NOTE(review): stackalloc[id](n) takes an *element* count, so multiplying
    // by sizeof[id] appears to over-allocate by the pointer size (harmless but
    // wasteful) — confirm against the scala-native version in use.
    val objArray = stackalloc[id]( sizeof[id] * count)
    val keyArray = stackalloc[id]( sizeof[id] * count )
    for(i<-0 until count) {
      !(keyArray + i) = objects(i)._1.toPtr
      !(objArray + i) = objects(i)._2.toPtr
    }
    dictionaryWithObjects_forKeys_count_(objArray,keyArray,count.toULong)
    //objc_msgSend(__cls,__sel_dictionaryWithObjects_forKeys_count,objArray,keyArray,count).cast[NSDictionary[K,V]]
  }

  /** A fresh empty mutable dictionary (alloc/init). */
  def empty[K<:NSObject,V<:NSObject]: NSMutableDictionary[K,V] = alloc().init().asInstanceOf[NSMutableDictionary[K,V]]
} | jokade/scalanative-cocoa | foundation/src/main/scala/cocoa/foundation/NSMutableDictionary.scala | Scala | mit | 3,374 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.dllib.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.dllib.nn.internal.SpatialDropout3D
import com.intel.analytics.bigdl.dllib.nn.internal.{Sequential => KSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class SpatialDropout3DSpec extends KerasBaseSpec {

  /**
   * Builds a single-layer Keras-style model with SpatialDropout3D in the given
   * data format ("th" = channel-first, "tf" = channel-last), verifies the
   * inferred output shape and runs one forward + backward pass.
   */
  private def runForwardBackward(dimOrdering: String): Unit = {
    val model = KSequential[Float]()
    model.add(SpatialDropout3D[Float](0.5, dimOrdering, inputShape = Shape(3, 4, 5, 6)))
    model.getOutputShape().toSingle().toArray should be (Array(-1, 3, 4, 5, 6))
    val input = Tensor[Float](2, 3, 4, 5, 6).rand()
    val output = model.forward(input)
    model.backward(input, output)
  }

  "SpatialDropout3D CHANNEL_FIRST forward and backward" should "work properly" in {
    runForwardBackward("th")
  }

  "SpatialDropout3D CHANNEL_LAST forward and backward" should "work properly" in {
    runForwardBackward("tf")
  }
}
/** Round-trips a SpatialDropout3D layer through module serialization. */
class SpatialDropout3DSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val dropout = SpatialDropout3D[Float](0.5, "tf", inputShape = Shape(3, 4, 5, 6))
    dropout.build(Shape(2, 3, 4, 5, 6))
    val randomInput = Tensor[Float](2, 3, 4, 5, 6).apply1(_ => Random.nextFloat())
    runSerializationTest(dropout, randomInput)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/SpatialDropout3DSpec.scala | Scala | apache-2.0 | 2,398 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.events
import org.scalatest.junit.JUnit3Suite
import org.scalatest.DoNotDiscover
@DoNotDiscover
class TestLocationJUnit3Suite extends JUnit3Suite with TestLocationServices {

  val suiteTypeName = "org.scalatest.events.TestLocationJUnit3Suite"

  // This suite deliberately contains exactly one failing JUnit 3 test, so the
  // only event expected to carry location information is the TestFailed one.
  val expectedSuiteStartingList = Nil
  val expectedSuiteCompletedList = Nil
  val expectedSuiteAbortedList = Nil
  val expectedTestSucceededList = Nil
  val expectedTestFailedList =
    List(SeeStackDepthExceptionPair("testFail(org.scalatest.events.TestLocationJUnit3Suite)"))
  val expectedInfoProvidedList = Nil

  /** Always fails; exists only to produce a TestFailed event with a location. */
  def testFail(): Unit = { fail() }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/events/TestLocationJUnit3Suite.scala | Scala | apache-2.0 | 1,210 |
/*
* Copyright 2015
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package influxdbreporter.core.collectors
import com.codahale.metrics.Gauge
import influxdbreporter.core.metrics.Metric
import Metric._
import influxdbreporter.core.metrics.Metric
/**
 * Type class associating a Codahale metric type with the [[MetricCollector]]
 * that knows how to report it to InfluxDB.
 *
 * @tparam T the concrete Codahale metric type
 */
trait CollectorOps[T <: CodahaleMetric] {
  // The collector used for metrics of type T.
  def collector: MetricCollector[T]
}
/**
 * Default implicit [[CollectorOps]] instances, one per supported Codahale
 * metric type. Being in the companion object, they are found automatically
 * via implicit scope.
 */
object CollectorOps {

  implicit object CollectorForCounter extends CollectorOps[CodahaleCounter] {
    override def collector: MetricCollector[CodahaleCounter] = CounterCollector()
  }

  // Explicit result type added: an implicit def without one is fragile (its
  // inferred type can change silently) and is rejected by Scala 3.
  implicit def CollectorForGauge[T]: CollectorOps[Gauge[T]] = new CollectorOps[Gauge[T]] {
    override def collector: MetricCollector[Gauge[T]] = GaugeCollector[T]()
  }

  implicit object CollectorForHistogram extends CollectorOps[CodahaleHistogram] {
    override def collector: MetricCollector[CodahaleHistogram] = HistogramCollector()
  }

  implicit object CollectorForMeter extends CollectorOps[CodahaleMeter] {
    override def collector: MetricCollector[CodahaleMeter] = MeterCollector()
  }

  implicit object CollectorForTimer extends CollectorOps[CodahaleTimer] {
    override def collector: MetricCollector[CodahaleTimer] = SecondTimerCollector
  }
} | TouK/influxdb-reporter | core/src/main/scala/influxdbreporter/core/collectors/CollectorOps.scala | Scala | apache-2.0 | 1,674 |
package main.scala.nl.in4392.worker
import com.typesafe.config.ConfigFactory
import akka.kernel.Bootable
import akka.actor.{ ActorRef, Props, Actor, ActorSystem }
import akka.actor.ActorPath
import java.util.UUID
import nl.tudelft.ec2interface.instancemanager._
/**
 * Akka kernel Bootable for a worker node. NOTE: all wiring happens eagerly in
 * the constructor — reading the master's location, creating the actor system
 * and spawning both actors — so merely constructing this class boots the node.
 */
class WorkerDaemon extends Bootable {
  // Master location and this instance's identity, read from a local file
  // (presumably written at deploy time — TODO confirm).
  val instanceInfo = new RemoteActorInfo().getInfoFromFile("conf/masterInfo")
  val workerId = instanceInfo.getSelfInstanceID()
  // The "workerSys" section of the Typesafe config drives the actor system.
  val config = ConfigFactory.load().getConfig("workerSys")
  val system = ActorSystem("WorkerNode", config)
  // Both actors are given this worker's id and the master's actor path.
  val workerActor = system.actorOf(Props(new WorkerActor(workerId,ActorPath.fromString(instanceInfo.getActorPath()))))
  val watchActor = system.actorOf(Props(new MonitorActor(workerId,ActorPath.fromString(instanceInfo.getActorPath()))))

  // Intentionally empty: initialization already ran in the constructor above.
  def startup() {
  }

  def shutdown() {
    system.shutdown()
  }
}
object WorkerApp {
  /** JVM entry point: constructing the daemon boots the worker actor system. */
  def main(args: Array[String]): Unit = {
    val daemon = new WorkerDaemon
    println("[WorkerNode] Started WorkerNode")
  }
}
| jhejderup/Awsseract | awsseract-core/src/main/scala/nl/in4392/worker/WorkerDaemon.scala | Scala | apache-2.0 | 993 |
package scray.hdfs.compaction.conf
/**
 * Container for the HDFS compaction job's configuration parameters.
 *
 * Fields are mutable `var`s, so callers can overwrite the defaults (which
 * point at a fixed test cluster address) before launching the job.
 */
case class CompactionJobParameter(
  // Directory holding the index files to compact.
  var indexFilesInputPath: String = "hdfs://10.11.22.41/scray-data/in",
  // Directory receiving the compacted index files.
  var indexFilesOutputPath: String = "hdfs://10.11.22.41/scray-data/out",
  // Directory holding the data files to compact.
  var dataFilesInputPath: String = "hdfs://10.11.22.41/scray-data/in",
  // Directory receiving the compacted data files.
  var dataFilesOutputPath: String = "hdfs://10.11.22.41/scray-data/out"
) | scray/scray | scray-hdfs/modules/scray-hdfs-compaction-job/src/main/scala/scray/hdfs/compaction/conf/CompactionJobParameter.scala | Scala | apache-2.0 | 437 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.util
import org.apache.avro.generic.GenericData
import org.apache.avro.{ SchemaBuilder, Schema }
import org.scalatest._
/**
 * Verifies Flattener: nested Avro record schemas/records are flattened into a
 * single-level schema whose field names join the nesting path with "__".
 */
class FlattenerSuite extends FunSuite {

  test("Flatten schema and record") {
    // Source schema: one required nested record, one optional nested record,
    // plus top-level primitive fields.
    val recordSchema: Schema = SchemaBuilder.record("Test").fields
      .requiredString("aString")
      .name("nested1").`type`.record("NestedType1").fields
      .optionalInt("anInt")
      .requiredLong("aLong")
      .nullableFloat("aFloat", 11.0f)
      .endRecord
      .noDefault
      .name("nested2").`type`.optional.record("NestedType2").fields
      .optionalInt("anInt")
      .requiredLong("aLong")
      .nullableFloat("aFloat", 12.0f)
      .endRecord
      .optionalInt("anInt")
      .optionalLong("aLong")
      .endRecord
    // Expected flat schema: nested fields hoisted as "<record>__<field>";
    // fields under the *optional* nested2 become optional themselves.
    val expectedSchema: Schema = SchemaBuilder.record("Test_flat").fields
      .requiredString("aString")
      .optionalInt("nested1__anInt")
      .requiredLong("nested1__aLong")
      .nullableFloat("nested1__aFloat", 11.0f)
      .optionalInt("nested2__anInt")
      .optionalLong("nested2__aLong") // optional since nested2 is optional
      .nullableFloat("nested2__aFloat", 12.0f)
      .optionalInt("anInt")
      .optionalLong("aLong")
      .endRecord
    assert(Flattener.flattenSchema(recordSchema) === expectedSchema)
    // A record with nested1 populated and nested2 absent (null).
    val record: GenericData.Record = new GenericData.Record(recordSchema)
    record.put("aString", "triangle")
    val nested1: GenericData.Record = new GenericData.Record(
      recordSchema.getField("nested1").schema)
    nested1.put("anInt", 1)
    nested1.put("aLong", 2L)
    nested1.put("aFloat", 100.0f)
    record.put("nested1", nested1)
    record.put("nested2", null)
    record.put("anInt", null)
    record.put("aLong", 3L)
    // Flattened expectation: a missing nested record yields nulls in all of
    // its flattened columns.
    val expected: GenericData.Record = new GenericData.Record(expectedSchema)
    expected.put("aString", "triangle")
    expected.put("nested1__anInt", 1)
    expected.put("nested1__aLong", 2L)
    expected.put("nested1__aFloat", 100.0f)
    expected.put("nested2__anInt", null)
    expected.put("nested2__aLong", null)
    expected.put("nested2__aFloat", null)
    expected.put("anInt", null)
    expected.put("aLong", 3L)
    assert(Flattener.flattenRecord(expectedSchema, record) === expected)
  }
}
| VinACE/adam | adam-core/src/test/scala/org/bdgenomics/adam/util/FlattenerSuite.scala | Scala | apache-2.0 | 3,044 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.orbeon.apache.xerces.impl
import java.io.EOFException
import java.io.IOException
import org.orbeon.apache.xerces.impl.XMLDocumentScannerImpl._
import org.orbeon.apache.xerces.impl.dtd.XMLDTDDescription
import org.orbeon.apache.xerces.impl.io.MalformedByteSequenceException
import org.orbeon.apache.xerces.impl.validation.ValidationManager
import org.orbeon.apache.xerces.util.NamespaceSupport
import org.orbeon.apache.xerces.util.XMLChar
import org.orbeon.apache.xerces.util.XMLStringBuffer
import org.orbeon.apache.xerces.xni.Augmentations
import org.orbeon.apache.xerces.xni.NamespaceContext
import org.orbeon.apache.xerces.xni.XMLResourceIdentifier
import org.orbeon.apache.xerces.xni.XMLString
import org.orbeon.apache.xerces.xni.XNIException
import org.orbeon.apache.xerces.xni.parser.XMLComponentManager
import org.orbeon.apache.xerces.xni.parser.XMLConfigurationException
import org.orbeon.apache.xerces.xni.parser.XMLDTDScanner
import org.orbeon.apache.xerces.xni.parser.XMLInputSource
import scala.util.control.Breaks
protected[impl] object XMLDocumentScannerImpl {

  /** Scanner state: XML declaration. */
  val SCANNER_STATE_XML_DECL = 0

  /** Scanner state: prolog. */
  val SCANNER_STATE_PROLOG = 5

  /** Scanner state: trailing misc. */
  val SCANNER_STATE_TRAILING_MISC = 12

  /** Scanner state: DTD internal declarations. */
  val SCANNER_STATE_DTD_INTERNAL_DECLS = 17

  /** Scanner state: open DTD external subset. */
  val SCANNER_STATE_DTD_EXTERNAL = 18

  /** Scanner state: DTD external declarations. */
  val SCANNER_STATE_DTD_EXTERNAL_DECLS = 19

  /** Feature identifier: load external DTD. */
  val LOAD_EXTERNAL_DTD = Constants.XERCES_FEATURE_PREFIX + Constants.LOAD_EXTERNAL_DTD_FEATURE

  /** Feature identifier: disallow doctype declarations. */
  val DISALLOW_DOCTYPE_DECL_FEATURE = Constants.XERCES_FEATURE_PREFIX + Constants.DISALLOW_DOCTYPE_DECL_FEATURE

  /** Property identifier: DTD scanner. */
  val DTD_SCANNER = Constants.XERCES_PROPERTY_PREFIX + Constants.DTD_SCANNER_PROPERTY

  /** Property identifier: validation manager. */
  val VALIDATION_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.VALIDATION_MANAGER_PROPERTY

  /** Property identifier: namespace context. */
  val NAMESPACE_CONTEXT = Constants.XERCES_PROPERTY_PREFIX + Constants.NAMESPACE_CONTEXT_PROPERTY

  /** Recognized features. */
  private val RECOGNIZED_FEATURES = Array(LOAD_EXTERNAL_DTD, DISALLOW_DOCTYPE_DECL_FEATURE)

  /** Feature defaults (parallel to RECOGNIZED_FEATURES). */
  private val FEATURE_DEFAULTS = Array[java.lang.Boolean](true, false)

  /** Recognized properties. */
  private val RECOGNIZED_PROPERTIES = Array(DTD_SCANNER, VALIDATION_MANAGER, NAMESPACE_CONTEXT)

  /** Property defaults (parallel to RECOGNIZED_PROPERTIES; none defined). */
  private val PROPERTY_DEFAULTS = Array(null, null, null)
}
/**
* This class is responsible for scanning XML document structure
* and content. The scanner acts as the source for the document
* information which is communicated to the document handler.
*
* This component requires the following features and properties from the
* component manager that uses it:
*
* - http://xml.org/sax/features/namespaces
* - http://xml.org/sax/features/validation
* - http://apache.org/xml/features/nonvalidating/load-external-dtd
* - http://apache.org/xml/features/scanner/notify-char-refs
* - http://apache.org/xml/features/scanner/notify-builtin-refs
* - http://apache.org/xml/properties/internal/symbol-table
* - http://apache.org/xml/properties/internal/error-reporter
* - http://apache.org/xml/properties/internal/entity-manager
* - http://apache.org/xml/properties/internal/dtd-scanner
*
*/
class XMLDocumentScannerImpl extends XMLDocumentFragmentScannerImpl {
import XMLDocumentFragmentScannerImpl._
  /** DTD scanner. */
  protected var fDTDScanner: XMLDTDScanner = _

  /** Validation manager. */
  protected var fValidationManager: ValidationManager = _

  /** True while the DTD is being scanned. */
  protected var fScanningDTD: Boolean = _

  /** Doctype name. */
  protected var fDoctypeName: String = _

  /** Doctype declaration public identifier. */
  protected var fDoctypePublicId: String = _

  /** Doctype declaration system identifier. */
  protected var fDoctypeSystemId: String = _

  /** Namespace support. */
  protected var fNamespaceContext: NamespaceContext = new NamespaceSupport()

  /** Load external DTD. */
  protected var fLoadExternalDTD: Boolean = true

  /** Disallow doctype declaration. */
  protected var fDisallowDoctype: Boolean = false

  /** Seen doctype declaration. */
  protected var fSeenDoctypeDecl: Boolean = _

  /** XML declaration dispatcher. */
  protected val fXMLDeclDispatcher = new XMLDeclDispatcher()

  /** Prolog dispatcher. */
  protected val fPrologDispatcher = new PrologDispatcher()

  /** DTD dispatcher. */
  protected val fDTDDispatcher = new DTDDispatcher()

  /** Trailing miscellaneous section dispatcher. */
  protected val fTrailingMiscDispatcher = new TrailingMiscDispatcher()

  /** Scratch array of 3 strings; scanDoctypeDecl reads system id from index 0
      and public id from index 1 after scanExternalID fills it. */
  private val fStrings = new Array[String](3)

  /** Scratch string. */
  private val fString = new XMLString()

  /** Scratch string buffer. */
  private val fStringBuffer = new XMLStringBuffer()

  /** External subset source, if resolved. */
  private var fExternalSubsetSource: XMLInputSource = null

  /** Reusable DTD description used when querying the external subset resolver. */
  private val fDTDDescription = new XMLDTDDescription(null, null, null, null, null)
  /**
   * Sets the input source and opens the document entity.
   *
   * @param inputSource The input source.
   *
   * @throws IOException Thrown on i/o error.
   */
  override def setInputSource(inputSource: XMLInputSource): Unit = {
    // Register this scanner as entity handler *before* opening the document
    // entity so the start/end-entity callbacks are delivered here.
    fEntityManager.setEntityHandler(this)
    fEntityManager.startDocumentEntity(inputSource)
  }
  /**
   * Resets the component: clears per-document doctype state, re-reads the
   * features/properties this scanner depends on from the component manager,
   * and rewinds to the XML-declaration state.
   */
  override def reset(componentManager: XMLComponentManager): Unit = {
    super.reset(componentManager)
    // Forget everything learned about the previous document's doctype.
    fDoctypeName = null
    fDoctypePublicId = null
    fDoctypeSystemId = null
    fSeenDoctypeDecl = false
    fScanningDTD = false
    fExternalSubsetSource = null
    // Fast path: when parser settings are unchanged, skip re-querying the
    // component manager and just rewind scanner state.
    if (! fParserSettings) {
      fNamespaceContext.reset()
      setScannerState(SCANNER_STATE_XML_DECL)
      setDispatcher(fXMLDeclDispatcher)
      return
    }
    // Each feature/property falls back to a default when the component
    // manager does not recognize it.
    try {
      fLoadExternalDTD = componentManager.getFeature(LOAD_EXTERNAL_DTD)
    } catch {
      case e: XMLConfigurationException => fLoadExternalDTD = true
    }
    try {
      fDisallowDoctype = componentManager.getFeature(DISALLOW_DOCTYPE_DECL_FEATURE)
    } catch {
      case e: XMLConfigurationException => fDisallowDoctype = false
    }
    fDTDScanner = componentManager.getProperty(DTD_SCANNER).asInstanceOf[XMLDTDScanner]
    try {
      fValidationManager = componentManager.getProperty(VALIDATION_MANAGER).asInstanceOf[ValidationManager]
    } catch {
      case e: XMLConfigurationException => fValidationManager = null
    }
    try {
      fNamespaceContext = componentManager.getProperty(NAMESPACE_CONTEXT).asInstanceOf[NamespaceContext]
    } catch {
      case e: XMLConfigurationException =>
    }
    // Guarantee a usable namespace context even if none was configured.
    if (fNamespaceContext eq null) {
      fNamespaceContext = new NamespaceSupport()
    }
    fNamespaceContext.reset()
    // Start every document at the XML-declaration state.
    setScannerState(SCANNER_STATE_XML_DECL)
    setDispatcher(fXMLDeclDispatcher)
  }
/**
* Returns a list of feature identifiers that are recognized by
* this component. This method may return null if no features
* are recognized by this component.
*/
override def getRecognizedFeatures: Array[String] = {
val featureIds = super.getRecognizedFeatures
val length = if (featureIds ne null) featureIds.length else 0
val combinedFeatureIds = new Array[String](length + RECOGNIZED_FEATURES.length)
if (featureIds ne null) {
System.arraycopy(featureIds, 0, combinedFeatureIds, 0, featureIds.length)
}
System.arraycopy(RECOGNIZED_FEATURES, 0, combinedFeatureIds, length, RECOGNIZED_FEATURES.length)
combinedFeatureIds
}
  /**
   * Sets the state of a feature. Called by the component manager any time
   * after reset when a feature changes state. Features not affecting this
   * component are silently ignored.
   */
  override def setFeature(featureId: String, state: Boolean): Unit = {
    super.setFeature(featureId, state)
    if (featureId.startsWith(Constants.XERCES_FEATURE_PREFIX)) {
      // Compare by suffix length + endsWith so the full feature id string
      // never has to be re-concatenated for each comparison.
      val suffixLength = featureId.length - Constants.XERCES_FEATURE_PREFIX.length
      if (suffixLength == Constants.LOAD_EXTERNAL_DTD_FEATURE.length &&
          featureId.endsWith(Constants.LOAD_EXTERNAL_DTD_FEATURE)) {
        fLoadExternalDTD = state
        return
      } else if (suffixLength == Constants.DISALLOW_DOCTYPE_DECL_FEATURE.length &&
          featureId.endsWith(Constants.DISALLOW_DOCTYPE_DECL_FEATURE)) {
        fDisallowDoctype = state
        return
      }
    }
  }
/**
* Returns a list of property identifiers that are recognized by
* this component. This method may return null if no properties
* are recognized by this component.
*/
override def getRecognizedProperties: Array[String] = {
val propertyIds = super.getRecognizedProperties
val length = if (propertyIds ne null) propertyIds.length else 0
val combinedPropertyIds = new Array[String](length + RECOGNIZED_PROPERTIES.length)
if (propertyIds ne null) {
System.arraycopy(propertyIds, 0, combinedPropertyIds, 0, propertyIds.length)
}
System.arraycopy(RECOGNIZED_PROPERTIES, 0, combinedPropertyIds, length, RECOGNIZED_PROPERTIES.length)
combinedPropertyIds
}
  /**
   * Sets the value of a property. Called by the component manager any time
   * after reset when a property changes value. Properties not affecting this
   * component are silently ignored.
   */
  override def setProperty(propertyId: String, value: AnyRef): Unit = {
    super.setProperty(propertyId, value)
    if (propertyId.startsWith(Constants.XERCES_PROPERTY_PREFIX)) {
      // Same suffix-length + endsWith comparison scheme as setFeature.
      val suffixLength = propertyId.length - Constants.XERCES_PROPERTY_PREFIX.length
      if (suffixLength == Constants.DTD_SCANNER_PROPERTY.length &&
          propertyId.endsWith(Constants.DTD_SCANNER_PROPERTY)) {
        fDTDScanner = value.asInstanceOf[XMLDTDScanner]
      }
      if (suffixLength == Constants.NAMESPACE_CONTEXT_PROPERTY.length &&
          propertyId.endsWith(Constants.NAMESPACE_CONTEXT_PROPERTY)) {
        // A null namespace context is ignored; the existing one is kept.
        if (value ne null) {
          fNamespaceContext = value.asInstanceOf[NamespaceContext]
        }
      }
      return
    }
  }
/**
* Returns the default state for a feature, or null if this
* component does not want to report a default value for this
* feature.
*/
override def getFeatureDefault(featureId: String): java.lang.Boolean = {
RECOGNIZED_FEATURES.indices.find(RECOGNIZED_FEATURES(_) == featureId)
.map(FEATURE_DEFAULTS(_))
.getOrElse(super.getFeatureDefault(featureId))
}
/**
* Returns the default state for a property, or null if this
* component does not want to report a default value for this
* property.
*/
override def getPropertyDefault(propertyId: String): AnyRef = {
RECOGNIZED_PROPERTIES.indices.find(RECOGNIZED_PROPERTIES(_) == propertyId)
.map(PROPERTY_DEFAULTS(_))
.getOrElse(super.getPropertyDefault(propertyId))
}
  /**
   * Notifies of the start of an entity. The document entity has the
   * pseudo-name "[xml]" and the DTD "[dtd]"; parameter entity names start
   * with '%' and general entities use their plain name.
   *
   * @param name       The name of the entity.
   * @param identifier The resource identifier.
   * @param encoding   The auto-detected IANA encoding name of the entity
   *                   stream, or null where not auto-detected (e.g. internal
   *                   entities or a document entity parsed from a Reader).
   *
   * @throws XNIException Thrown by handler to signal an error.
   */
  override def startEntity(name: String,
                           identifier: XMLResourceIdentifier,
                           encoding: String,
                           augs: Augmentations): Unit = {
    super.startEntity(name, identifier, encoding, augs)
    // Any external entity other than the document entity may begin with a
    // text declaration (<?xml ...?>), so prepare to scan one.
    if (name != "[xml]" && fEntityScanner.isExternal) {
      setScannerState(SCANNER_STATE_TEXT_DECL)
    }
    // Opening the document entity maps to the startDocument callback.
    if ((fDocumentHandler ne null) && name == "[xml]") {
      fDocumentHandler.startDocument(fEntityScanner, encoding, fNamespaceContext, null)
    }
  }
  /**
   * Notifies the end of an entity ("[xml]" = document entity, "[dtd]" = DTD;
   * parameter entity names start with '%', general entities use their name).
   *
   * @param name The name of the entity.
   *
   * @throws XNIException Thrown by handler to signal an error.
   */
  override def endEntity(name: String, augs: Augmentations): Unit = {
    super.endEntity(name, augs)
    // Closing the document entity maps to the endDocument callback.
    if ((fDocumentHandler ne null) && name == "[xml]") {
      fDocumentHandler.endDocument(null)
    }
  }
  /** Creates the dispatcher used while scanning document content. */
  override protected def createContentDispatcher(): Dispatcher = new ContentDispatcher()
  /**
   * Scans a doctype declaration (the scanner is positioned just after
   * "<!DOCTYPE"). Records the root element name and any external identifiers,
   * consults the external subset resolver when no SYSTEM id was given, and
   * reports the declaration to the document handler.
   *
   * @return true when an internal subset follows (a '[' was consumed),
   *         false when the declaration ended with '>'.
   */
  protected def scanDoctypeDecl(): Boolean = {
    if (!fEntityScanner.skipSpaces()) {
      reportFatalError("MSG_SPACE_REQUIRED_BEFORE_ROOT_ELEMENT_TYPE_IN_DOCTYPEDECL", null)
    }
    fDoctypeName = fEntityScanner.scanName()
    if (fDoctypeName eq null) {
      reportFatalError("MSG_ROOT_ELEMENT_TYPE_REQUIRED", null)
    }
    // Optional external id: scanExternalID fills fStrings with
    // index 0 = system id, index 1 = public id.
    if (fEntityScanner.skipSpaces()) {
      scanExternalID(fStrings, optionalSystemId = false)
      fDoctypeSystemId = fStrings(0)
      fDoctypePublicId = fStrings(1)
      fEntityScanner.skipSpaces()
    }
    fHasExternalDTD = fDoctypeSystemId ne null
    // No explicit external DTD: give the external subset resolver a chance to
    // supply one based on the root element name and the document's system id.
    if (! fHasExternalDTD && (fExternalSubsetResolver ne null)) {
      fDTDDescription.setValues(null, null, fEntityManager.getCurrentResourceIdentifier.getExpandedSystemId,
        null)
      fDTDDescription.setRootName(fDoctypeName)
      fExternalSubsetSource = fExternalSubsetResolver.getExternalSubset(fDTDDescription)
      fHasExternalDTD = fExternalSubsetSource ne null
    }
    // Report the declaration, preferring the resolver-provided identifiers.
    if (fDocumentHandler ne null) {
      if (fExternalSubsetSource eq null) {
        fDocumentHandler.doctypeDecl(fDoctypeName, fDoctypePublicId, fDoctypeSystemId, null)
      } else {
        fDocumentHandler.doctypeDecl(fDoctypeName, fExternalSubsetSource.getPublicId, fExternalSubsetSource.getSystemId,
          null)
      }
    }
    // '[' starts the internal subset; otherwise the declaration must close
    // with '>' (the markup depth for "<!DOCTYPE" is balanced here).
    var internalSubset = true
    if (!fEntityScanner.skipChar('[')) {
      internalSubset = false
      fEntityScanner.skipSpaces()
      if (!fEntityScanner.skipChar('>')) {
        reportFatalError("DoctypedeclUnterminated", Array(fDoctypeName))
      }
      fMarkupDepth -= 1
    }
    internalSubset
  }
/**
Returns the scanner state name.
*/
override protected def getScannerStateName(state: Int): String = state match {
case SCANNER_STATE_XML_DECL => "SCANNER_STATE_XML_DECL"
case SCANNER_STATE_PROLOG => "SCANNER_STATE_PROLOG"
case SCANNER_STATE_TRAILING_MISC => "SCANNER_STATE_TRAILING_MISC"
case SCANNER_STATE_DTD_INTERNAL_DECLS => "SCANNER_STATE_DTD_INTERNAL_DECLS"
case SCANNER_STATE_DTD_EXTERNAL => "SCANNER_STATE_DTD_EXTERNAL"
case SCANNER_STATE_DTD_EXTERNAL_DECLS => "SCANNER_STATE_DTD_EXTERNAL_DECLS"
}
  /**
   * Dispatcher handling the very start of the document entity: the XML
   * declaration, or a processing instruction whose target begins with "xml".
   * After one dispatch it always hands control to the prolog dispatcher.
   */
  protected class XMLDeclDispatcher extends Dispatcher {

    /**
     * Dispatch an XML "event".
     *
     * @param complete True if this dispatcher is intended to scan
     * and dispatch as much as possible.
     *
     * @return True if there is more to dispatch either from this
     * or a another dispatcher.
     *
     * @throws IOException Thrown on i/o error.
     * @throws XNIException Thrown on parse error.
     */
    def dispatch(complete: Boolean): Boolean = {
      // Whatever happens below, the next state is the prolog.
      setScannerState(SCANNER_STATE_PROLOG)
      setDispatcher(fPrologDispatcher)
      try {
        if (fEntityScanner.skipString("<?xml")) {
          fMarkupDepth += 1
          if (XMLChar.isName(fEntityScanner.peekChar())) {
            // A name character follows "<?xml", so this is actually a PI whose
            // target merely starts with "xml" (e.g. <?xml-stylesheet ...?>).
            // Accumulate the rest of the target, then scan the PI data.
            fStringBuffer.clear()
            fStringBuffer.append("xml")
            if (fNamespaces) {
              while (XMLChar.isNCName(fEntityScanner.peekChar())) {
                fStringBuffer.append(fEntityScanner.scanChar().toChar)
              }
            } else {
              while (XMLChar.isName(fEntityScanner.peekChar())) {
                fStringBuffer.append(fEntityScanner.scanChar().toChar)
              }
            }
            val target = fSymbolTable.addSymbol(fStringBuffer.ch, fStringBuffer.offset, fStringBuffer.length)
            scanPIData(target, fString)
          } else {
            // A genuine XML declaration.
            scanXMLDeclOrTextDecl(scanningTextDecl = false)
          }
        }
        // The declaration (if any) has been consumed; the entity may now be
        // read in chunks rather than character-by-character.
        fEntityManager.fCurrentEntity.mayReadChunks = true
        true
      } catch {
        case e: MalformedByteSequenceException =>
          fErrorReporter.reportError(e.getDomain, e.getKey, e.getArguments, XMLErrorReporter.SEVERITY_FATAL_ERROR,
            e)
          false
        // @ebruchez: not supported in Scala.js
        // case e: CharConversionException =>
        //   fErrorReporter.reportError(XMLMessageFormatter.XML_DOMAIN, "CharConversionFailure", null, XMLErrorReporter.SEVERITY_FATAL_ERROR,
        //     e)
        //   false
        case e: EOFException =>
          reportFatalError("PrematureEOF", null)
          false
      }
    }
  }
  /**
   * Dispatcher to handle prolog scanning.
   */
  protected class PrologDispatcher extends Dispatcher {
    /**
     * Dispatch an XML "event".
     *
     * @param complete True if this dispatcher is intended to scan
     * and dispatch as much as possible.
     *
     * @return True if there is more to dispatch either from this
     * or another dispatcher.
     *
     * @throws IOException Thrown on i/o error.
     * @throws XNIException Thrown on parse error.
     */
    def dispatch(complete: Boolean): Boolean = {
      try {
        // State machine: `again` forces one more iteration after a state
        // transition; `complete` keeps consuming as much input as possible.
        var again: Boolean = false
        do {
          again = false
          fScannerState match {
            case SCANNER_STATE_PROLOG =>
              fEntityScanner.skipSpaces()
              if (fEntityScanner.skipChar('<')) {
                setScannerState(SCANNER_STATE_START_OF_MARKUP)
                again = true
              } else if (fEntityScanner.skipChar('&')) {
                // References are illegal in the prolog; reported in that state.
                setScannerState(SCANNER_STATE_REFERENCE)
                again = true
              } else {
                // Bare character content is illegal in the prolog; reported in that state.
                setScannerState(SCANNER_STATE_CONTENT)
                again = true
              }
            case SCANNER_STATE_START_OF_MARKUP =>
              fMarkupDepth += 1
              if (fEntityScanner.skipChar('!')) {
                // "<!" is either a comment ("<!--") or the DOCTYPE declaration.
                if (fEntityScanner.skipChar('-')) {
                  if (!fEntityScanner.skipChar('-')) {
                    reportFatalError("InvalidCommentStart", null)
                  }
                  setScannerState(SCANNER_STATE_COMMENT)
                  again = true
                } else if (fEntityScanner.skipString("DOCTYPE")) {
                  setScannerState(SCANNER_STATE_DOCTYPE)
                  again = true
                } else {
                  reportFatalError("MarkupNotRecognizedInProlog", null)
                }
              } else if (isValidNameStartChar(fEntityScanner.peekChar())) {
                // Root element reached: hand over to the content dispatcher.
                setScannerState(SCANNER_STATE_ROOT_ELEMENT)
                setDispatcher(fContentDispatcher)
                return true
              } else if (fEntityScanner.skipChar('?')) {
                setScannerState(SCANNER_STATE_PI)
                again = true
              } else if (isValidNameStartHighSurrogate(fEntityScanner.peekChar())) {
                // Root element name starting with a supplementary character.
                setScannerState(SCANNER_STATE_ROOT_ELEMENT)
                setDispatcher(fContentDispatcher)
                return true
              } else {
                reportFatalError("MarkupNotRecognizedInProlog", null)
              }
            case SCANNER_STATE_COMMENT =>
              scanComment()
              setScannerState(SCANNER_STATE_PROLOG)
            case SCANNER_STATE_PI =>
              scanPI()
              setScannerState(SCANNER_STATE_PROLOG)
            case SCANNER_STATE_DOCTYPE =>
              if (fDisallowDoctype) {
                reportFatalError("DoctypeNotAllowed", null)
              }
              // Only one DOCTYPE declaration is permitted per document.
              if (fSeenDoctypeDecl) {
                reportFatalError("AlreadySeenDoctype", null)
              }
              fSeenDoctypeDecl = true
              if (scanDoctypeDecl()) {
                // An internal subset follows: the DTD dispatcher takes over.
                setScannerState(SCANNER_STATE_DTD_INTERNAL_DECLS)
                setDispatcher(fDTDDispatcher)
                return true
              }
              if (fDoctypeSystemId ne null) {
                // External subset declared via SYSTEM/PUBLIC identifier.
                fIsEntityDeclaredVC = !fStandalone
                if ((fValidation || fLoadExternalDTD) &&
                  ((fValidationManager eq null) || !fValidationManager.isCachedDTD)) {
                  setScannerState(SCANNER_STATE_DTD_EXTERNAL)
                  setDispatcher(fDTDDispatcher)
                  return true
                }
              } else if (fExternalSubsetSource ne null) {
                // External subset supplied by the external subset resolver.
                fIsEntityDeclaredVC = !fStandalone
                if ((fValidation || fLoadExternalDTD) &&
                  ((fValidationManager eq null) || !fValidationManager.isCachedDTD)) {
                  fDTDScanner.setInputSource(fExternalSubsetSource)
                  fExternalSubsetSource = null
                  setScannerState(SCANNER_STATE_DTD_EXTERNAL_DECLS)
                  setDispatcher(fDTDDispatcher)
                  return true
                }
              }
              fDTDScanner.setInputSource(null)
              setScannerState(SCANNER_STATE_PROLOG)
            case SCANNER_STATE_CONTENT =>
              reportFatalError("ContentIllegalInProlog", null)
              fEntityScanner.scanChar()
            // @ebruchez: unclear in original Java code if fall-through to next case was intended!
            case SCANNER_STATE_REFERENCE =>
              reportFatalError("ReferenceIllegalInProlog", null)
          }
        } while (complete || again)
        if (complete) {
          // In "complete" mode the root element must come next.
          if (fEntityScanner.scanChar() != '<') {
            reportFatalError("RootElementRequired", null)
          }
          setScannerState(SCANNER_STATE_ROOT_ELEMENT)
          setDispatcher(fContentDispatcher)
        }
      } catch {
        case e: MalformedByteSequenceException =>
          fErrorReporter.reportError(e.getDomain, e.getKey, e.getArguments, XMLErrorReporter.SEVERITY_FATAL_ERROR,
            e)
          return false
        // @ebruchez: not supported in Scala.js
        // case e: CharConversionException =>
        // fErrorReporter.reportError(XMLMessageFormatter.XML_DOMAIN, "CharConversionFailure", null, XMLErrorReporter.SEVERITY_FATAL_ERROR,
        // e)
        // return false
        case e: EOFException =>
          reportFatalError("PrematureEOF", null)
          return false
      }
      true
    }
  }
  /**
   * Dispatcher to handle the internal and external DTD subsets.
   */
  protected class DTDDispatcher extends Dispatcher {
    /**
     * Dispatch an XML "event".
     *
     * @param complete True if this dispatcher is intended to scan
     * and dispatch as much as possible.
     *
     * @return True if there is more to dispatch either from this
     * or another dispatcher.
     *
     * @throws IOException Thrown on i/o error.
     * @throws XNIException Thrown on parse error.
     */
    def dispatch(complete: Boolean): Boolean = {
      // The DTD scanner installs its own entity handler while scanning;
      // ours is restored in the `finally` block below.
      fEntityManager.setEntityHandler(null)
      try {
        var again = false
        do {
          again = false
          fScannerState match {
            case SCANNER_STATE_DTD_INTERNAL_DECLS =>
              var exitLoop = false
              val completeDTD = true
              // Whether the external subset should be read once the internal one ends.
              val readExternalSubset =
                (fValidation || fLoadExternalDTD) &&
                ((fValidationManager eq null) || !fValidationManager.isCachedDTD)
              val moreToScan = fDTDScanner.scanDTDInternalSubset(completeDTD, fStandalone, fHasExternalDTD && readExternalSubset)
              if (!moreToScan) {
                // Internal subset finished: expect "]" S? ">" closing the DOCTYPE decl.
                if (!fEntityScanner.skipChar(']'))
                  reportFatalError("EXPECTED_SQUARE_BRACKET_TO_CLOSE_INTERNAL_SUBSET", null)
                fEntityScanner.skipSpaces()
                if (!fEntityScanner.skipChar('>'))
                  reportFatalError("DoctypedeclUnterminated", Array(fDoctypeName))
                fMarkupDepth -= 1
                if (fDoctypeSystemId ne null) {
                  // Declared external subset: switch to resolving it.
                  fIsEntityDeclaredVC = !fStandalone
                  if (readExternalSubset) {
                    setScannerState(SCANNER_STATE_DTD_EXTERNAL)
                    exitLoop = true
                  }
                } else if (fExternalSubsetSource ne null) {
                  // Resolver-provided external subset: scan it directly.
                  fIsEntityDeclaredVC = !fStandalone
                  if (readExternalSubset) {
                    fDTDScanner.setInputSource(fExternalSubsetSource)
                    fExternalSubsetSource = null
                    setScannerState(SCANNER_STATE_DTD_EXTERNAL_DECLS)
                    exitLoop = true
                  }
                } else {
                  fIsEntityDeclaredVC = fEntityManager.hasPEReferences && !fStandalone
                }
                if (! exitLoop) {
                  // No external subset to read: return to the prolog dispatcher.
                  setScannerState(SCANNER_STATE_PROLOG)
                  setDispatcher(fPrologDispatcher)
                  fEntityManager.setEntityHandler(XMLDocumentScannerImpl.this)
                  return true
                }
              }
            case SCANNER_STATE_DTD_EXTERNAL =>
              // Resolve the external subset entity via its public/system ids.
              fDTDDescription.setValues(fDoctypePublicId, fDoctypeSystemId, null, null)
              fDTDDescription.setRootName(fDoctypeName)
              val xmlInputSource = fEntityManager.resolveEntity(fDTDDescription)
              fDTDScanner.setInputSource(xmlInputSource)
              setScannerState(SCANNER_STATE_DTD_EXTERNAL_DECLS)
              again = true
            case SCANNER_STATE_DTD_EXTERNAL_DECLS =>
              val completeDTD = true
              val moreToScan = fDTDScanner.scanDTDExternalSubset(completeDTD)
              if (!moreToScan) {
                // External subset finished: back to the prolog dispatcher.
                setScannerState(SCANNER_STATE_PROLOG)
                setDispatcher(fPrologDispatcher)
                fEntityManager.setEntityHandler(XMLDocumentScannerImpl.this)
                return true
              }
            case _ =>
              throw new XNIException("DTDDispatcher#dispatch: scanner state=" + fScannerState +
                " (" +
                getScannerStateName(fScannerState) +
                ')')
          }
        } while (complete || again)
      } catch {
        case e: MalformedByteSequenceException =>
          fErrorReporter.reportError(e.getDomain, e.getKey, e.getArguments, XMLErrorReporter.SEVERITY_FATAL_ERROR, e)
          return false
        // @ebruchez: not supported in Scala.js
        // case e: CharConversionException =>
        // fErrorReporter.reportError(XMLMessageFormatter.XML_DOMAIN, "CharConversionFailure", null, XMLErrorReporter.SEVERITY_FATAL_ERROR,
        // e)
        // return false
        case _: EOFException =>
          reportFatalError("PrematureEOF", null)
          return false
      } finally {
        // Always restore this scanner as the entity handler.
        fEntityManager.setEntityHandler(XMLDocumentScannerImpl.this)
      }
      true
    }
  }
  /**
   * Dispatcher to handle content scanning.
   */
  protected class ContentDispatcher extends FragmentContentDispatcher {
    /**
     * Scan for DOCTYPE hook. This method is a hook for subclasses
     * to add code to handle scanning for the "DOCTYPE" string
     * after the string "<!" has been scanned.
     *
     * @return True if the "DOCTYPE" was scanned; false if "DOCTYPE"
     * was not scanned.
     */
    override protected def scanForDoctypeHook(): Boolean = {
      if (fEntityScanner.skipString("DOCTYPE")) {
        setScannerState(SCANNER_STATE_DOCTYPE)
        return true
      }
      false
    }
    /**
     * Element depth is zero. This method is a hook for subclasses
     * to add code to handle when the element depth hits zero. When
     * scanning a document fragment, an element depth of zero is
     * normal. However, when scanning a full XML document, the
     * scanner must handle the trailing miscellaneous section of
     * the document after the end of the document's root element.
     *
     * @return True if the caller should stop and return true which
     * allows the scanner to switch to a new scanning
     * dispatcher. A return value of false indicates that
     * the content dispatcher should continue as normal.
     */
    override protected def elementDepthIsZeroHook(): Boolean = {
      // Root element closed: switch to trailing-misc scanning.
      setScannerState(SCANNER_STATE_TRAILING_MISC)
      setDispatcher(fTrailingMiscDispatcher)
      true
    }
    /**
     * Scan for root element hook. This method is a hook for
     * subclasses to add code that handles scanning for the root
     * element. When scanning a document fragment, there is no
     * "root" element. However, when scanning a full XML document,
     * the scanner must handle the root element specially.
     *
     * @return True if the caller should stop and return true which
     * allows the scanner to switch to a new scanning
     * dispatcher. A return value of false indicates that
     * the content dispatcher should continue as normal.
     */
    override protected def scanRootElementHook(): Boolean = {
      // When no DOCTYPE was seen, give the external subset resolver a chance
      // to supply a DTD before the root element's attributes are scanned.
      if ((fExternalSubsetResolver ne null) && ! fSeenDoctypeDecl &&
        ! fDisallowDoctype && (fValidation || fLoadExternalDTD)) {
        scanStartElementName()
        resolveExternalSubsetAndRead()
        if (scanStartElementAfterName()) {
          // Empty root element: the document body is already done.
          setScannerState(SCANNER_STATE_TRAILING_MISC)
          setDispatcher(fTrailingMiscDispatcher)
          return true
        }
      } else if (scanStartElement()) {
        // Empty root element: the document body is already done.
        setScannerState(SCANNER_STATE_TRAILING_MISC)
        setDispatcher(fTrailingMiscDispatcher)
        return true
      }
      false
    }
    /**
     * End of file hook. This method is a hook for subclasses to
     * add code that handles the end of file. The end of file in
     * a document fragment is OK if the markup depth is zero.
     * However, when scanning a full XML document, an end of file
     * is always premature.
     */
    override protected def endOfFileHook(e: EOFException): Unit = {
      reportFatalError("PrematureEOF", null)
    }
    /**
     * Attempt to locate an external subset for a document that does not otherwise
     * have one. If an external subset is located, then it is scanned.
     */
    protected def resolveExternalSubsetAndRead(): Unit = {
      fDTDDescription.setValues(null, null, fEntityManager.getCurrentResourceIdentifier.getExpandedSystemId,
        null)
      fDTDDescription.setRootName(fElementQName.rawname)
      val src = fExternalSubsetResolver.getExternalSubset(fDTDDescription)
      if (src ne null) {
        // Synthesize doctype information from the resolved source and report it.
        fDoctypeName = fElementQName.rawname
        fDoctypePublicId = src.getPublicId
        fDoctypeSystemId = src.getSystemId
        if (fDocumentHandler ne null) {
          fDocumentHandler.doctypeDecl(fDoctypeName, fDoctypePublicId, fDoctypeSystemId, null)
        }
        try {
          if ((fValidationManager eq null) || !fValidationManager.isCachedDTD) {
            // Drain the external subset completely.
            fDTDScanner.setInputSource(src)
            while (fDTDScanner.scanDTDExternalSubset(complete = true))()
          } else {
            fDTDScanner.setInputSource(null)
          }
        } finally {
          // The DTD scanner may have changed the entity handler; restore ours.
          fEntityManager.setEntityHandler(XMLDocumentScannerImpl.this)
        }
      }
    }
  }
  /**
   * Dispatcher to handle trailing miscellaneous section scanning.
   */
  protected class TrailingMiscDispatcher extends Dispatcher {
    /**
     * Dispatch an XML "event".
     *
     * @param complete True if this dispatcher is intended to scan
     * and dispatch as much as possible.
     *
     * @return True if there is more to dispatch either from this
     * or another dispatcher.
     *
     * @throws IOException Thrown on i/o error.
     * @throws XNIException Thrown on parse error.
     */
    def dispatch(complete: Boolean): Boolean = {
      try {
        // Only comments, PIs, and whitespace are legal after the root element.
        var again: Boolean = false
        do {
          again = false
          fScannerState match {
            case SCANNER_STATE_TRAILING_MISC =>
              fEntityScanner.skipSpaces()
              if (fEntityScanner.skipChar('<')) {
                setScannerState(SCANNER_STATE_START_OF_MARKUP)
                again = true
              } else {
                // Non-markup after root element; reported in the CONTENT state.
                setScannerState(SCANNER_STATE_CONTENT)
                again = true
              }
            case SCANNER_STATE_START_OF_MARKUP =>
              fMarkupDepth += 1
              if (fEntityScanner.skipChar('?')) {
                setScannerState(SCANNER_STATE_PI)
                again = true
              } else if (fEntityScanner.skipChar('!')) {
                setScannerState(SCANNER_STATE_COMMENT)
                again = true
              } else if (fEntityScanner.skipChar('/')) {
                // A stray end tag after the root element.
                reportFatalError("MarkupNotRecognizedInMisc", null)
                again = true
              } else if (isValidNameStartChar(fEntityScanner.peekChar())) {
                // A second root-level element: report, but keep scanning for recovery.
                reportFatalError("MarkupNotRecognizedInMisc", null)
                scanStartElement()
                setScannerState(SCANNER_STATE_CONTENT)
              } else if (isValidNameStartHighSurrogate(fEntityScanner.peekChar())) {
                reportFatalError("MarkupNotRecognizedInMisc", null)
                scanStartElement()
                setScannerState(SCANNER_STATE_CONTENT)
              } else {
                reportFatalError("MarkupNotRecognizedInMisc", null)
              }
            case SCANNER_STATE_PI =>
              scanPI()
              setScannerState(SCANNER_STATE_TRAILING_MISC)
            case SCANNER_STATE_COMMENT =>
              if (!fEntityScanner.skipString("--")) {
                reportFatalError("InvalidCommentStart", null)
              }
              scanComment()
              setScannerState(SCANNER_STATE_TRAILING_MISC)
            case SCANNER_STATE_CONTENT =>
              val ch = fEntityScanner.peekChar()
              if (ch == -1) {
                // Clean end of document.
                setScannerState(SCANNER_STATE_TERMINATED)
                return false
              }
              reportFatalError("ContentIllegalInTrailingMisc", null)
              fEntityScanner.scanChar()
              setScannerState(SCANNER_STATE_TRAILING_MISC)
            case SCANNER_STATE_REFERENCE =>
              reportFatalError("ReferenceIllegalInTrailingMisc", null)
              setScannerState(SCANNER_STATE_TRAILING_MISC)
            case SCANNER_STATE_TERMINATED =>
              // Document already finished: nothing more to dispatch.
              return false
          }
        } while (complete || again);
      } catch {
        case e: MalformedByteSequenceException =>
          fErrorReporter.reportError(e.getDomain, e.getKey, e.getArguments, XMLErrorReporter.SEVERITY_FATAL_ERROR,
            e)
          return false
        // @ebruchez: not supported in Scala.js
        // case e: CharConversionException =>
        // fErrorReporter.reportError(XMLMessageFormatter.XML_DOMAIN, "CharConversionFailure", null, XMLErrorReporter.SEVERITY_FATAL_ERROR,
        // e)
        // return false
        case e: EOFException =>
          // EOF here is premature only if markup is still open.
          if (fMarkupDepth != 0) {
            reportFatalError("PrematureEOF", null)
            return false
          }
          setScannerState(SCANNER_STATE_TERMINATED)
          return false
      }
      true
    }
  }
}
| ebruchez/darius-xml.js | xerces/shared/src/main/scala/org/orbeon/apache/xerces/impl/XMLDocumentScannerImpl.scala | Scala | apache-2.0 | 36,989 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.api
import java.io.File
import java.util
import java.util.Properties
import kafka.security.authorizer.{AclAuthorizer, AclEntry}
import kafka.server.KafkaConfig
import kafka.utils.{CoreUtils, JaasTestUtils, TestUtils}
import org.apache.kafka.clients.admin._
import org.apache.kafka.common.acl.AclOperation.{ALTER, CLUSTER_ACTION, DESCRIBE}
import org.apache.kafka.common.acl.AclPermissionType.ALLOW
import org.apache.kafka.common.acl._
import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourceType}
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.server.authorizer.Authorizer
import org.junit.Assert.{assertEquals, assertFalse, assertNull}
import org.junit.{After, Before, Test}
import scala.jdk.CollectionConverters._
/**
 * Integration tests verifying that the `authorizedOperations` information
 * returned by the admin client (`describeCluster`, `describeTopics`,
 * `describeConsumerGroups`) reflects the ACLs configured through an
 * [[AclAuthorizer]], over SASL_SSL with GSSAPI authentication.
 */
class DescribeAuthorizedOperationsTest extends IntegrationTestHarness with SaslSetup {
  // A single broker suffices: the tests only inspect metadata responses.
  override val brokerCount = 1
  this.serverConfig.setProperty(KafkaConfig.ZkEnableSecureAclsProp, "true")
  this.serverConfig.setProperty(KafkaConfig.AuthorizerClassNameProp, classOf[AclAuthorizer].getName)
  // Admin client under test; created in each test, closed in tearDown.
  var client: Admin = _
  val group1 = "group1"
  val group2 = "group2"
  val group3 = "group3"
  val topic1 = "topic1"
  val topic2 = "topic2"
  override protected def securityProtocol = SecurityProtocol.SASL_SSL
  override protected lazy val trustStoreFile = Some(File.createTempFile("truststore", ".jks"))
  /**
   * Seeds the authorizer before the brokers start: CLUSTER_ACTION on the
   * cluster for the broker principal, plus ALTER on the cluster and DESCRIBE
   * on all topics for the client principal used by the tests.
   */
  override def configureSecurityBeforeServersStart(): Unit = {
    val authorizer = CoreUtils.createObject[Authorizer](classOf[AclAuthorizer].getName)
    val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL)
    val topicResource = new ResourcePattern(ResourceType.TOPIC, AclEntry.WildcardResource, PatternType.LITERAL)
    try {
      authorizer.configure(this.configs.head.originals())
      // NOTE(review): a null request context is passed to createAcls here —
      // presumably acceptable for the AclAuthorizer at startup; confirm.
      val result = authorizer.createAcls(null, List(
        new AclBinding(clusterResource, accessControlEntry(JaasTestUtils.KafkaServerPrincipalUnqualifiedName.toString, ALLOW, CLUSTER_ACTION)),
        new AclBinding(clusterResource, accessControlEntry(JaasTestUtils.KafkaClientPrincipalUnqualifiedName2.toString, ALLOW, ALTER)),
        new AclBinding(topicResource, accessControlEntry(JaasTestUtils.KafkaClientPrincipalUnqualifiedName2.toString, ALLOW, DESCRIBE))).asJava)
      result.asScala.map(_.toCompletableFuture.get).foreach { result => assertFalse(result.exception.isPresent) }
    } finally {
      authorizer.close()
    }
  }
  @Before
  override def setUp(): Unit = {
    // SASL must be configured before the brokers come up.
    startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), Both, JaasTestUtils.KafkaServerContextName))
    super.setUp()
    TestUtils.waitUntilBrokerMetadataIsPropagated(servers)
  }
  // Builds an access control entry for the given user with a wildcard host.
  private def accessControlEntry(userName: String, permissionType: AclPermissionType, operation: AclOperation): AccessControlEntry = {
    new AccessControlEntry(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, userName).toString,
      AclEntry.WildcardHost, operation, permissionType)
  }
  @After
  override def tearDown(): Unit = {
    if (client != null)
      Utils.closeQuietly(client, "AdminClient")
    super.tearDown()
    closeSasl()
  }
  // ACL fixtures granting the client principal specific operations per resource.
  val group1Acl = new AclBinding(new ResourcePattern(ResourceType.GROUP, group1, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.ALL, AclPermissionType.ALLOW))
  val group2Acl = new AclBinding(new ResourcePattern(ResourceType.GROUP, group2, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW))
  val group3Acl = new AclBinding(new ResourcePattern(ResourceType.GROUP, group3, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.DELETE, AclPermissionType.ALLOW))
  val clusterAllAcl = new AclBinding(new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.ALL, AclPermissionType.ALLOW))
  val topic1Acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, topic1, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.ALL, AclPermissionType.ALLOW))
  val topic2All = new AclBinding(new ResourcePattern(ResourceType.TOPIC, topic2, PatternType.LITERAL),
    new AccessControlEntry("User:" + JaasTestUtils.KafkaClientPrincipalUnqualifiedName2, "*", AclOperation.DELETE, AclPermissionType.ALLOW))
  // Admin client configuration pointing at the secured brokers.
  def createConfig(): Properties = {
    val adminClientConfig = new Properties()
    adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    adminClientConfig.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "20000")
    val securityProps: util.Map[Object, Object] =
      TestUtils.adminClientSecurityConfigs(securityProtocol, trustStoreFile, clientSaslProperties)
    securityProps.forEach { (key, value) => adminClientConfig.put(key.asInstanceOf[String], value) }
    adminClientConfig
  }
  // Verifies group-level authorizedOperations: ALL expands to every supported
  // group operation; DESCRIBE and DELETE are reported individually (DELETE
  // implies DESCRIBE in the response).
  @Test
  def testConsumerGroupAuthorizedOperations(): Unit = {
    client = Admin.create(createConfig())
    val results = client.createAcls(List(group1Acl, group2Acl, group3Acl).asJava)
    assertEquals(Set(group1Acl, group2Acl, group3Acl), results.values.keySet.asScala)
    results.all.get
    val describeConsumerGroupsResult = client.describeConsumerGroups(Seq(group1, group2, group3).asJava,
      new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true))
    assertEquals(3, describeConsumerGroupsResult.describedGroups().size())
    val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP).asJava
    val group1Description = describeConsumerGroupsResult.describedGroups().get(group1).get
    assertEquals(expectedOperations, group1Description.authorizedOperations())
    val group2Description = describeConsumerGroupsResult.describedGroups().get(group2).get
    assertEquals(Set(AclOperation.DESCRIBE), group2Description.authorizedOperations().asScala.toSet)
    val group3Description = describeConsumerGroupsResult.describedGroups().get(group3).get
    assertEquals(Set(AclOperation.DESCRIBE, AclOperation.DELETE), group3Description.authorizedOperations().asScala.toSet)
  }
  // Verifies cluster-level authorizedOperations before and after granting ALL.
  @Test
  def testClusterAuthorizedOperations(): Unit = {
    client = Admin.create(createConfig())
    // test without includeAuthorizedOperations flag
    var clusterDescribeResult = client.describeCluster()
    assertNull(clusterDescribeResult.authorizedOperations.get())
    //test with includeAuthorizedOperations flag, we have give Alter permission
    // in configureSecurityBeforeServersStart()
    clusterDescribeResult = client.describeCluster(new DescribeClusterOptions().
      includeAuthorizedOperations(true))
    assertEquals(Set(AclOperation.DESCRIBE, AclOperation.ALTER),
      clusterDescribeResult.authorizedOperations().get().asScala.toSet)
    // enable all operations for cluster resource
    val results = client.createAcls(List(clusterAllAcl).asJava)
    assertEquals(Set(clusterAllAcl), results.values.keySet.asScala)
    results.all.get
    val expectedOperations = AclEntry.supportedOperations(ResourceType.CLUSTER).asJava
    clusterDescribeResult = client.describeCluster(new DescribeClusterOptions().
      includeAuthorizedOperations(true))
    assertEquals(expectedOperations, clusterDescribeResult.authorizedOperations().get())
  }
  // Verifies topic-level authorizedOperations before and after granting
  // additional operations on two topics.
  @Test
  def testTopicAuthorizedOperations(): Unit = {
    client = Admin.create(createConfig())
    createTopic(topic1)
    createTopic(topic2)
    // test without includeAuthorizedOperations flag
    var describeTopicsResult = client.describeTopics(Set(topic1, topic2).asJava).all.get()
    assertNull(describeTopicsResult.get(topic1).authorizedOperations)
    assertNull(describeTopicsResult.get(topic2).authorizedOperations)
    //test with includeAuthorizedOperations flag
    describeTopicsResult = client.describeTopics(Set(topic1, topic2).asJava,
      new DescribeTopicsOptions().includeAuthorizedOperations(true)).all.get()
    assertEquals(Set(AclOperation.DESCRIBE), describeTopicsResult.get(topic1).authorizedOperations().asScala.toSet)
    assertEquals(Set(AclOperation.DESCRIBE), describeTopicsResult.get(topic2).authorizedOperations().asScala.toSet)
    //add few permissions
    val results = client.createAcls(List(topic1Acl, topic2All).asJava)
    assertEquals(Set(topic1Acl, topic2All), results.values.keySet.asScala)
    results.all.get
    val expectedOperations = AclEntry.supportedOperations(ResourceType.TOPIC).asJava
    describeTopicsResult = client.describeTopics(Set(topic1, topic2).asJava,
      new DescribeTopicsOptions().includeAuthorizedOperations(true)).all.get()
    assertEquals(expectedOperations, describeTopicsResult.get(topic1).authorizedOperations())
    assertEquals(Set(AclOperation.DESCRIBE, AclOperation.DELETE),
      describeTopicsResult.get(topic2).authorizedOperations().asScala.toSet)
  }
}
| sslavic/kafka | core/src/test/scala/integration/kafka/api/DescribeAuthorizedOperationsTest.scala | Scala | apache-2.0 | 9,981 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top.params
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
/**
* @author Alexander Podkhalyuzin
* Date: 08.02.2008
*/
/*
* ClassParamClause ::= [nl] '(' [ClassParam {',' ClassParam}] ')'
*/
/** Default implementation wired to the standard [[ClassParam]] parser. */
object ClassParamClause extends ClassParamClause {
  override protected def classParam = ClassParam
}
trait ClassParamClause {
protected def classParam: ClassParam
  /**
   * Parses a class parameter clause: `[nl] '(' [ClassParam {',' ClassParam}] ')'`.
   *
   * Marks a PARAM_CLAUSE element on success. Rolls the builder back and
   * returns `false` when no clause starts here: after a blank line, when the
   * next token is not '(', or when the clause is an implicit one (which is
   * handled by a different production).
   *
   * @return true if a parameter clause was consumed (possibly with errors)
   */
  def parse(builder: ScalaPsiBuilder): Boolean = {
    val classParamMarker = builder.mark
    // A blank line before the clause means it belongs to something else.
    if (builder.twoNewlinesBeforeCurrentToken) {
      classParamMarker.rollbackTo()
      return false
    }
    //Look for '('
    builder.getTokenType match {
      case ScalaTokenTypes.tLPARENTHESIS =>
        builder.advanceLexer() //Ate '('
        builder.disableNewlines()
        builder.getTokenType match {
          // Implicit clauses are parsed elsewhere; back out entirely.
          case ScalaTokenTypes.kIMPLICIT =>
            classParamMarker.rollbackTo()
            builder.restoreNewlinesState()
            return false
          case _ =>
        }
        //ok, let's parse parameters
        if (classParam parse builder) {
          // Comma-separated parameters; a trailing comma before ')' is allowed.
          while (builder.getTokenType == ScalaTokenTypes.tCOMMA && !ParserUtils.eatTrailingComma(builder, ScalaTokenTypes.tRPARENTHESIS)) {
            builder.advanceLexer() //Ate ,
            if (!(classParam parse builder)) {
              builder error ErrMsg("wrong.parameter")
            }
          }
        }
      case _ =>
        classParamMarker.rollbackTo()
        return false
    }
    //Look for ')'
    builder.getTokenType match {
      case ScalaTokenTypes.tRPARENTHESIS =>
        builder.advanceLexer() //Ate )
        builder.restoreNewlinesState()
        classParamMarker.done(ScalaElementTypes.PARAM_CLAUSE)
        true
      case _ =>
        // Missing ')': report the error but still close the marker so the
        // PSI tree stays well-formed.
        classParamMarker.done(ScalaElementTypes.PARAM_CLAUSE)
        builder error ErrMsg("rparenthesis.expected")
        builder.restoreNewlinesState()
        true
    }
  }
} | loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/top/params/ClassParamClause.scala | Scala | apache-2.0 | 2,123 |
package gitbucket.core.util
import gitbucket.core.controller.ControllerBase
import gitbucket.core.service.{AccountService, RepositoryService}
import gitbucket.core.model.Role
import RepositoryService.RepositoryInfo
import Implicits._
/**
* Allows only oneself and administrators.
*/
/**
 * Allows only oneself and administrators.
 */
trait OneselfAuthenticator { self: ControllerBase =>
  protected def oneselfOnly(action: => Any) = { authenticate(action) }
  protected def oneselfOnly[T](action: T => Any) = (form: T) => { authenticate(action(form)) }
  // The first path segment names the target user; administrators may act on anyone.
  private def authenticate(action: => Any) = {
    val permitted = context.loginAccount.exists { account =>
      account.isAdmin || account.userName == request.paths(0)
    }
    if (permitted) action else Unauthorized()
  }
}
/**
* Allows only the repository owner and administrators.
*/
/**
 * Allows only the repository owner and administrators.
 */
trait OwnerAuthenticator { self: ControllerBase with RepositoryService with AccountService =>
  protected def ownerOnly(action: (RepositoryInfo) => Any) = { authenticate(action) }
  protected def ownerOnly[T](action: (T, RepositoryInfo) => Any) = (form: T) => { authenticate(action(form, _)) }
  // Grants access to admins, the repository owner, managers of the owning
  // group, and collaborators with the ADMIN role; 404 for unknown repositories.
  private def authenticate(action: (RepositoryInfo) => Any) = {
    val userName = params("owner")
    val repoName = params("repository")
    getRepository(userName, repoName).map { repository =>
      context.loginAccount match {
        case Some(x) if (x.isAdmin) => action(repository)
        case Some(x) if (repository.owner == x.userName) => action(repository)
        // TODO Repository management is allowed for only group managers?
        case Some(x) if (getGroupMembers(repository.owner).exists { m =>
          m.userName == x.userName && m.isManager
        }) =>
          action(repository)
        case Some(x) if (getCollaboratorUserNames(userName, repoName, Seq(Role.ADMIN)).contains(x.userName)) =>
          action(repository)
        case _ => Unauthorized()
      }
    } getOrElse NotFound()
  }
}
/**
* Allows only signed in users.
*/
/**
 * Allows only signed in users.
 */
trait UsersAuthenticator { self: ControllerBase =>
  protected def usersOnly(action: => Any) = { authenticate(action) }
  protected def usersOnly[T](action: T => Any) = (form: T) => { authenticate(action(form)) }
  // Any authenticated account may proceed; anonymous requests get 401.
  private def authenticate(action: => Any) = {
    if (context.loginAccount.isDefined) action
    else Unauthorized()
  }
}
/**
* Allows only administrators.
*/
/**
 * Allows only administrators.
 */
trait AdminAuthenticator { self: ControllerBase =>
  protected def adminOnly(action: => Any) = { authenticate(action) }
  protected def adminOnly[T](action: T => Any) = (form: T) => { authenticate(action(form)) }
  // Only accounts flagged as administrators may proceed; everyone else gets 401.
  private def authenticate(action: => Any) = {
    if (context.loginAccount.exists(_.isAdmin)) action
    else Unauthorized()
  }
}
/**
* Allows only guests and signed in users who can access the repository.
*/
/**
 * Allows only guests and signed in users who can access the repository.
 */
trait ReferrerAuthenticator { self: ControllerBase with RepositoryService with AccountService =>
  protected def referrersOnly(action: (RepositoryInfo) => Any) = { authenticate(action) }
  protected def referrersOnly[T](action: (T, RepositoryInfo) => Any) = (form: T) => { authenticate(action(form, _)) }
  // 404 for an unknown repository; 401 when the repository exists but is not
  // readable by the current (possibly anonymous) account.
  private def authenticate(action: (RepositoryInfo) => Any) = {
    getRepository(params("owner"), params("repository")) match {
      case Some(repository) if isReadable(repository.repository, context.loginAccount) =>
        action(repository)
      case Some(_) =>
        Unauthorized()
      case None =>
        NotFound()
    }
  }
}
/**
* Allows only signed in users who have read permission for the repository.
*/
/**
 * Allows only signed in users who have read permission for the repository.
 */
trait ReadableUsersAuthenticator { self: ControllerBase with RepositoryService with AccountService =>
  protected def readableUsersOnly(action: (RepositoryInfo) => Any) = { authenticate(action) }
  protected def readableUsersOnly[T](action: (T, RepositoryInfo) => Any) = (form: T) => {
    authenticate(action(form, _))
  }
  // NOTE(review): the `!repository.repository.isPrivate` disjunct also admits
  // requests for public repositories even when `isReadable` returns false —
  // confirm this matches the "signed in users" contract stated above.
  private def authenticate(action: (RepositoryInfo) => Any) = {
    val userName = params("owner")
    val repoName = params("repository")
    getRepository(userName, repoName).map { repository =>
      if (isReadable(repository.repository, context.loginAccount) || !repository.repository.isPrivate) {
        action(repository)
      } else {
        Unauthorized()
      }
    } getOrElse NotFound()
  }
}
/**
* Allows only signed in users who have write permission for the repository.
*/
/**
 * Allows only signed in users who have write permission for the repository.
 */
trait WritableUsersAuthenticator { self: ControllerBase with RepositoryService with AccountService =>
  protected def writableUsersOnly(action: (RepositoryInfo) => Any) = { authenticate(action) }
  protected def writableUsersOnly[T](action: (T, RepositoryInfo) => Any) = (form: T) => {
    authenticate(action(form, _))
  }
  // Write access: admins, the owner account itself, members of the owning
  // group, and collaborators with the ADMIN or DEVELOPER role.
  private def authenticate(action: (RepositoryInfo) => Any) = {
    val userName = params("owner")
    val repoName = params("repository")
    getRepository(userName, repoName).map { repository =>
      context.loginAccount match {
        case Some(x) if (x.isAdmin) => action(repository)
        case Some(x) if (userName == x.userName) => action(repository)
        case Some(x) if (getGroupMembers(repository.owner).exists(_.userName == x.userName)) => action(repository)
        case Some(x)
            if (getCollaboratorUserNames(userName, repoName, Seq(Role.ADMIN, Role.DEVELOPER))
              .contains(x.userName)) =>
          action(repository)
        case _ => Unauthorized()
      }
    } getOrElse NotFound()
  }
}
/**
* Allows only the group managers.
*/
/**
 * Allows only the group managers.
 */
trait GroupManagerAuthenticator { self: ControllerBase with AccountService =>
  protected def managersOnly(action: => Any) = { authenticate(action) }
  protected def managersOnly[T](action: T => Any) = (form: T) => { authenticate(action(form)) }
  // The first path segment is the group name. Admins, the group account
  // itself, and group members flagged as managers are allowed.
  private def authenticate(action: => Any) = {
    context.loginAccount match {
      case Some(x) if x.isAdmin => action
      case Some(x) if x.userName == request.paths(0) => action
      case Some(x) if (getGroupMembers(request.paths(0)).exists { member =>
        member.userName == x.userName && member.isManager
      }) =>
        action
      case _ => Unauthorized()
    }
  }
}
| gitbucket/gitbucket | src/main/scala/gitbucket/core/util/Authenticator.scala | Scala | apache-2.0 | 6,403 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._
import scala.util.Try
import org.scalatest.Assertions._
import org.apache.spark.TestUtils
import org.apache.spark.api.python.{PythonBroadcast, PythonEvalType, PythonFunction, PythonUtils}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.config.Tests
import org.apache.spark.sql.catalyst.expressions.{Cast, Expression}
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.execution.python.UserDefinedPythonFunction
import org.apache.spark.sql.expressions.SparkUserDefinedFunction
import org.apache.spark.sql.types.StringType
/**
* This object targets to integrate various UDF test cases so that Scalar UDF, Python UDF and
* Scalar Pandas UDFs can be tested in SBT & Maven tests.
*
* The available UDFs are special. It defines an UDF wrapped by cast. So, the input column is
* casted into string, UDF returns strings as are, and then output column is casted back to
* the input column. In this way, UDF is virtually no-op.
*
* Note that, due to this implementation limitation, complex types such as map, array and struct
* types do not work with this UDFs because they cannot be same after the cast roundtrip.
*
* To register Scala UDF in SQL:
* {{{
* val scalaTestUDF = TestScalaUDF(name = "udf_name")
* registerTestUDF(scalaTestUDF, spark)
* }}}
*
* To register Python UDF in SQL:
* {{{
* val pythonTestUDF = TestPythonUDF(name = "udf_name")
* registerTestUDF(pythonTestUDF, spark)
* }}}
*
* To register Scalar Pandas UDF in SQL:
* {{{
* val pandasTestUDF = TestScalarPandasUDF(name = "udf_name")
* registerTestUDF(pandasTestUDF, spark)
* }}}
*
* To use it in Scala API and SQL:
* {{{
* sql("SELECT udf_name(1)")
* val df = spark.range(10)
* df.select(expr("udf_name(id)")
* df.select(pandasTestUDF(df("id")))
* }}}
*/
object IntegratedUDFTestUtils extends SQLHelper {
  import scala.sys.process._

  // Extra PYTHONPATH entries inherited from the environment of the test JVM.
  private lazy val pythonPath = sys.env.getOrElse("PYTHONPATH", "")

  // Note that we will directly refer pyspark's source, not the zip from a regular build.
  // It is possible the test is being ran without the build.
  private lazy val sourcePath = Paths.get(sparkHome, "python").toAbsolutePath
  private lazy val py4jPath = Paths.get(
    sparkHome, "python", "lib", PythonUtils.PY4J_ZIP_NAME).toAbsolutePath
  // ':'-joined path handed to the spawned Python processes below.
  private lazy val pysparkPythonPath = s"$py4jPath:$sourcePath"

  // Each availability check below shells out to Python; failures (non-zero exit,
  // missing executable) are swallowed by Try and reported as "not available".
  private lazy val isPythonAvailable: Boolean = TestUtils.testCommandAvailable(pythonExec)

  private lazy val isPySparkAvailable: Boolean = isPythonAvailable && Try {
    Process(
      Seq(pythonExec, "-c", "import pyspark"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!
    true
  }.getOrElse(false)

  private lazy val isPandasAvailable: Boolean = isPythonAvailable && isPySparkAvailable && Try {
    Process(
      Seq(
        pythonExec,
        "-c",
        "from pyspark.sql.pandas.utils import require_minimum_pandas_version;" +
          "require_minimum_pandas_version()"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!
    true
  }.getOrElse(false)

  private lazy val isPyArrowAvailable: Boolean = isPythonAvailable && isPySparkAvailable && Try {
    Process(
      Seq(
        pythonExec,
        "-c",
        "from pyspark.sql.pandas.utils import require_minimum_pyarrow_version;" +
          "require_minimum_pyarrow_version()"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!
    true
  }.getOrElse(false)

  /** "major.minor" of the Python interpreter; throws if Python is unavailable. */
  lazy val pythonVer: String = if (isPythonAvailable) {
    Process(
      Seq(pythonExec, "-c", "import sys; print('%d.%d' % sys.version_info[:2])"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!.trim()
  } else {
    throw new RuntimeException(s"Python executable [$pythonExec] is unavailable.")
  }

  /** Installed pandas version string; throws if pandas is unavailable. */
  lazy val pandasVer: String = if (isPandasAvailable) {
    Process(
      Seq(pythonExec, "-c", "import pandas; print(pandas.__version__)"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!.trim()
  } else {
    throw new RuntimeException("Pandas is unavailable.")
  }

  /** Installed PyArrow version string; throws if PyArrow is unavailable. */
  lazy val pyarrowVer: String = if (isPyArrowAvailable) {
    Process(
      Seq(pythonExec, "-c", "import pyarrow; print(pyarrow.__version__)"),
      None,
      "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!.trim()
  } else {
    throw new RuntimeException("PyArrow is unavailable.")
  }

  // Dynamically pickles and reads the Python instance into JVM side in order to mimic
  // Python native function within Python UDF.
  // The pickled payload is the pair (identity-as-string lambda, StringType) written
  // to a temp file by a spawned interpreter, then read back as raw bytes.
  private lazy val pythonFunc: Array[Byte] = if (shouldTestPythonUDFs) {
    var binaryPythonFunc: Array[Byte] = null
    withTempPath { path =>
      Process(
        Seq(
          pythonExec,
          "-c",
          "from pyspark.sql.types import StringType; " +
            "from pyspark.serializers import CloudPickleSerializer; " +
            s"f = open('$path', 'wb');" +
            "f.write(CloudPickleSerializer().dumps((" +
            "lambda x: None if x is None else str(x), StringType())))"),
        None,
        "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!
      binaryPythonFunc = Files.readAllBytes(path.toPath)
    }
    assert(binaryPythonFunc != null)
    binaryPythonFunc
  } else {
    throw new RuntimeException(s"Python executable [$pythonExec] and/or pyspark are unavailable.")
  }

  // Same as pythonFunc but the lambda maps over a pandas Series (scalar pandas UDF).
  private lazy val pandasFunc: Array[Byte] = if (shouldTestScalarPandasUDFs) {
    var binaryPandasFunc: Array[Byte] = null
    withTempPath { path =>
      Process(
        Seq(
          pythonExec,
          "-c",
          "from pyspark.sql.types import StringType; " +
            "from pyspark.serializers import CloudPickleSerializer; " +
            s"f = open('$path', 'wb');" +
            "f.write(CloudPickleSerializer().dumps((" +
            "lambda x: x.apply(" +
            "lambda v: None if v is None else str(v)), StringType())))"),
        None,
        "PYTHONPATH" -> s"$pysparkPythonPath:$pythonPath").!!
      binaryPandasFunc = Files.readAllBytes(path.toPath)
    }
    assert(binaryPandasFunc != null)
    binaryPandasFunc
  } else {
    throw new RuntimeException(s"Python executable [$pythonExec] and/or pyspark are unavailable.")
  }

  // Make sure this map stays mutable - this map gets updated later in Python runners.
  private val workerEnv = new java.util.HashMap[String, String]()
  workerEnv.put("PYTHONPATH", s"$pysparkPythonPath:$pythonPath")

  /** Python executable used for all subprocesses; honors PYSPARK_* env overrides. */
  lazy val pythonExec: String = {
    val pythonExec = sys.env.getOrElse(
      "PYSPARK_DRIVER_PYTHON", sys.env.getOrElse("PYSPARK_PYTHON", "python3"))
    if (TestUtils.testCommandAvailable(pythonExec)) {
      pythonExec
    } else {
      "python"
    }
  }

  lazy val shouldTestPythonUDFs: Boolean = isPythonAvailable && isPySparkAvailable

  lazy val shouldTestScalarPandasUDFs: Boolean =
    isPythonAvailable && isPandasAvailable && isPyArrowAvailable

  /**
   * A base trait for various UDFs defined in this object.
   */
  sealed trait TestUDF {
    def apply(exprs: Column*): Column

    val prettyName: String
  }

  /**
   * A Python UDF that takes one column, casts into string, executes the Python native function,
   * and casts back to the type of input column.
   *
   * Virtually equivalent to:
   *
   * {{{
   *   from pyspark.sql.functions import udf
   *
   *   df = spark.range(3).toDF("col")
   *   python_udf = udf(lambda x: str(x), "string")
   *   casted_col = python_udf(df.col.cast("string"))
   *   casted_col.cast(df.schema["col"].dataType)
   * }}}
   */
  case class TestPythonUDF(name: String) extends TestUDF {
    private[IntegratedUDFTestUtils] lazy val udf = new UserDefinedPythonFunction(
      name = name,
      func = PythonFunction(
        command = pythonFunc,
        envVars = workerEnv.clone().asInstanceOf[java.util.Map[String, String]],
        pythonIncludes = List.empty[String].asJava,
        pythonExec = pythonExec,
        pythonVer = pythonVer,
        broadcastVars = List.empty[Broadcast[PythonBroadcast]].asJava,
        accumulator = null),
      dataType = StringType,
      pythonEvalType = PythonEvalType.SQL_BATCHED_UDF,
      udfDeterministic = true) {

      override def builder(e: Seq[Expression]): Expression = {
        assert(e.length == 1, "Defined UDF only has one column")
        val expr = e.head
        assert(expr.resolved, "column should be resolved to use the same type " +
          "as input. Try df(name) or df.col(name)")
        // Wrap the Python UDF with casts so the whole thing is a no-op: in, to
        // string, through the UDF, back to the input type.
        Cast(super.builder(Cast(expr, StringType) :: Nil), expr.dataType)
      }
    }

    def apply(exprs: Column*): Column = udf(exprs: _*)

    val prettyName: String = "Regular Python UDF"
  }

  /**
   * A Scalar Pandas UDF that takes one column, casts into string, executes the
   * Python native function, and casts back to the type of input column.
   *
   * Virtually equivalent to:
   *
   * {{{
   *   from pyspark.sql.functions import pandas_udf
   *
   *   df = spark.range(3).toDF("col")
   *   scalar_udf = pandas_udf(lambda x: x.apply(lambda v: str(v)), "string")
   *   casted_col = scalar_udf(df.col.cast("string"))
   *   casted_col.cast(df.schema["col"].dataType)
   * }}}
   */
  case class TestScalarPandasUDF(name: String) extends TestUDF {
    private[IntegratedUDFTestUtils] lazy val udf = new UserDefinedPythonFunction(
      name = name,
      func = PythonFunction(
        command = pandasFunc,
        envVars = workerEnv.clone().asInstanceOf[java.util.Map[String, String]],
        pythonIncludes = List.empty[String].asJava,
        pythonExec = pythonExec,
        pythonVer = pythonVer,
        broadcastVars = List.empty[Broadcast[PythonBroadcast]].asJava,
        accumulator = null),
      dataType = StringType,
      pythonEvalType = PythonEvalType.SQL_SCALAR_PANDAS_UDF,
      udfDeterministic = true) {

      override def builder(e: Seq[Expression]): Expression = {
        assert(e.length == 1, "Defined UDF only has one column")
        val expr = e.head
        assert(expr.resolved, "column should be resolved to use the same type " +
          "as input. Try df(name) or df.col(name)")
        // Cast round-trip makes the UDF a virtual no-op (see object scaladoc).
        Cast(super.builder(Cast(expr, StringType) :: Nil), expr.dataType)
      }
    }

    def apply(exprs: Column*): Column = udf(exprs: _*)

    val prettyName: String = "Scalar Pandas UDF"
  }

  /**
   * A Scala UDF that takes one column, casts into string, executes the
   * Scala native function, and casts back to the type of input column.
   *
   * Virtually equivalent to:
   *
   * {{{
   *   import org.apache.spark.sql.functions.udf
   *
   *   val df = spark.range(3).toDF("col")
   *   val scala_udf = udf((input: Any) => input.toString)
   *   val casted_col = scala_udf(df.col("col").cast("string"))
   *   casted_col.cast(df.schema("col").dataType)
   * }}}
   */
  case class TestScalaUDF(name: String) extends TestUDF {
    private[IntegratedUDFTestUtils] lazy val udf = new SparkUserDefinedFunction(
      (input: Any) => if (input == null) {
        null
      } else {
        input.toString
      },
      StringType,
      inputEncoders = Seq.fill(1)(None),
      name = Some(name)) {

      override def apply(exprs: Column*): Column = {
        assert(exprs.length == 1, "Defined UDF only has one column")
        val expr = exprs.head.expr
        assert(expr.resolved, "column should be resolved to use the same type " +
          "as input. Try df(name) or df.col(name)")
        Column(Cast(createScalaUDF(Cast(expr, StringType) :: Nil), expr.dataType))
      }
    }

    def apply(exprs: Column*): Column = udf(exprs: _*)

    val prettyName: String = "Scala UDF"
  }

  /**
   * Register UDFs used in this test case.
   */
  def registerTestUDF(testUDF: TestUDF, session: SparkSession): Unit = testUDF match {
    case udf: TestPythonUDF => session.udf.registerPython(udf.name, udf.udf)
    case udf: TestScalarPandasUDF => session.udf.registerPython(udf.name, udf.udf)
    case udf: TestScalaUDF => session.udf.register(udf.name, udf.udf)
    case other => throw new RuntimeException(s"Unknown UDF class [${other.getClass}]")
  }
}
| dbtsai/spark | sql/core/src/test/scala/org/apache/spark/sql/IntegratedUDFTestUtils.scala | Scala | apache-2.0 | 12,987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.concurrent.TimeUnit._
import scala.collection.mutable.HashMap
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, UnknownPartitioning}
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat => ParquetSource}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.sources.{BaseRelation, Filter}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.BitSet
trait DataSourceScanExec extends LeafExecNode with CodegenSupport {
  val relation: BaseRelation
  val tableIdentifier: Option[TableIdentifier]

  protected val nodeNamePrefix: String = ""

  override val nodeName: String =
    s"Scan $relation ${tableIdentifier.map(_.unquotedString).getOrElse("")}"

  /** Metadata that describes more details of this scan. */
  protected def metadata: Map[String, String]

  override def simpleString(maxFields: Int): String = {
    // Sort the metadata for a stable rendering, redact sensitive values, and
    // clip each entry to 100 characters.
    val entries = metadata.toSeq.sorted.map { case (key, value) =>
      key + ": " + StringUtils.abbreviate(redact(value), 100)
    }
    val metadataSuffix = truncatedString(entries, " ", ", ", "", maxFields)
    val columnList = truncatedString(output, "[", ",", "]", maxFields)
    redact(s"$nodeNamePrefix$nodeName$columnList$metadataSuffix")
  }

  /** Shorthand for calling redactString() without specifying redacting rules. */
  private def redact(text: String): String =
    Utils.redact(sqlContext.sessionState.conf.stringRedactionPattern, text)
}
/** Physical plan node for scanning data from a relation. */
case class RowDataSourceScanExec(
    fullOutput: Seq[Attribute],
    requiredColumnsIndex: Seq[Int],
    filters: Set[Filter],
    handledFilters: Set[Filter],
    rdd: RDD[InternalRow],
    @transient relation: BaseRelation,
    override val tableIdentifier: Option[TableIdentifier])
  extends DataSourceScanExec with InputRDDCodegen {

  // Project the full attribute list down to the columns actually required.
  def output: Seq[Attribute] = requiredColumnsIndex.map(fullOutput)

  override lazy val metrics =
    Map("numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))

  protected override def doExecute(): RDD[InternalRow] = {
    val numOutputRows = longMetric("numOutputRows")
    rdd.mapPartitionsWithIndexInternal { (partitionIndex, rows) =>
      val toUnsafe = UnsafeProjection.create(schema)
      toUnsafe.initialize(partitionIndex)
      rows.map { row =>
        numOutputRows += 1
        toUnsafe(row)
      }
    }
  }

  // Input can be InternalRow, has to be turned into UnsafeRows.
  override protected val createUnsafeProjection: Boolean = true

  override def inputRDD: RDD[InternalRow] = rdd

  override val metadata: Map[String, String] = {
    // A leading '*' marks filters that the data source handles itself.
    val markedFilters = filters.map { filter =>
      if (handledFilters.contains(filter)) s"*$filter" else s"$filter"
    }
    Map(
      "ReadSchema" -> output.toStructType.catalogString,
      "PushedFilters" -> markedFilters.mkString("[", ", ", "]"))
  }

  // Don't care about `rdd` and `tableIdentifier` when canonicalizing.
  override def doCanonicalize(): SparkPlan =
    copy(
      fullOutput.map(QueryPlan.normalizeExprId(_, fullOutput)),
      rdd = null,
      tableIdentifier = None)
}
/**
* Physical plan node for scanning data from HadoopFsRelations.
*
* @param relation The file-based relation to scan.
* @param output Output attributes of the scan, including data attributes and partition attributes.
* @param requiredSchema Required schema of the underlying relation, excluding partition columns.
* @param partitionFilters Predicates to use for partition pruning.
* @param optionalBucketSet Bucket ids for bucket pruning
* @param dataFilters Filters on non-partition columns.
* @param tableIdentifier identifier for the table in the metastore.
*/
case class FileSourceScanExec(
    @transient relation: HadoopFsRelation,
    output: Seq[Attribute],
    requiredSchema: StructType,
    partitionFilters: Seq[Expression],
    optionalBucketSet: Option[BitSet],
    dataFilters: Seq[Expression],
    override val tableIdentifier: Option[TableIdentifier])
  extends DataSourceScanExec with ColumnarBatchScan {

  // Note that some vals referring the file-based relation are lazy intentionally
  // so that this plan can be canonicalized on executor side too. See SPARK-23731.
  override lazy val supportsBatch: Boolean = {
    relation.fileFormat.supportBatch(relation.sparkSession, schema)
  }

  // Only Parquet via the vectorized reader emits rows that need an UnsafeRow
  // conversion when batch execution is not used.
  private lazy val needsUnsafeRowConversion: Boolean = {
    if (relation.fileFormat.isInstanceOf[ParquetSource]) {
      SparkSession.getActiveSession.get.sessionState.conf.parquetVectorizedReaderEnabled
    } else {
      false
    }
  }

  override def vectorTypes: Option[Seq[String]] =
    relation.fileFormat.vectorTypes(
      requiredSchema = requiredSchema,
      partitionSchema = relation.partitionSchema,
      relation.sparkSession.sessionState.conf)

  // Metrics gathered on the driver (e.g. during file listing); they are pushed
  // to the metric system by sendDriverMetrics().
  val driverMetrics: HashMap[String, Long] = HashMap.empty

  /**
   * Send the driver-side metrics. Before calling this function, selectedPartitions has
   * been initialized. See SPARK-26327 for more details.
   */
  private def sendDriverMetrics(): Unit = {
    driverMetrics.foreach(e => metrics(e._1).add(e._2))
    val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    SQLMetrics.postDriverMetricUpdates(sparkContext, executionId,
      metrics.filter(e => driverMetrics.contains(e._1)).values.toSeq)
  }

  // Lists the matching partitions/files once and records numFiles/metadataTime
  // as a side effect (hence @transient lazy: evaluated on the driver only).
  @transient private lazy val selectedPartitions: Seq[PartitionDirectory] = {
    val optimizerMetadataTimeNs = relation.location.metadataOpsTimeNs.getOrElse(0L)
    val startTime = System.nanoTime()
    val ret = relation.location.listFiles(partitionFilters, dataFilters)
    driverMetrics("numFiles") = ret.map(_.files.size.toLong).sum
    val timeTakenMs = NANOSECONDS.toMillis(
      (System.nanoTime() - startTime) + optimizerMetadataTimeNs)
    driverMetrics("metadataTime") = timeTakenMs
    ret
  }

  /**
   * [[partitionFilters]] can contain subqueries whose results are available only at runtime so
   * accessing [[selectedPartitions]] should be guarded by this method during planning
   */
  private def hasPartitionsAvailableAtRunTime: Boolean = {
    partitionFilters.exists(ExecSubqueryExpression.hasSubquery)
  }

  override lazy val (outputPartitioning, outputOrdering): (Partitioning, Seq[SortOrder]) = {
    val bucketSpec = if (relation.sparkSession.sessionState.conf.bucketingEnabled) {
      relation.bucketSpec
    } else {
      None
    }
    bucketSpec match {
      case Some(spec) =>
        // For bucketed columns:
        // -----------------------
        // `HashPartitioning` would be used only when:
        // 1. ALL the bucketing columns are being read from the table
        //
        // For sorted columns:
        // ---------------------
        // Sort ordering should be used when ALL these criteria's match:
        // 1. `HashPartitioning` is being used
        // 2. A prefix (or all) of the sort columns are being read from the table.
        //
        // Sort ordering would be over the prefix subset of `sort columns` being read
        // from the table.
        // eg.
        // Assume (col0, col2, col3) are the columns read from the table
        // If sort columns are (col0, col1), then sort ordering would be considered as (col0)
        // If sort columns are (col1, col0), then sort ordering would be empty as per rule #2
        // above
        def toAttribute(colName: String): Option[Attribute] =
          output.find(_.name == colName)

        val bucketColumns = spec.bucketColumnNames.flatMap(n => toAttribute(n))
        if (bucketColumns.size == spec.bucketColumnNames.size) {
          val partitioning = HashPartitioning(bucketColumns, spec.numBuckets)
          val sortColumns =
            spec.sortColumnNames.map(x => toAttribute(x)).takeWhile(x => x.isDefined).map(_.get)
          val sortOrder = if (sortColumns.nonEmpty && !hasPartitionsAvailableAtRunTime) {
            // In case of bucketing, its possible to have multiple files belonging to the
            // same bucket in a given relation. Each of these files are locally sorted
            // but those files combined together are not globally sorted. Given that,
            // the RDD partition will not be sorted even if the relation has sort columns set
            // Current solution is to check if all the buckets have a single file in it
            val files = selectedPartitions.flatMap(partition => partition.files)
            val bucketToFilesGrouping =
              files.map(_.getPath.getName).groupBy(file => BucketingUtils.getBucketId(file))
            val singleFilePartitions = bucketToFilesGrouping.forall(p => p._2.length <= 1)
            if (singleFilePartitions) {
              // TODO Currently Spark does not support writing columns sorting in descending order
              // so using Ascending order. This can be fixed in future
              sortColumns.map(attribute => SortOrder(attribute, Ascending))
            } else {
              Nil
            }
          } else {
            Nil
          }
          (partitioning, sortOrder)
        } else {
          (UnknownPartitioning(0), Nil)
        }
      case _ =>
        (UnknownPartitioning(0), Nil)
    }
  }

  // Data filters translated to source-level Filters for push-down.
  @transient
  private val pushedDownFilters = dataFilters.flatMap(DataSourceStrategy.translateFilter)
  logInfo(s"Pushed Filters: ${pushedDownFilters.mkString(",")}")

  override lazy val metadata: Map[String, String] = {
    def seqToString(seq: Seq[Any]) = seq.mkString("[", ", ", "]")
    val location = relation.location
    val locationDesc =
      location.getClass.getSimpleName + seqToString(location.rootPaths)
    val metadata =
      Map(
        "Format" -> relation.fileFormat.toString,
        "ReadSchema" -> requiredSchema.catalogString,
        "Batched" -> supportsBatch.toString,
        "PartitionFilters" -> seqToString(partitionFilters),
        "PushedFilters" -> seqToString(pushedDownFilters),
        "DataFilters" -> seqToString(dataFilters),
        "Location" -> locationDesc)
    // PartitionCount requires listing files, which must not happen during
    // planning when runtime subqueries are involved.
    val withOptPartitionCount = if (relation.partitionSchemaOption.isDefined &&
        !hasPartitionsAvailableAtRunTime) {
      metadata + ("PartitionCount" -> selectedPartitions.size.toString)
    } else {
      metadata
    }
    val withSelectedBucketsCount = relation.bucketSpec.map { spec =>
      val numSelectedBuckets = optionalBucketSet.map { b =>
        b.cardinality()
      } getOrElse {
        spec.numBuckets
      }
      withOptPartitionCount + ("SelectedBucketsCount" ->
        s"$numSelectedBuckets out of ${spec.numBuckets}")
    } getOrElse {
      withOptPartitionCount
    }
    withSelectedBucketsCount
  }

  // Builds the actual file-reading RDD (bucketed or not) and then flushes the
  // driver-side metrics accumulated while listing files.
  private lazy val inputRDD: RDD[InternalRow] = {
    val readFile: (PartitionedFile) => Iterator[InternalRow] =
      relation.fileFormat.buildReaderWithPartitionValues(
        sparkSession = relation.sparkSession,
        dataSchema = relation.dataSchema,
        partitionSchema = relation.partitionSchema,
        requiredSchema = requiredSchema,
        filters = pushedDownFilters,
        options = relation.options,
        hadoopConf = relation.sparkSession.sessionState.newHadoopConfWithOptions(relation.options))

    val readRDD = relation.bucketSpec match {
      case Some(bucketing) if relation.sparkSession.sessionState.conf.bucketingEnabled =>
        createBucketedReadRDD(bucketing, readFile, selectedPartitions, relation)
      case _ =>
        createNonBucketedReadRDD(readFile, selectedPartitions, relation)
    }
    sendDriverMetrics()
    readRDD
  }

  override def inputRDDs(): Seq[RDD[InternalRow]] = {
    inputRDD :: Nil
  }

  override lazy val metrics =
    Map("numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
      "numFiles" -> SQLMetrics.createMetric(sparkContext, "number of files read"),
      "metadataTime" -> SQLMetrics.createTimingMetric(sparkContext, "metadata time"),
      "scanTime" -> SQLMetrics.createTimingMetric(sparkContext, "scan time"))

  protected override def doExecute(): RDD[InternalRow] = {
    if (supportsBatch) {
      // in the case of fallback, this batched scan should never fail because of:
      // 1) only primitive types are supported
      // 2) the number of columns should be smaller than spark.sql.codegen.maxFields
      WholeStageCodegenExec(this)(codegenStageId = 0).execute()
    } else {
      val numOutputRows = longMetric("numOutputRows")

      if (needsUnsafeRowConversion) {
        inputRDD.mapPartitionsWithIndexInternal { (index, iter) =>
          val proj = UnsafeProjection.create(schema)
          proj.initialize(index)
          iter.map( r => {
            numOutputRows += 1
            proj(r)
          })
        }
      } else {
        inputRDD.map { r =>
          numOutputRows += 1
          r
        }
      }
    }
  }

  override val nodeNamePrefix: String = "File"

  /**
   * Create an RDD for bucketed reads.
   * The non-bucketed variant of this function is [[createNonBucketedReadRDD]].
   *
   * The algorithm is pretty simple: each RDD partition being returned should include all the files
   * with the same bucket id from all the given Hive partitions.
   *
   * @param bucketSpec the bucketing spec.
   * @param readFile a function to read each (part of a) file.
   * @param selectedPartitions Hive-style partition that are part of the read.
   * @param fsRelation [[HadoopFsRelation]] associated with the read.
   */
  private def createBucketedReadRDD(
      bucketSpec: BucketSpec,
      readFile: (PartitionedFile) => Iterator[InternalRow],
      selectedPartitions: Seq[PartitionDirectory],
      fsRelation: HadoopFsRelation): RDD[InternalRow] = {
    logInfo(s"Planning with ${bucketSpec.numBuckets} buckets")
    val filesGroupedToBuckets =
      selectedPartitions.flatMap { p =>
        p.files.filter(_.getLen > 0).map { f =>
          PartitionedFileUtil.getPartitionedFile(f, f.getPath, p.values)
        }
      }.groupBy { f =>
        BucketingUtils
          .getBucketId(new Path(f.filePath).getName)
          .getOrElse(sys.error(s"Invalid bucket file ${f.filePath}"))
      }

    // Drop buckets eliminated by bucket pruning, if a pruning bitmap was supplied.
    val prunedFilesGroupedToBuckets = if (optionalBucketSet.isDefined) {
      val bucketSet = optionalBucketSet.get
      filesGroupedToBuckets.filter {
        f => bucketSet.get(f._1)
      }
    } else {
      filesGroupedToBuckets
    }

    val filePartitions = Seq.tabulate(bucketSpec.numBuckets) { bucketId =>
      FilePartition(bucketId, prunedFilesGroupedToBuckets.getOrElse(bucketId, Nil))
    }

    new FileScanRDD(fsRelation.sparkSession, readFile, filePartitions)
  }

  /**
   * Create an RDD for non-bucketed reads.
   * The bucketed variant of this function is [[createBucketedReadRDD]].
   *
   * @param readFile a function to read each (part of a) file.
   * @param selectedPartitions Hive-style partition that are part of the read.
   * @param fsRelation [[HadoopFsRelation]] associated with the read.
   */
  private def createNonBucketedReadRDD(
      readFile: (PartitionedFile) => Iterator[InternalRow],
      selectedPartitions: Seq[PartitionDirectory],
      fsRelation: HadoopFsRelation): RDD[InternalRow] = {
    val openCostInBytes = fsRelation.sparkSession.sessionState.conf.filesOpenCostInBytes
    val maxSplitBytes =
      FilePartition.maxSplitBytes(fsRelation.sparkSession, selectedPartitions)
    logInfo(s"Planning scan with bin packing, max size: $maxSplitBytes bytes, " +
      s"open cost is considered as scanning $openCostInBytes bytes.")

    val splitFiles = selectedPartitions.flatMap { partition =>
      partition.files.filter(_.getLen > 0).flatMap { file =>
        // getPath() is very expensive so we only want to call it once in this block:
        val filePath = file.getPath
        val isSplitable = relation.fileFormat.isSplitable(
          relation.sparkSession, relation.options, filePath)
        PartitionedFileUtil.splitFiles(
          sparkSession = relation.sparkSession,
          file = file,
          filePath = filePath,
          isSplitable = isSplitable,
          maxSplitBytes = maxSplitBytes,
          partitionValues = partition.values
        )
      }
    }.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)  // largest splits first

    val partitions =
      FilePartition.getFilePartitions(relation.sparkSession, splitFiles, maxSplitBytes)

    new FileScanRDD(fsRelation.sparkSession, readFile, partitions)
  }

  override def doCanonicalize(): FileSourceScanExec = {
    FileSourceScanExec(
      relation,
      output.map(QueryPlan.normalizeExprId(_, output)),
      requiredSchema,
      QueryPlan.normalizePredicates(partitionFilters, output),
      optionalBucketSet,
      QueryPlan.normalizePredicates(dataFilters, output),
      None)
  }
}
| yanboliang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala | Scala | apache-2.0 | 18,147 |
import scala.reflect.runtime.universe
object ScalaReflection {
def main(args: Array[String]): Unit = {
//val theType = getTypeTag(classOf[Member]).tpe
val theType = getType(classOf[Member])
println(s"typeTag.declarations => ${theType.declarations}")
println(s"typeTag.members => ${theType.members}")
val vals = theType.declarations.filter(_.asTerm.isVal)
println(s"vals => $vals")
val vars = theType.declarations.filter(_.asTerm.isVar)
println(s"vars => $vars")
val nameField = theType.declaration(universe.newTermName("name"))
println(s"nameField => $nameField")
val methods = theType.declarations.filter(_.isMethod)
println(s"methods => $methods")
methods foreach { m => println(s"method name = [${m.name.decoded}]") }
val implicits = theType.declarations.filter(_.isImplicit)
println(s"implicits => $implicits")
val primitives = theType.declarations.filter(isPrimitive)
println(s"primitives => $primitives")
println(newInstance(classOf[Member]))
val m = newInstance(classOf[Member], "Taro")
m.methodA("[", "]")
println("getFieldValue => " + getFieldValue(m, "name"))
setFieldValue(m, "age", 25)
println(s"setFieldValue age => ${m.age}")
val result: String = invokeMethod(m, "methodB", "***", "***")
println(s"invokeMethod => $result")
}
def getTypeTag[T: universe.TypeTag](targetClass: Class[T]): universe.TypeTag[T] =
universe.typeTag[T]
def getType[T: universe.TypeTag](targetClass: Class[T]): universe.Type =
universe.typeOf[T]
def isPrimitive(symbol: universe.Symbol): Boolean =
if (symbol.isMethod) {
val rt = symbol.asMethod.returnType
if (rt =:= universe.definitions.BooleanTpe) true
else if (rt =:= universe.definitions.ByteTpe) true
else if (rt =:= universe.definitions.CharTpe) true
else if (rt =:= universe.definitions.DoubleTpe) true
else if (rt =:= universe.definitions.FloatTpe) true
else if (rt =:= universe.definitions.IntTpe) true
else if (rt =:= universe.definitions.LongTpe) true
else if (rt =:= universe.definitions.ShortTpe) true
else false
} else false
def newInstance[T: universe.TypeTag](targetClass: Class[T], args: Any*): T = {
val mirror = universe.runtimeMirror(targetClass.getClassLoader)
// これでもOK
// val mirror = universe.typeTag[T].mirror
val classSymbol = universe.typeOf[T].typeSymbol.asClass
val classMirror = mirror.reflectClass(classSymbol)
val constructorSymbol = universe.typeOf[T].declaration(universe.nme.CONSTRUCTOR).filter {
_.asMethod.paramss match {
case List(Nil) => args.isEmpty
case List(List(as @ _*)) => as.size == args.size
}
}.asMethod
val constructorMethodMirror = classMirror.reflectConstructor(constructorSymbol)
constructorMethodMirror(args: _*).asInstanceOf[T]
}
def getFieldValue[A: universe.TypeTag: scala.reflect.ClassTag, B](instance: A, name: String): B = {
val mirror = universe.runtimeMirror(instance.getClass.getClassLoader)
val termSymbol = universe.typeOf[A].declaration(universe.newTermName(name)).asTerm
val instanceMirror = mirror.reflect(instance)
instanceMirror.reflectField(termSymbol).get.asInstanceOf[B]
}
def setFieldValue[A: universe.TypeTag: scala.reflect.ClassTag, B](instance: A, name: String, value: B):Unit = {
val mirror = universe.runtimeMirror(instance.getClass.getClassLoader)
val termSymbol = universe.typeOf[A].declaration(universe.newTermName(name)).asTerm
val instanceMirror = mirror.reflect(instance)
instanceMirror.reflectField(termSymbol).set(value)
}
/** Reflectively invokes the method named `name` on `instance` with `args`,
  * casting the result to `B`.
  */
def invokeMethod[A: universe.TypeTag: scala.reflect.ClassTag, B](instance: A, name: String, args: Any*): B = {
  val runtimeMirror = universe.runtimeMirror(instance.getClass.getClassLoader)
  val reflected = runtimeMirror.reflect(instance)
  val methodSymbol = universe.typeOf[A].declaration(universe.newTermName(name)).asMethod
  reflected.reflectMethod(methodSymbol)(args: _*).asInstanceOf[B]
}
}
/** Fixture class exercised by the reflection helpers above; its member names
  * ("name", "age", "counter", "iv", "methodA", "methodB") are looked up
  * reflectively, so they must not be renamed.
  */
class Member(val name: String) {
  // Zero-arg auxiliary constructor, targeted by reflective instantiation.
  def this() = this("dummy")
  // Two-arg auxiliary constructor, also targeted by arity-based lookup.
  def this(name: String, age: Int) = {
    this(name)
    this.age = age
  }
  // Mutable so it can be written via setFieldValue; default-initialised to 0.
  var age: Int = _
  // Private state read back via getFieldValue; bumped by both methods.
  private var counter: Int = _
  implicit val iv: String = "Implicit Value"
  // Side-effecting variant: prints the decorated name.
  def methodA(pre: String, suf: String): Unit = {
    counter += 1
    println(s"$pre$name$suf")
  }
  // Pure-return variant: yields the decorated name.
  def methodB(pre: String, suf: String): String = {
    counter += 1
    s"$pre$name$suf"
  }
  override def toString(): String =
    s"Member name=$name, counter=$counter, iv=$iv"
}
| kazuhira-r/scala-2.10.0-trial | reflection/ScalaReflection.scala | Scala | mit | 4,601 |
package com.amichalo.smooolelo.config
import com.typesafe.config.Config
/** Read-only server binding settings: the host and port to listen on. */
trait MoooleloServerConfig {
  def host: String
  def port: Int
}
/** Factory for [[MoooleloServerConfig]] views backed by a Typesafe `Config`. */
object MoooleloServerConfig {

  /** Builds a config view from `config`.
    *
    * The `server.host` / `server.port` entries are read eagerly exactly once,
    * so a missing or malformed key fails here (at construction) rather than on
    * first accessor use, and repeated calls do not re-query the Config.
    */
  def fromConfig(config: Config): MoooleloServerConfig = {
    val serverHost = config.getString("server.host")
    val serverPort = config.getInt("server.port")
    new MoooleloServerConfig {
      override val host: String = serverHost
      override val port: Int = serverPort
    }
  }
}
/** Demonstrates invoking a JNI `@native` method from Scala; the native
  * library is resolved and loaded at object initialisation.
  */
object Test {
  //println("java.library.path=" + System.getProperty("java.library.path"))

  // JVM word size ("32" or "64"); defaults to "32" when the property is absent.
  val sysWordSize = System.getProperty("sun.arch.data.model", "32")
  val sysType = System.getProperty("os.name")

  // macOS ships one universal library; other platforms are word-size specific.
  val libName = sysType match {
    case "Mac OS X" => "natives"
    case _          => "natives-" + sysWordSize
  }
  System.loadLibrary(libName)

  // Implemented in the native library; the Scala body is never executed.
  @native
  def sayHello(s: String): String = null

  def main(args: Array[String]): Unit = {
    val s = sayHello("Scala is great!")
    println("Invocation returned \"" + s + "\"")
  }
}
| felixmulder/scala | test/files/jvm/natives.scala | Scala | bsd-3-clause | 523 |
package fpinscala.answers
import scala.language.implicitConversions
import scala.language.postfixOps
import scala.language.higherKinds
import language.higherKinds
/** Convenience layer over the free-monad based `IO` defined in `IO3`,
  * specialised to the non-blocking `Par` interpreter.
  */
package object iomonad {
  import fpinscala.answers.parallelism.Nonblocking._

  // IO is the free monad over the non-blocking Par type.
  type IO[A] = IO3.IO[A]
  def IO[A](a: => A): IO[A] = IO3.IO[A](a)

  implicit val ioMonad = IO3.freeMonad[Par]

  // Lifts an already-computed value into IO with no suspension.
  def now[A](a: A): IO[A] = IO3.Return(a)

  // Shifts `a` onto a new logical thread via Par before running it.
  def fork[A](a: => IO[A]): IO[A] = par(Par.lazyUnit(())) flatMap (_ => a)
  def forkUnit[A](a: => A): IO[A] = fork(now(a))

  // Defers evaluation of `a` until the IO is interpreted.
  def delay[A](a: => A): IO[A] = now(()) flatMap (_ => now(a))

  // Embeds a raw Par computation into IO.
  def par[A](a: Par[A]): IO[A] = IO3.Suspend(a)

  // Registers a callback-style asynchronous computation as an IO.
  def async[A](cb: ((A => Unit) => Unit)): IO[A] =
    fork(par(Par.async(cb)))

  type Free[F[_], A] = IO3.Free[F, A]
  def Return[A](a: A): IO[A] = IO3.Return[Par,A](a)

  // To run an `IO`, we need an executor service.
  // The name we have chosen for this method, `unsafePerformIO`,
  // reflects that it is unsafe, i.e. that it has side effects,
  // and that it _performs_ the actual I/O.
  import java.util.concurrent.ExecutorService

  def unsafePerformIO[A](io: IO[A])(implicit E: ExecutorService): A =
    Par.run(E) { IO3.run(io)(IO3.parMonad) }
}
| peterbecich/fpinscala | answers/src/main/scala/fpinscala/iomonad/package.scala | Scala | mit | 1,199 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.source.{ Indexable, DocumentMap, DocumentSource }
import org.elasticsearch.action.index.IndexRequest.OpType
import org.elasticsearch.action.index.{ IndexRequest, IndexResponse }
import org.elasticsearch.client.Client
import org.elasticsearch.common.xcontent.{ XContentBuilder, XContentFactory }
import org.elasticsearch.index.VersionType
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
/** @author Stephen Samuel */
/** Entry point of the index DSL: `index("myindex" -> "mytype")` creates an
  * [[IndexDefinition]], and the implicit executable runs it against a client.
  */
trait IndexDsl {
  // Builds an IndexDefinition from an (index name, type name) pair.
  def index(kv: (String, String)): IndexDefinition = new IndexDefinition(kv._1, kv._2)

  // Executes an IndexDefinition asynchronously against the Elasticsearch client.
  implicit object IndexDefinitionExecutable
      extends Executable[IndexDefinition, IndexResponse] {
    override def apply(c: Client, t: IndexDefinition): Future[IndexResponse] = {
      injectFuture(c.index(t.build, _))
    }
  }
}
/** Mutable builder for a single Elasticsearch index request.
  *
  * The document body can be supplied in one of four ways, consulted by
  * [[build]] in this precedence order: a [[DocumentSource]] (`doc`), a raw
  * JSON string (`source` via an [[Indexable]]), a [[DocumentMap]] (`doc`),
  * or individual fields (`fields` / `fieldValues`) rendered to XContent.
  */
class IndexDefinition(index: String, `type`: String) extends BulkCompatibleDefinition {

  private val _request = new IndexRequest(index, `type`)
  private val _fields = mutable.Buffer[FieldValue]()
  private var _source: Option[DocumentSource] = None
  private var _json: Option[String] = None
  private var _map: Option[DocumentMap] = None

  // Materialises the underlying IndexRequest, using the first document
  // representation that was set: source > json > map > fields.
  def build = _source match {
    case Some(src) => _request.source(src.json)
    case None =>
      _json match {
        case Some(json) => _request.source(json)
        case None =>
          _map match {
            case Some(map) => _request.source(map.map.asJava)
            case None => _request.source(_fieldsAsXContent)
          }
      }
  }

  // Renders the accumulated field values into a single JSON object.
  def _fieldsAsXContent: XContentBuilder = {
    val source = XContentFactory.jsonBuilder().startObject()
    _fields.foreach(_.output(source))
    source.endObject()
  }

  // Sets the body from any T with an Indexable type-class instance.
  def source[T](t: T)(implicit indexable: Indexable[T]): this.type = {
    this._json = Option(indexable.json(t))
    this
  }

  def doc(source: DocumentSource): this.type = {
    this._source = Option(source)
    this
  }

  def doc(map: DocumentMap): this.type = {
    this._map = Option(map)
    this
  }

  def id(id: Any): IndexDefinition = {
    _request.id(id.toString)
    this
  }

  def opType(opType: IndexRequest.OpType): IndexDefinition = {
    _request.opType(opType)
    this
  }

  def parent(parent: String): IndexDefinition = {
    _request.parent(parent)
    this
  }

  def refresh(refresh: Boolean): IndexDefinition = {
    _request.refresh(refresh)
    this
  }

  def routing(routing: String): IndexDefinition = {
    _request.routing(routing)
    this
  }

  def timestamp(timestamp: String): IndexDefinition = {
    _request.timestamp(timestamp)
    this
  }

  def ttl(ttl: Long): IndexDefinition = {
    _request.ttl(ttl)
    this
  }

  def ttl(duration: FiniteDuration): this.type = {
    _request.ttl(duration.toMillis)
    this
  }

  // NOTE(review): update(true) maps to OpType.CREATE (fail if the document
  // already exists) and update(false) to OpType.INDEX (overwrite). That looks
  // inverted relative to the method name — confirm against elastic4s usage
  // before relying on it.
  def update(update: Boolean): IndexDefinition = if (update) opType(OpType.CREATE) else opType(OpType.INDEX)

  def version(version: Long): IndexDefinition = {
    _request.version(version)
    this
  }

  def versionType(versionType: VersionType): IndexDefinition = {
    _request.versionType(versionType)
    this
  }

  // Appends (does not replace) field values; used only when no source/json/map
  // body has been set.
  def fields(fields: Map[String, Any]): IndexDefinition = {
    _fields ++= FieldsMapper.mapFields(fields)
    this
  }

  def fields(_fields: (String, Any)*): IndexDefinition = fields(_fields.toMap)

  def fields(_fields: Iterable[(String, Any)]): IndexDefinition = fields(_fields.toMap)

  def fieldValues(fields: FieldValue*): IndexDefinition = {
    _fields ++= fields
    this
  }
}
| l15k4/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/IndexDsl.scala | Scala | apache-2.0 | 3,570 |
package com.lucidchart.open.nark.request
import java.util.UUID
import play.api.Play
import play.api.Play.current
import play.api.Mode
import play.api.libs.iteratee.Done
import play.api.mvc._
import play.api.mvc.Results._
/**
* Provides helpers for creating `EnvironmentAction` values.
*/
trait EnvironmentActionBuilder {
  /**
   * Make sure Play is running in the mode passed in.
   * If not, return a 404. If so, run the block.
   *
   * `block` is by-name (matching the multi-mode overload below) so the
   * wrapped action is only constructed when a request is actually handled
   * in a valid mode, instead of eagerly at call time.
   */
  def checkEnvironment(validMode: Mode.Value)(block: => EssentialAction): EssentialAction = checkEnvironment(Set(validMode))(block)

  /**
   * Make sure Play is running in one of the modes passed in.
   * If not, return a 404. If so, run the block.
   */
  def checkEnvironment(validModes: Set[Mode.Value])(block: => EssentialAction): EssentialAction = new EssentialAction {
    def apply(requestHeader: RequestHeader) = {
      if (!validModes.contains(Play.mode)) {
        // Wrong environment: answer 404 without consuming the request body.
        Done(NotFound)
      }
      else {
        block(requestHeader)
      }
    }
  }

  /**
   * Only execute the block if play is running in development mode
   */
  def devOnly(block: => EssentialAction) = checkEnvironment(Mode.Dev)(block)

  /**
   * Only execute the block if play is running in test mode
   */
  def testOnly(block: => EssentialAction) = checkEnvironment(Mode.Test)(block)

  /**
   * Only execute the block if play is running in production mode
   */
  def prodOnly(block: => EssentialAction) = checkEnvironment(Mode.Prod)(block)
}
/**
 * Helper object to create `EnvironmentAction` values.
 *
 * Import or reference this singleton to use the `devOnly` / `testOnly` /
 * `prodOnly` guards without extending the builder trait yourself.
 */
object EnvironmentAction extends EnvironmentActionBuilder
| lucidsoftware/nark | app/com/lucidchart/open/nark/request/EnvironmentAction.scala | Scala | apache-2.0 | 1,542 |
package org.bitcoins.keymanager
import com.typesafe.config.ConfigFactory
import org.bitcoins.core.crypto.BIP39Seed
import org.bitcoins.core.crypto.ExtKeyVersion.SegWitMainNetPriv
import org.bitcoins.core.util.TimeUtil
import org.bitcoins.crypto.AesPassword
import org.bitcoins.keymanager.ReadMnemonicError._
import org.bitcoins.keymanager.bip39.BIP39KeyManager
import org.bitcoins.testkitcore.Implicits._
import org.bitcoins.testkitcore.gen.{CryptoGenerators, StringGenerators}
import org.bitcoins.testkit.wallet.BitcoinSWalletTest
import org.bitcoins.wallet.config.WalletAppConfig
import org.scalatest.{BeforeAndAfterEach, FutureOutcome}
import java.nio.file.{Files, Path}
/** Tests for [[WalletStorage]]: writing, reading, decrypting and
  * password-rotating wallet seeds (BIP39 mnemonics and raw xprvs) on disk.
  *
  * Each test receives a fresh, not-yet-started [[WalletAppConfig]] fixture,
  * so no seed file exists when a test begins. Malformed-seed tests write
  * hand-crafted JSON fixtures directly to the seed path.
  */
class WalletStorageTest extends BitcoinSWalletTest with BeforeAndAfterEach {

  override type FixtureParam = WalletAppConfig

  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    withWalletConfigNotStarted(test)

  // Location of the seed file for the given wallet configuration.
  def getSeedPath(config: WalletAppConfig): Path = {
    config.kmConf.seedPath
  }

  behavior of "WalletStorage"

  // AES password used to encrypt seeds throughout these tests.
  val passphrase: Some[AesPassword] = Some(
    AesPassword.fromNonEmptyString("this_is_secret"))

  // A different password, used to exercise decryption failures and rotation.
  val badPassphrase: Some[AesPassword] = Some(
    AesPassword.fromNonEmptyString("this_is_also_secret"))

  /** Generates a random mnemonic, encrypts it with [[passphrase]] and writes
    * it to the wallet's seed path, returning the decrypted form for
    * comparison in assertions.
    */
  def getAndWriteMnemonic(walletConf: WalletAppConfig): DecryptedMnemonic = {
    val mnemonicCode = CryptoGenerators.mnemonicCode.sampleSome
    val decryptedMnemonic = DecryptedMnemonic(mnemonicCode, TimeUtil.now)
    val encrypted = decryptedMnemonic.encrypt(passphrase.get)
    val seedPath = getSeedPath(walletConf)
    WalletStorage.writeSeedToDisk(seedPath, encrypted)
    decryptedMnemonic
  }

  /** Generates a random extended private key, encrypts it with [[passphrase]]
    * and writes it to the wallet's seed path, returning the decrypted form.
    */
  def getAndWriteXprv(walletConf: WalletAppConfig): DecryptedExtPrivKey = {
    val xprv = CryptoGenerators.extPrivateKey.sampleSome
    val decryptedExtPrivKey = DecryptedExtPrivKey(xprv, TimeUtil.now)
    val encrypted = decryptedExtPrivKey.encrypt(passphrase.get)
    val seedPath = getSeedPath(walletConf)
    WalletStorage.writeSeedToDisk(seedPath, encrypted)
    decryptedExtPrivKey
  }

  it must "write and read an encrypted mnemonic to disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val writtenMnemonic = getAndWriteMnemonic(walletConf)

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val seedPath = getSeedPath(walletConf)
      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, passphrase)
      read match {
        case Right(readMnemonic: DecryptedMnemonic) =>
          assert(writtenMnemonic.mnemonicCode == readMnemonic.mnemonicCode)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenMnemonic.creationTime.getEpochSecond == readMnemonic.creationTime.getEpochSecond)
        case Right(xprv: DecryptedExtPrivKey) =>
          fail(s"Parsed unexpected type of seed $xprv")
        case Left(err) => fail(err.toString)
      }
  }

  it must "write and read an encrypted seed to disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val writtenXprv = getAndWriteXprv(walletConf)

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val seedPath = getSeedPath(walletConf)
      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, passphrase)
      read match {
        case Right(readXprv: DecryptedExtPrivKey) =>
          assert(writtenXprv.xprv == readXprv.xprv)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenXprv.creationTime.getEpochSecond == readXprv.creationTime.getEpochSecond)
        case Right(readMnemonic: DecryptedMnemonic) =>
          fail(s"Parsed unexpected type of seed $readMnemonic")
        case Left(err) => fail(err.toString)
      }
  }

  it must "write and read an unencrypted mnemonic to disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val mnemonicCode = CryptoGenerators.mnemonicCode.sampleSome
      val writtenMnemonic = DecryptedMnemonic(mnemonicCode, TimeUtil.now)
      val seedPath = getSeedPath(walletConf)
      WalletStorage.writeSeedToDisk(seedPath, writtenMnemonic)

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)
      read match {
        case Right(readMnemonic: DecryptedMnemonic) =>
          assert(writtenMnemonic.mnemonicCode == readMnemonic.mnemonicCode)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenMnemonic.creationTime.getEpochSecond == readMnemonic.creationTime.getEpochSecond)
        case Right(xprv: DecryptedExtPrivKey) =>
          fail(s"Parsed unexpected type of seed $xprv")
        case Left(err) => fail(err.toString)
      }
  }

  it must "write and read an unencrypted xprv to disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val xprv = CryptoGenerators.extPrivateKey.sampleSome
      val writtenXprv = DecryptedExtPrivKey(xprv, TimeUtil.now)
      val seedPath = getSeedPath(walletConf)
      WalletStorage.writeSeedToDisk(seedPath, writtenXprv)

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)
      read match {
        case Right(readXprv: DecryptedExtPrivKey) =>
          assert(writtenXprv.xprv == readXprv.xprv)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenXprv.creationTime.getEpochSecond == readXprv.creationTime.getEpochSecond)
        case Right(readMnemonic: DecryptedMnemonic) =>
          fail(s"Parsed unexpected type of seed $readMnemonic")
        case Left(err) => fail(err.toString)
      }
  }

  it must "change the password of an encrypted mnemonic" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val writtenMnemonic = getAndWriteMnemonic(walletConf)
      assert(walletConf.kmConf.seedExists())

      val seedPath = getSeedPath(walletConf)
      WalletStorage.changeAesPassword(seedPath = seedPath,
                                      oldPasswordOpt = passphrase,
                                      newPasswordOpt = badPassphrase)

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, badPassphrase)
      read match {
        case Right(readMnemonic: DecryptedMnemonic) =>
          assert(writtenMnemonic.mnemonicCode == readMnemonic.mnemonicCode)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenMnemonic.creationTime.getEpochSecond == readMnemonic.creationTime.getEpochSecond)
        case Right(xprv: DecryptedExtPrivKey) =>
          fail(s"Parsed unexpected type of seed $xprv")
        case Left(err) => fail(err.toString)
      }
  }

  it must "change the password of an unencrypted mnemonic" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val mnemonicCode = CryptoGenerators.mnemonicCode.sampleSome
      val writtenMnemonic = DecryptedMnemonic(mnemonicCode, TimeUtil.now)
      val seedPath = getSeedPath(walletConf)
      WalletStorage.writeSeedToDisk(seedPath, writtenMnemonic)
      assert(walletConf.kmConf.seedExists())

      WalletStorage.changeAesPassword(seedPath = seedPath,
                                      oldPasswordOpt = None,
                                      newPasswordOpt = badPassphrase)

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, badPassphrase)
      read match {
        case Right(readMnemonic: DecryptedMnemonic) =>
          assert(writtenMnemonic.mnemonicCode == readMnemonic.mnemonicCode)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenMnemonic.creationTime.getEpochSecond == readMnemonic.creationTime.getEpochSecond)
        case Right(xprv: DecryptedExtPrivKey) =>
          fail(s"Parsed unexpected type of seed $xprv")
        case Left(err) => fail(err.toString)
      }
  }

  it must "remove the password from an encrypted mnemonic" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val writtenMnemonic = getAndWriteMnemonic(walletConf)
      assert(walletConf.kmConf.seedExists())

      val seedPath = getSeedPath(walletConf)
      WalletStorage.changeAesPassword(seedPath = seedPath,
                                      oldPasswordOpt = passphrase,
                                      newPasswordOpt = None)

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)
      read match {
        case Right(readMnemonic: DecryptedMnemonic) =>
          assert(writtenMnemonic.mnemonicCode == readMnemonic.mnemonicCode)
          // Need to compare using getEpochSecond because when reading an epoch second
          // it will not include the milliseconds that writtenMnemonic will have
          assert(
            writtenMnemonic.creationTime.getEpochSecond == readMnemonic.creationTime.getEpochSecond)
        case Right(xprv: DecryptedExtPrivKey) =>
          fail(s"Parsed unexpected type of seed $xprv")
        case Left(err) => fail(err.toString)
      }
  }

  it must "fail to change the aes password when given the wrong password" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      getAndWriteMnemonic(walletConf)
      assert(walletConf.kmConf.seedExists())

      val seedPath = getSeedPath(walletConf)
      assertThrows[RuntimeException](
        WalletStorage.changeAesPassword(seedPath = seedPath,
                                        oldPasswordOpt = badPassphrase,
                                        newPasswordOpt = badPassphrase))
  }

  it must "fail to change the aes password when given no password" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      getAndWriteMnemonic(walletConf)
      assert(walletConf.kmConf.seedExists())

      val seedPath = getSeedPath(walletConf)
      assertThrows[RuntimeException](
        WalletStorage.changeAesPassword(seedPath = seedPath,
                                        oldPasswordOpt = None,
                                        newPasswordOpt = badPassphrase))
  }

  it must "fail to set the aes password when given an oldPassword" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val mnemonicCode = CryptoGenerators.mnemonicCode.sampleSome
      val writtenMnemonic = DecryptedMnemonic(mnemonicCode, TimeUtil.now)
      val seedPath = getSeedPath(walletConf)
      WalletStorage.writeSeedToDisk(seedPath, writtenMnemonic)
      assert(walletConf.kmConf.seedExists())

      assertThrows[RuntimeException](
        WalletStorage.changeAesPassword(seedPath = seedPath,
                                        oldPasswordOpt = passphrase,
                                        newPasswordOpt = badPassphrase))
  }

  // Legacy seeds may predate the creationTime field; reading one should fall
  // back to the first bitcoin-s wallet timestamp.
  it must "read an encrypted mnemonic without a creation time" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "iv":"d2aeeda5ab83d43bb0b8fe6416b12009",
          |   "cipherText": "003ad9acd6c3559911d7e2446dc329c869266844fda949d69fce591205ab7a32ddb0aa614b1be5963ecc5b784bb0c1454d5d757b71584d5d990ecadc3d4414b87df50ffc46a54c912f258d5ab094bbeb49f92ef02ab60c92a52b3f205ce91943dc6c21b15bfbc635c17b049a8eec4b0a341c48ea163d5384ebbd69c79ff175823e8fbb0849e5a223e243c81c7f7c5bca62a11b7396",
          |   "salt":"db3a6d3c88f430bf44f4a834d85255ad6b52c187c05e95fac3b427b094298028"
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath,
                                          Some(BIP39KeyManager.badPassphrase))

      read match {
        case Right(readMnemonic) =>
          assert(
            readMnemonic.creationTime.getEpochSecond == WalletStorage.FIRST_BITCOIN_S_WALLET_TIME)
        case Left(err) => fail(err.toString)
      }
  }

  it must "read an unencrypted mnemonic without a creation time" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "mnemonicSeed":["stage","boring","net","gather","radar","radio","arrest","eye","ask","risk","girl","country"]
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read = WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Right(readMnemonic) =>
          assert(
            readMnemonic.creationTime.getEpochSecond == WalletStorage.FIRST_BITCOIN_S_WALLET_TIME)
        case Left(err) => fail(err.toString)
      }
  }

  it must "fail to read an encrypted mnemonic with improperly formatted creation time" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "iv":"d2aeeda5ab83d43bb0b8fe6416b12009",
          |   "cipherText": "003ad9acd6c3559911d7e2446dc329c869266844fda949d69fce591205ab7a32ddb0aa614b1be5963ecc5b784bb0c1454d5d757b71584d5d990ecadc3d4414b87df50ffc46a54c912f258d5ab094bbeb49f92ef02ab60c92a52b3f205ce91943dc6c21b15bfbc635c17b049a8eec4b0a341c48ea163d5384ebbd69c79ff175823e8fbb0849e5a223e243c81c7f7c5bca62a11b7396",
          |   "salt":"db3a6d3c88f430bf44f4a834d85255ad6b52c187c05e95fac3b427b094298028",
          |   "creationTime":"not a number"
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, passphrase)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an unencrypted mnemonic with improperly formatted creation time" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "mnemonicSeed":["stage","boring","net","gather","radar","radio","arrest","eye","ask","risk","girl","country"],
          |   "creationTime":"not a number"
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an encrypted mnemonic with bad aes password" in {
    walletConf =>
      val _ = getAndWriteMnemonic(walletConf)
      val seedPath = getSeedPath(walletConf)
      val read = WalletStorage.decryptSeedFromDisk(seedPath, badPassphrase)

      read match {
        case Right(_) =>
          fail("Wrote and read with different passwords")
        case Left(DecryptionError) => succeed
        case Left(err)             => fail(err.toString)
      }
  }

  it must "fail to read an encrypted mnemonic that has bad JSON in it" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "iv":"ba7722683dad8067df8d069ee04530cc",
          |   "cipherText":,
          |   "salt":"2b7e7d718139518070a87fbbda03ea33cdcda83b555020e9344774e6e7d08af2"
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, passphrase)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an unencrypted mnemonic that has bad JSON in it" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "mnemonicSeed":,
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an encrypted mnemonic that has missing a JSON field" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "iv":"ba7722683dad8067df8d069ee04530cc",
          |   "salt":"2b7e7d718139518070a87fbbda03ea33cdcda83b555020e9344774e6e7d08af2"
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, passphrase)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an unencrypted mnemonic that has missing a JSON field" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "creationTime":1601917137
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read = WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an encrypted mnemonic not in hex" in { walletConf =>
    val badJson =
      """
        | {
        |   "iv":"ba7722683dad8067df8d069ee04530cc",
        |   "cipherText": "my name is jeff",
        |   "salt":"2b7e7d718139518070a87fbbda03ea33cdcda83b555020e9344774e6e7d08af2"
        | }
    """.stripMargin
    val seedPath = getSeedPath(walletConf)
    Files.createDirectories(seedPath.getParent)
    Files.write(seedPath, badJson.getBytes())

    val read =
      WalletStorage.decryptSeedFromDisk(seedPath, passphrase)

    read match {
      case Left(JsonParsingError(_))  => succeed
      case res @ (Left(_) | Right(_)) => fail(res.toString)
    }
  }

  it must "fail to read an unencrypted xprv with a password" in { walletConf =>
    val badJson =
      """
        | {
        |   "xprv":"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
        |   "creationTime":1601917137
        | }
    """.stripMargin
    val seedPath = getSeedPath(walletConf)
    Files.createDirectories(seedPath.getParent)
    Files.write(seedPath, badJson.getBytes())

    val read =
      WalletStorage.decryptSeedFromDisk(seedPath, passphrase)

    read match {
      case Left(DecryptionError)      => succeed
      case res @ (Left(_) | Right(_)) => fail(res.toString)
    }
  }

  it must "fail to read an unencrypted xprv with a improperly formatted xprv" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "xprv":"BROKENxprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
          |   "creationTime":1601917137
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an unencrypted xprv with a improperly formatted creation time" in {
    walletConf =>
      val badJson =
        """
          | {
          |   "xprv":"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
          |   "creationTime":
          | }
    """.stripMargin
      val seedPath = getSeedPath(walletConf)
      Files.createDirectories(seedPath.getParent)
      Files.write(seedPath, badJson.getBytes())

      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(JsonParsingError(_))  => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "fail to read an unencrypted seed that doesn't exist" in {
    walletConf =>
      require(!walletConf.kmConf.seedExists())
      val seedPath = getSeedPath(walletConf)
      val read =
        WalletStorage.decryptSeedFromDisk(seedPath, None)

      read match {
        case Left(NotFoundError)        => succeed
        case res @ (Left(_) | Right(_)) => fail(res.toString)
      }
  }

  it must "throw an exception if we attempt to overwrite an existing seed" in {
    walletConf =>
      assert(!walletConf.kmConf.seedExists())

      val _ = getAndWriteMnemonic(walletConf)

      // should have been written by now
      assert(walletConf.kmConf.seedExists())

      assertThrows[RuntimeException] {
        //attempt to write another mnemonic
        getAndWriteMnemonic(walletConf)
      }
  }

  it must "write and read an encrypted ExtPrivateKey from disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val password = getBIP39PasswordOpt().getOrElse(BIP39Seed.EMPTY_PASSWORD)
      val keyVersion = SegWitMainNetPriv

      val writtenMnemonic = getAndWriteMnemonic(walletConf)
      val expected = BIP39Seed
        .fromMnemonic(mnemonic = writtenMnemonic.mnemonicCode,
                      password = password)
        .toExtPrivateKey(keyVersion)
        .toHardened

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val seedPath = getSeedPath(walletConf)
      val read =
        WalletStorage.getPrivateKeyFromDisk(seedPath,
                                            keyVersion,
                                            passphrase,
                                            Some(password))

      assert(read == expected)
  }

  it must "write and read an unencrypted ExtPrivateKey from disk" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val mnemonicCode = CryptoGenerators.mnemonicCode.sampleSome
      val writtenMnemonic = DecryptedMnemonic(mnemonicCode, TimeUtil.now)
      val seedPath = getSeedPath(walletConf)
      WalletStorage.writeSeedToDisk(seedPath, writtenMnemonic)

      val password = getBIP39PasswordOpt().getOrElse(BIP39Seed.EMPTY_PASSWORD)
      val keyVersion = SegWitMainNetPriv

      val expected = BIP39Seed
        .fromMnemonic(mnemonic = writtenMnemonic.mnemonicCode,
                      password = password)
        .toExtPrivateKey(keyVersion)
        .toHardened

      // should have been written by now
      assert(walletConf.kmConf.seedExists())
      val read =
        WalletStorage.getPrivateKeyFromDisk(seedPath,
                                            keyVersion,
                                            None,
                                            Some(password))

      assert(read == expected)
  }

  it must "fail to read unencrypted ExtPrivateKey from disk that doesn't exist" in {
    walletConf: WalletAppConfig =>
      assert(!walletConf.kmConf.seedExists())
      val seedPath = getSeedPath(walletConf)
      val keyVersion = SegWitMainNetPriv

      assertThrows[RuntimeException](
        WalletStorage.getPrivateKeyFromDisk(seedPath, keyVersion, None, None))
  }

  // Two wallets with different names must store independent seeds in the
  // shared seed folder under the same base datadir.
  it must "safely create 2 seeds in the seed folder" in {
    walletConfA: WalletAppConfig =>
      assert(!walletConfA.kmConf.seedExists())
      getAndWriteMnemonic(walletConfA)
      assert(walletConfA.kmConf.seedExists())

      val otherWalletName = StringGenerators.genNonEmptyString
        .suchThat(_ != walletConfA.walletNameOpt.getOrElse(""))
        .sampleSome

      val walletConfB = walletConfA.withOverrides(
        ConfigFactory.parseString(
          s"bitcoin-s.wallet.walletName = $otherWalletName"))

      assert(!walletConfB.kmConf.seedExists())
      getAndWriteXprv(walletConfB)
      assert(walletConfB.kmConf.seedExists())

      val expectedParentDir =
        walletConfA.baseDatadir.resolve(WalletStorage.SEED_FOLDER_NAME)
      assert(walletConfA.seedPath.getParent == expectedParentDir)
      assert(walletConfB.seedPath.getParent == expectedParentDir)
      assert(walletConfA.seedPath.getParent == walletConfB.seedPath.getParent)
      assert(walletConfA.seedPath != walletConfB.seedPath)

      val mnemonicAE =
        WalletStorage.decryptSeedFromDisk(walletConfA.seedPath, passphrase)
      val mnemonicBE =
        WalletStorage.decryptSeedFromDisk(walletConfB.seedPath, passphrase)

      (mnemonicAE, mnemonicBE) match {
        case (Left(_), Left(_)) | (Right(_), Left(_)) | (Left(_), Right(_)) =>
          fail() // if any of them error, then fail
        case (Right(mnemonicA), Right(mnemonicB)) =>
          assert(mnemonicA != mnemonicB)
      }
  }
}
| bitcoin-s/bitcoin-s | key-manager-test/src/test/scala/org/bitcoins/keymanager/WalletStorageTest.scala | Scala | mit | 25,969 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.io.File
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.util.Benchmark
/**
* Benchmark to measure TPCDS query performance.
* To run this:
* spark-submit --class <this class> --jars <spark sql test jar>
*/
object TPCDSQueryBenchmark {
// Local, single-threaded Spark configuration tuned for benchmarking:
// snappy-compressed parquet, few shuffle partitions, 3g driver/executor
// memory, a 20MB broadcast-join threshold, and cross joins enabled
// (several TPC-DS queries require them).
val conf =
  new SparkConf()
    .setMaster("local[1]")
    .setAppName("test-sql-context")
    .set("spark.sql.parquet.compression.codec", "snappy")
    .set("spark.sql.shuffle.partitions", "4")
    .set("spark.driver.memory", "3g")
    .set("spark.executor.memory", "3g")
    .set("spark.sql.autoBroadcastJoinThreshold", (20 * 1024 * 1024).toString)
    .set("spark.sql.crossJoin.enabled", "true")

val spark = SparkSession.builder.config(conf).getOrCreate()

// Every TPC-DS table; each is expected as a parquet directory of the same
// name under the benchmark's data location.
val tables = Seq("catalog_page", "catalog_returns", "customer", "customer_address",
  "customer_demographics", "date_dim", "household_demographics", "inventory", "item",
  "promotion", "store", "store_returns", "catalog_sales", "web_sales", "store_sales",
  "web_returns", "web_site", "reason", "call_center", "warehouse", "ship_mode", "income_band",
  "time_dim", "web_page")
def setupTables(dataLocation: String): Map[String, Long] = {
tables.map { tableName =>
spark.read.parquet(s"$dataLocation/$tableName").createOrReplaceTempView(tableName)
tableName -> spark.table(tableName).count()
}.toMap
}
def tpcdsAll(dataLocation: String, queries: Seq[String]): Unit = {
require(dataLocation.nonEmpty,
"please modify the value of dataLocation to point to your local TPCDS data")
val tableSizes = setupTables(dataLocation)
queries.foreach { name =>
val queryString = fileToString(new File(Thread.currentThread().getContextClassLoader
.getResource(s"tpcds/$name.sql").getFile))
// This is an indirect hack to estimate the size of each query's input by traversing the
// logical plan and adding up the sizes of all tables that appear in the plan. Note that this
// currently doesn't take WITH subqueries into account which might lead to fairly inaccurate
// per-row processing time for those cases.
val queryRelations = scala.collection.mutable.HashSet[String]()
spark.sql(queryString).queryExecution.logical.map {
case UnresolvedRelation(t: TableIdentifier) =>
queryRelations.add(t.table)
case lp: LogicalPlan =>
lp.expressions.foreach { _ foreach {
case subquery: SubqueryExpression =>
subquery.plan.foreach {
case UnresolvedRelation(t: TableIdentifier) =>
queryRelations.add(t.table)
case _ =>
}
case _ =>
}
}
case _ =>
}
val numRows = queryRelations.map(tableSizes.getOrElse(_, 0L)).sum
val benchmark = new Benchmark(s"TPCDS Snappy", numRows, 5)
benchmark.addCase(name) { i =>
spark.sql(queryString).collect()
}
benchmark.run()
}
}
def main(args: Array[String]): Unit = {
// List of all TPC-DS queries
val tpcdsQueries = Seq(
"q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11",
"q12", "q13", "q14a", "q14b", "q15", "q16", "q17", "q18", "q19", "q20",
"q21", "q22", "q23a", "q23b", "q24a", "q24b", "q25", "q26", "q27", "q28", "q29", "q30",
"q31", "q32", "q33", "q34", "q35", "q36", "q37", "q38", "q39a", "q39b", "q40",
"q41", "q42", "q43", "q44", "q45", "q46", "q47", "q48", "q49", "q50",
"q51", "q52", "q53", "q54", "q55", "q56", "q57", "q58", "q59", "q60",
"q61", "q62", "q63", "q64", "q65", "q66", "q67", "q68", "q69", "q70",
"q71", "q72", "q73", "q74", "q75", "q76", "q77", "q78", "q79", "q80",
"q81", "q82", "q83", "q84", "q85", "q86", "q87", "q88", "q89", "q90",
"q91", "q92", "q93", "q94", "q95", "q96", "q97", "q98", "q99")
// In order to run this benchmark, please follow the instructions at
// https://github.com/databricks/spark-sql-perf/blob/master/README.md to generate the TPCDS data
// locally (preferably with a scale factor of 5 for benchmarking). Thereafter, the value of
// dataLocation below needs to be set to the location where the generated data is stored.
val dataLocation = ""
tpcdsAll(dataLocation, queries = tpcdsQueries)
}
}
| SHASHANKB/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala | Scala | apache-2.0 | 5,519 |
package test
import cz.kamenitxan.jakon.core.configuration.Settings
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.openqa.selenium.{By, WebDriver, WebElement}
import org.scalatest.funsuite.FixtureAnyFunSuite
import org.scalatest.{Assertion, Outcome}
import scala.collection.mutable
import scala.jdk.CollectionConverters._
class TestBase extends FixtureAnyFunSuite {

  // Base URLs are (re)computed per test in withFixture, once the port is known.
  var host = ""
  var adminHost = "/admin/"

  /** Fixture handed to each test: a fresh headless browser instance. */
  case class FixtureParam(driver: WebDriver)

  /**
   * Loan-pattern fixture: builds an HtmlUnit driver for every test and always
   * disposes of it afterwards, even when the test fails or aborts.
   */
  def withFixture(test: OneArgTest): Outcome = {
    host = "http://localhost:" + Settings.getPort
    adminHost = host + "/admin/"
    val driver = new HtmlUnitDriver()
    val fixture = FixtureParam(driver)
    try {
      withFixture(test.toNoArgTest(fixture)) // "loan" the fixture to the test
    } finally {
      // quit() releases the driver and all of its windows;
      // close() would only close the current window and could leak the session.
      driver.quit()
    }
  }

  /**
   * Returns true when at least one element matches `selector`.
   * Fixed: the previous `findElements(...).get(0) != null` threw
   * IndexOutOfBoundsException instead of returning false when nothing matched
   * (findElements returns an empty list, never null entries).
   */
  protected def checkPageLoad(selector: String = ".navbar-brand")(implicit driver: WebDriver): Boolean = {
    !driver.findElements(By.cssSelector(selector)).isEmpty
  }

  /** Asserts that the Jakon flash-message area contains the given text. */
  protected def checkSiteMessage(msgText: String)(implicit driver: WebDriver): Assertion = {
    val msgs = driver.findElements(By.cssSelector("#jakon_messages .alert")).asScala.map(e => e.getText)
    assert(msgs.contains(msgText))
  }

  /** All elements matching the CSS selector, as a Scala buffer. */
  protected def findElements(selector: String)(implicit driver: WebDriver): mutable.Buffer[WebElement] = {
    driver.findElements(By.cssSelector(selector)).asScala
  }

  /** Rows of the standard admin data table. */
  protected def getAdminTableRows()(implicit driver: WebDriver): mutable.Buffer[WebElement] = {
    findElements("#dataTables-example tbody tr")
  }

  /** Asserts the string is neither null nor empty. */
  protected def assertNotEmpty(v: String): Assertion = {
    assert(v != null)
    assert(v.nonEmpty)
  }
}
| kamenitxan/Jakon | modules/backend/src/test/scala/test/TestBase.scala | Scala | bsd-3-clause | 1,618 |
package org.judal.examples.scala.jdbc
import java.math.BigDecimal
import org.junit.Test
import org.junit.Assert.assertEquals
import org.scalatest.Suite
import com.knowgate.io.IOUtils
import org.judal.Using._
import org.judal.storage.EngineFactory
import org.judal.storage.relational.RelationalView
import org.judal.storage.table.impl.SingleLongColumnRecord
import org.judal.jdbc.JDBCRelationalDataSource
import org.judal.examples.Resources
import org.judal.examples.scala.model.Course
import org.judal.examples.scala.model.Student
import org.judal.examples.scala.model.StudentCourse
import collection.JavaConverters._
/**
* Insert data from a comma delimited file into the database
* using Default Relational DataSource kept at StorageContext
*/
class E10_WriteCSVDataIntoTheDatabase extends Suite {

  // End-to-end demo: create the schema, load the three CSV resources
  // (students, courses, enrollments), then tear the schema down again.
  // The steps are order-dependent: tables must exist before the inserts,
  // and student/course rows must exist before the link rows referencing them.
  @Test def demo() = {
    E10_WriteCSVDataIntoTheDatabase.setUp()
    E10_WriteCSVDataIntoTheDatabase.insertStudentsIntoDatabase()
    E10_WriteCSVDataIntoTheDatabase.insertCoursesIntoDatabase()
    E10_WriteCSVDataIntoTheDatabase.assignStudentsToCoursesIntoDatabase()
    E10_WriteCSVDataIntoTheDatabase.tearDown()
  }
}
object E10_WriteCSVDataIntoTheDatabase {

  /**
   * Reads students.csv (semicolon-separated: id;firstName;lastName;dateOfBirth)
   * from the test resources and stores one Student row per line, then asserts
   * the table row count matches the number of lines read.
   */
  def insertStudentsIntoDatabase() = {
    val lines = IOUtils.readLines(Resources.getResourceAsStream("students.csv")).asScala
    // Write new students into the database
    for (line <- lines) {
      val fields = line.split(";")
      val s = new Student()
      s.setId(fields(0).toInt)
      s.setFirstName(fields(1))
      s.setLastName(fields(2))
      s.setDateOfBirth(fields(3))
      // This call use EngineFactory.DefaultThreadDataSource under the hood
      s.store()
    }
    assertEquals(lines.size, countRows(Student.TABLE_NAME))
  }

  /**
   * Reads courses.csv (semicolon-separated: id;code;name;start;end;price;description)
   * and stores one Course row per line, then verifies the row count.
   */
  def insertCoursesIntoDatabase() = {
    val lines = IOUtils.readLines(Resources.getResourceAsStream("courses.csv")).asScala
    // Write new students into the database
    for (line <- lines) {
      val fields = line.split(";")
      val c = new Course()
      c.setId(fields(0).toInt)
      c.setCode(fields(1))
      c.setCourseName(fields(2))
      c.setStartDate(fields(3))
      c.setEndDate(fields(4))
      c.setPrice(new BigDecimal(fields(5)))
      c.setDescription(fields(6))
      // This call use EngineFactory.DefaultThreadDataSource under the hood
      c.store()
    }
    assertEquals(lines.size, countRows(Course.TABLE_NAME))
  }

  /**
   * Reads studentcourse.csv (comma-separated: studentId,courseId) and stores
   * one StudentCourse link row per line, then verifies the row count.
   */
  def assignStudentsToCoursesIntoDatabase() = {
    val lines = IOUtils.readLines(Resources.getResourceAsStream("studentcourse.csv")).asScala
    // Write new students into the database
    for (line <- lines) {
      val fields = line.split(",")
      val sc = new StudentCourse()
      sc.setStudentId(fields(0).toInt)
      sc.setCourseId(fields(1).toInt)
      // This call use EngineFactory.DefaultThreadDataSource under the hood
      sc.store()
    }
    assertEquals(lines.size, countRows(StudentCourse.TABLE_NAME))
  }

  /**
   * Counts the rows of the given table through a single-column COUNT view.
   * NOTE(review): `using (courses)` is evaluated while `courses` is still null,
   * so the view opened on the following line may not actually be managed/closed
   * by `using` — confirm against org.judal.Using's semantics.
   */
  def countRows(TableName: String) = {
    val count = new SingleLongColumnRecord(TableName, "c")
    val dts = EngineFactory.getDefaultRelationalDataSource
    var courses : RelationalView = null
    var retval : Int = 0
    using (courses) {
      courses = dts.openRelationalView(count)
      retval = courses.count(null).intValue
    }
    retval
  }

  /** Creates the data source, the schema objects and the two id sequences used by the models. */
  def setUp() = {
    val dataSource = E01_CreateDefaultRelationalDataSource.create
    E04_CreateTablesFromJDOXML.createSchemaObjects(dataSource)
    dataSource.execute("CREATE SEQUENCE seq_student AS BIGINT START WITH 1 INCREMENT BY 1")
    dataSource.execute("CREATE SEQUENCE seq_course AS BIGINT START WITH 1 INCREMENT BY 1")
    dataSource.asInstanceOf[JDBCRelationalDataSource]
  }

  /** Drops the schema objects and closes the default relational data source. */
  def tearDown() = {
    E04_CreateTablesFromJDOXML.dropSchemaObjects(EngineFactory.getDefaultRelationalDataSource)
    E01_CreateDefaultRelationalDataSource.close
  }
} | sergiomt/judal | aexample/src/main/scala/org/judal/examples/scala/jdbc/E10_WriteCSVDataIntoTheDatabase.scala | Scala | apache-2.0 | 3,821 |
package com.msilb.scalanda.streamapi
import akka.actor.{ActorRef, FSM, Props}
import akka.io.IO
import com.msilb.scalanda.common.Environment
import com.msilb.scalanda.common.Environment.SandBox
import com.msilb.scalanda.common.model.Transaction
import com.msilb.scalanda.common.model.TransactionJsonProtocol._
import com.msilb.scalanda.streamapi.StreamingConnector._
import com.msilb.scalanda.streamapi.model.HeartbeatJsonProtocol._
import com.msilb.scalanda.streamapi.model.TickJsonProtocol._
import com.msilb.scalanda.streamapi.model.{Heartbeat, Tick}
import spray.can.Http
import spray.can.Http.HostConnectorInfo
import spray.http.Uri.Query
import spray.http._
import spray.httpx.RequestBuilding._
import spray.json._
import scala.concurrent.duration._
object StreamingConnector {

  def props = Props(classOf[StreamingConnector])

  // --- Commands accepted by the connector ---

  /** Open an HTTP(S) connection to the streaming API of the given environment. */
  case class Connect(env: Environment = SandBox, authToken: Option[String] = None)

  /** Start streaming price ticks for the given account's instruments. */
  case class StartRatesStreaming(accountId: Int, instruments: Set[String], sessionId: Option[String] = None)

  /** Start streaming events, optionally restricted to the given accounts. */
  case class StartEventsStreaming(accountIds: Option[Set[Int]] = None)

  /** Register actors that should receive incoming ticks and transactions. */
  case class AddListeners(listeners: Set[ActorRef])

  /** Unregister previously added listener actors. */
  case class RemoveListeners(listeners: Set[ActorRef])

  /** Reply sent to the Connect requester once the host connector is ready. */
  case object ConnectionEstablished

  // --- FSM states ---
  sealed trait State

  case object Disconnected extends State

  case object Connecting extends State

  case object Connected extends State

  // --- FSM state data ---
  sealed trait Data

  case object Empty extends Data

  /** Requester of the connection, spray host connector, and current listener set. */
  case class CurrentData(requesterInfo: ActorRef, hostConnector: ActorRef, listeners: Set[ActorRef]) extends Data

}
class StreamingConnector extends FSM[State, Data] {

  import context.system

  startWith(Disconnected, Empty)

  // Disconnected: on Connect, ask spray's HTTP extension for a host connector.
  // HTTPS (port 443) and the OAuth2 bearer header are used only when the
  // environment requires authentication.
  when(Disconnected) {
    case Event(Connect(env, authTokenOpt), _) =>
      IO(Http) ! Http.HostConnectorSetup(
        host = env.streamApiUrl(),
        port = if (env.authenticationRequired()) 443 else 80,
        sslEncryption = env.authenticationRequired(),
        defaultHeaders = authTokenOpt.map(authToken => List(HttpHeaders.Authorization(OAuth2BearerToken(authToken)))).getOrElse(Nil)
      )
      // Give spray 5 seconds to answer before the state times out.
      goto(Connecting) forMax 5.seconds using CurrentData(sender(), system.deadLetters, Set.empty)
  }

  // Connecting: remember the host connector handed back by spray.
  when(Connecting) {
    case Event(HostConnectorInfo(hostConnector, _), data: CurrentData) =>
      goto(Connected) using data.copy(hostConnector = hostConnector)
  }

  // Notify the original requester once the connection is established.
  onTransition {
    case Connecting -> Connected =>
      (stateData: @unchecked) match {
        case CurrentData(requester, _, _) => requester ! ConnectionEstablished
      }
  }

  when(Connected) {
    // Subscribe to the price stream for the given account and instruments.
    case Event(StartRatesStreaming(accountId, instruments, sessionIdOpt), CurrentData(_, connector, _)) =>
      val uri = Uri("/v1/prices").withQuery(
        Query.asBodyData(
          Seq(
            Some(("accountId", accountId.toString)),
            Some(("instruments", instruments.mkString(","))),
            sessionIdOpt.map(s => ("sessionId", s))
          ).flatten
        )
      )
      connector ! Get(uri)
      stay()
    // Subscribe to the account-event stream.
    case Event(StartEventsStreaming(accountIdsOpt), CurrentData(_, connector, _)) =>
      val uri = Uri("/v1/events").withQuery(
        Query.asBodyData(
          Seq(
            accountIdsOpt.map(accountIds => ("accountIds", accountIds.mkString(",")))
          ).flatten
        )
      )
      connector ! Get(uri)
      stay()
    case Event(e: ChunkedResponseStart, _) =>
      log.debug("Received ChunkedResponseStart: {}", e)
      stay()
    // Each chunk may carry several newline-separated JSON objects; dispatch
    // transactions and ticks to the registered listeners, log heartbeats.
    case Event(MessageChunk(data, _), CurrentData(_, _, refs)) =>
      data.asString.lines.foreach { line =>
        line.parseJson.asJsObject.fields.head match {
          case ("transaction", obj) =>
            val t = obj.convertTo[Transaction]
            refs.foreach(_ ! t)
            log.info("Received new transaction: {}", t)
          case ("tick", obj) =>
            val t = obj.convertTo[Tick]
            refs.foreach(_ ! t)
            log.info("Received new price tick: {}", t)
          case ("heartbeat", obj) =>
            val h = obj.convertTo[Heartbeat]
            log.debug("Received heartbeat: {}", h)
          case (unknown, _) =>
            log.warning("Unknown event received: {}", unknown)
        }
      }
      stay()
    case Event(e: ChunkedMessageEnd, _) =>
      log.debug("Received ChunkedMessageEnd: {}", e)
      stay()
  }

  // Listener management is available in every state; replies with the new set.
  whenUnhandled {
    case Event(AddListeners(refs), currentData: CurrentData) =>
      val newListeners = currentData.listeners ++ refs
      stay() using currentData.copy(listeners = newListeners) replying newListeners
    case Event(RemoveListeners(refs), currentData: CurrentData) =>
      val newListeners = currentData.listeners -- refs
      stay() using currentData.copy(listeners = newListeners) replying newListeners
  }
}
| msilb/scalanda | src/main/scala/com/msilb/scalanda/streamapi/StreamingConnector.scala | Scala | mit | 4,751 |
package com.automatatutor.model
import net.liftweb.mapper.MappedString
import net.liftweb.mapper.LongKeyedMapper
import net.liftweb.mapper.LongKeyedMetaMapper
import net.liftweb.mapper.MappedLongForeignKey
import net.liftweb.mapper.IdPK
import net.liftweb.mapper.By
import net.liftweb.mapper.MappedText
import scala.xml.XML
import scala.xml.NodeSeq
class PumpingLemmaProblem extends LongKeyedMapper[PumpingLemmaProblem] with IdPK with SpecificProblem[PumpingLemmaProblem] {
  def getSingleton = PumpingLemmaProblem

  object problemId extends MappedLongForeignKey(this, Problem)
  object language extends MappedText(this)
  object constraint extends MappedText(this)
  object alphabet extends MappedText(this)
  object pumpingString extends MappedText(this)

  /**
   * The alphabet symbols, stored space-separated in a single text column.
   * Fixed for consistency: uses `.get` like copy() does (`.is` is Lift's
   * legacy alias for the same accessor).
   */
  def getAlphabet : Seq[String] = this.alphabet.get.split(" ")

  /** Creates a detached duplicate of this row, field by field. */
  override def copy(): PumpingLemmaProblem = {
    val duplicate = new PumpingLemmaProblem
    duplicate.problemId(this.problemId.get)
    duplicate.language(this.language.get)
    duplicate.constraint(this.constraint.get)
    duplicate.alphabet(this.alphabet.get)
    duplicate.pumpingString(this.pumpingString.get)
    // Idiomatic Scala: the last expression is the result, no `return` needed.
    duplicate
  }

  /** Links this specific problem to its general Problem row. */
  override def setGeneralProblem(newProblem: Problem) = this.problemId(newProblem)
}
object PumpingLemmaProblem extends PumpingLemmaProblem with LongKeyedMetaMapper[PumpingLemmaProblem] {
  /**
   * Looks up the PumpingLemmaProblem row attached to the given general Problem.
   * Throws (via openOrThrowException) when no row exists, so callers must
   * already know the problem is of the pumping-lemma kind.
   */
  def findByGeneralProblem(generalProblem : Problem) : PumpingLemmaProblem =
    find(By(PumpingLemmaProblem.problemId, generalProblem)) openOrThrowException("Must only be called if we are sure that generalProblem is a PumpingLemmaProblem")
} | AutomataTutor/automatatutor-frontend | src/main/scala/com/automatatutor/model/PumpingLemmaProblem.scala | Scala | mit | 1,561 |
package at.forsyte.apalache.tla.lir
import at.forsyte.apalache.tla.lir.values.{TlaBool, TlaInt}
// [email protected]: why are TestingPredefs located in src/main?
// TODO: move TestingPredefs into src/test
trait TestingPredefs {

  // Implicit lifts so tests can write plain strings/ints/tuples where
  // TLA IR expressions or formal parameters are expected.
  implicit def name( p_s : String ) : NameEx = NameEx( p_s )

  implicit def value( p_n : Int ) : ValEx = ValEx( TlaInt( p_n ) )

  implicit def sfp( p_s : String ) : SimpleFormalParam = SimpleFormalParam( p_s )

  implicit def ofp( p_pair : (String, Int) ) : OperFormalParam =
    OperFormalParam( p_pair._1, p_pair._2 )

  // Shorthand name expressions commonly used across tests.
  def n_a : NameEx = "a"
  def n_b : NameEx = "b"
  def n_c : NameEx = "c"
  def n_d : NameEx = "d"
  def n_e : NameEx = "e"
  def n_f : NameEx = "f"
  def n_g : NameEx = "g"

  def n_p : NameEx = "p"
  def n_q : NameEx = "q"
  def n_r : NameEx = "r"
  def n_s : NameEx = "s"
  def n_t : NameEx = "t"

  def n_A : NameEx = "A"
  def n_B : NameEx = "B"
  def n_S : NameEx = "S"
  def n_T : NameEx = "T"
  def n_P : NameEx = "P"
  def n_Q : NameEx = "Q"

  def n_x : NameEx = "x"
  def n_y : NameEx = "y"
  def n_z : NameEx = "z"

  def trueEx : ValEx = ValEx(TlaBool(true))
  def falseEx : ValEx = ValEx(TlaBool(false))

  // A pool of distinct name expressions for building argument lists.
  def arr : Array[TlaEx] = Array( n_a, n_b, n_c, n_d, n_e, n_f, n_g )
  def arr_s : Seq[TlaEx] = arr.toSeq

  // First `n` names starting at offset `nSkip`; pads with n_x when more
  // elements are requested than the pool provides.
  def seq( n : Int, nSkip : Int = 0 ) : Seq[TlaEx] = arr.slice( nSkip, n + nSkip ).toSeq ++ Seq.fill( n - arr.length )( n_x )

  // The expression `x \in S`.
  def x_in_S : OperEx = Builder.in( "x", "S" )

  // Prints each string on its own line, quoting each one unless p_surround is false.
  def printlns( p_ss : String* )
              ( implicit p_surround : Boolean = true ) : Unit =
    println( (if ( p_surround ) p_ss.map( "\\"%s\\"".format( _ ) ) else p_ss).mkString( "\\n" ) )

  // Prints a dashed separator line surrounded by blank lines.
  def printsep() : Unit = println( "\\n%s\\n".format( Seq.fill( 20 )( "-" ).mkString ) )

  def noOp() : Unit = {}

  // Runs `f` between optional setup and teardown thunks.
  def prePostTest( f: => Unit, pre : () => Unit = noOp, post: () => Unit = noOp ) : Unit = {
    pre()
    f
    post()
  }
}
| konnov/apalache | tlair/src/main/scala/at/forsyte/apalache/tla/lir/TestingPredefs.scala | Scala | apache-2.0 | 1,891 |
package com.clarifi.reporting
package ermine.session
import java.util.concurrent._
import com.clarifi.reporting.ermine.{ Result, Success, Failure, Filtered, Document, Death }
import com.clarifi.reporting.ermine.Document.{ text, vsep }
import scalaz.concurrent.{ Promise, Strategy }
import scalaz.concurrent.Strategy._
import java.util.Date
import scalaz.Scalaz._
// A computation forked onto its own copy of the session environment; `promise`
// completes with either the computed value or the Death that aborted it.
case class SessionTask[A](env: SessionEnv, promise: Promise[Either[Death,A]])
object SessionTask {

  /**
   * Forks `p` onto a fresh copy of the session environment and a split supply,
   * evaluating it asynchronously inside a Promise. A Death raised by the task
   * is captured as Left rather than propagated to the caller.
   */
  def fork[A](p: (SessionEnv, Supply) => A)(implicit s: SessionEnv, vs: Supply): SessionTask[A] = {
    val sp = s.copy
    val vsp = vs.split
    SessionTask(
      sp,
      Promise(
        try { Right(p(sp,vsp)) }
        catch { case r : Death => Left(r) }
      )
    )
  }

  /**
   * Blocks until the task finishes. On success the task's environment is
   * merged back into the caller's (`s += task.env`) and the value returned;
   * on failure the captured Death is rethrown.
   */
  // @throws Death
  def join[A](task: SessionTask[A])(implicit s: SessionEnv): A =
    task.promise.get match {
      case Left(e) => throw Death(e.error, e)
      case Right(a) =>
        s += task.env
        a
    }

  /**
   * Joins many tasks. If every task succeeded, their environments are merged
   * into the caller's in task order and the results returned. Otherwise the
   * error documents of all failed tasks are combined into a single Death.
   */
  def joins[A](tasks: List[SessionTask[A]])(implicit s: SessionEnv): List[A] = {
    val (failures, successes) = tasks.map(t => (t.promise.get, t.env)).partition { _._1.isLeft }
    if (failures.isEmpty)
      successes.foldRight(List[A]()) {
        case ((Right(x), sp), xs) =>
          s += sp
          x :: xs
        // Unreachable: the partition above guarantees only Rights remain.
        case _ => sys.error("joins: the impossible happened")
      }
    else {
      val failDocs = failures.collect {
        case (Left(Death(e,_)), _) => e
      }
      throw Death(vsep(failDocs))
    }
  }
}
| ermine-language/ermine-legacy | src/main/scala/com/clarifi/reporting/ermine/session/SessionTask.scala | Scala | bsd-2-clause | 1,500 |
package cz.vse.easyminer.rest
import akka.actor.ActorRefFactory
import spray.http.ContentType
import spray.http.HttpEntity
import spray.http.MediaTypes
import spray.http.StatusCodes
import spray.http.Uri
import spray.routing.Directives
import MediaTypes._
import spray.routing.RoutingSettings
import spray.routing.directives.ContentTypeResolver
trait EndpointDoc extends Directives {

  /**
   * Spray route serving the API documentation:
   *  - "/"                 serves the static swagger UI index page
   *  - "" (no slash)       permanently redirects to the trailing-slash form
   *                        so relative asset links resolve correctly
   *  - "/swagger-doc.json" completes with the JSON produced by `swaggerJson`
   *                        for the request URI
   *  - anything else       is resolved against the local "webapp" directory
   */
  def attachDoc(swaggerJson: Uri => String)(implicit settings: RoutingSettings, resolver: ContentTypeResolver, refFactory: ActorRefFactory) = pathSingleSlash {
    getFromFile("webapp/index.html")
  } ~ pathEnd {
    requestUri { uri =>
      redirect(uri.withPath(uri.path / ""), StatusCodes.PermanentRedirect)
    }
  } ~ path("swagger-doc.json") {
    requestUri { uri =>
      complete(HttpEntity.apply(ContentType(`application/json`), swaggerJson(uri)))
    }
  } ~ getFromDirectory("webapp")
}
| KIZI/EasyMiner-Apriori-R | src/main/scala/cz/vse/easyminer/rest/EndpointDoc.scala | Scala | bsd-3-clause | 888 |
package com.wlangiewicz.brainwallet
class BrainwalletSpec extends UnitSpec {

  // The twelve-word passphrase shared by the long-phrase WIF and address tests.
  private val longPassphrase =
    "yard impulse luxury drive today throw farm pepper survey wreck glass federal"

  "Brainwallet" should "generate WIF for \\"abc\\"" in {
    Brainwallet.getWIF("abc") should be("5KEQgeL4EwjuEAyPQBoaJYVrbt5kSUsrwXPkjzAQTPiNoUxxeS8")
  }

  "Brainwallet" should "generate WIF for \\"bitcoin\\"" in {
    Brainwallet.getWIF("bitcoin") should be("5JdeQ39z8NUkNVvB37tt74Cu2WSNVj7qb9PdY651UoQnqyCm937")
  }

  "Brainwallet" should "generate WIF for long phrase" in {
    Brainwallet.getWIF(longPassphrase) should be("5KfrXsycsKx7EH8QCVPxSBmjRvMhjqPcMmrRaSQChN57nR8bv6Q")
  }

  "Brainwallet" should "generate address for \\"abc\\"" in {
    Brainwallet.getAddress("abc") should be("1NEwmNSC7w9nZeASngHCd43Bc5eC2FmXpn")
  }

  "Brainwallet" should "generate address for long phrase" in {
    Brainwallet.getAddress(longPassphrase) should be("1Cf8wvA9qbHjPxGkkRSRDSZbhprk92Cfwf")
  }
}
| wlk/brainwallet-scala | src/test/scala/com/wlangiewicz/brainwallet/BrainwalletSpec.scala | Scala | apache-2.0 | 1,168 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.connector.elasticsearch
import com.stratio.crossdata.connector.TableInventory.Table
import com.stratio.crossdata.test.BaseXDTest
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.StructType
import org.elasticsearch.hadoop.cfg.ConfigurationOptions
import org.elasticsearch.hadoop.cfg.ConfigurationOptions._
;
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.junit.runner.RunWith
import org.mockito.Mockito._
@RunWith(classOf[JUnitRunner])
class DefaultSourceESSpec extends BaseXDTest with MockitoSugar {

  "A DefaultSource " should "build a ElasticSearchXDRelation without schema" in {
    //Fixture
    val defaultDatasource = new DefaultSource()
    val sqlContext = mock[SQLContext]
    // Single-entry map: the block argument evaluates to one key -> value pair.
    val parameters = Map[String, String] {ConfigurationOptions.ES_RESOURCE -> "index/type"}

    //Experimentation
    val result = defaultDatasource.createRelation(sqlContext, parameters)

    //Expectations
    result should not be null
  }

  it should "build a ElasticSearchXDRelation with schema" in {
    //Fixture
    val defaultDatasource = new DefaultSource()
    val sqlContext = mock[SQLContext]
    val schema = mock[StructType]
    val parameters = Map[String, String] {ConfigurationOptions.ES_RESOURCE -> "index/type"}

    //Experimentation
    val result = defaultDatasource.createRelation(sqlContext, parameters, schema)

    //Expectations
    result should not be null
  }

  it should "Build a Map with the default opts" in {
    val defaultDatasource = new DefaultSource()
    val item: Table = mock[Table]
    // The table mock provides the index/type pair used to derive ES_RESOURCE.
    when(item.database).thenReturn(Some("index"))
    when(item.tableName).thenReturn("type")
    val userOpts: Map[String, String] = Map(ES_NODES -> "localhost")

    //Experimentation
    val result:Map[String, String] = defaultDatasource.generateConnectorOpts(item, userOpts)

    //Expectations
    // User options must be preserved and ES_RESOURCE derived as "database/table".
    result should not be null
    result.get(ES_RESOURCE).get should be ("index/type")
    result.get(ES_NODES).get should be ("localhost")
  }
}
| darroyocazorla/crossdata | elasticsearch/src/test/scala/com/stratio/crossdata/connector/elasticsearch/DefaultSourceESSpec.scala | Scala | apache-2.0 | 2,678 |
package org.monarchinitiative.dosdp.cli
import better.files._
import org.eclipse.rdf4j.model.vocabulary.DCTERMS
import org.monarchinitiative.dosdp.cli.DOSDPError.{logError, logErrorFail}
import org.monarchinitiative.dosdp.cli.Main.loggingContext
import org.monarchinitiative.dosdp.{DOSDP, Utilities}
import org.phenoscape.scowl._
import org.semanticweb.owlapi.model.{OWLAnnotationProperty, OWLAxiom}
import zio._
import zio.logging._
import zio.blocking.Blocking
object Prototype {

  private val DCTTitle: OWLAnnotationProperty = AnnotationProperty(DCTERMS.TITLE.stringValue)

  val OboInOwlSource: OWLAnnotationProperty = AnnotationProperty("http://www.geneontology.org/formats/oboInOwl#source")

  /**
   * Generates "prototype" OWL axioms for one DOSDP template file, or for every
   * .yaml/.yml template in a directory, and saves them all into a single
   * ontology at the configured output path.
   */
  def run(config: PrototypeConfig): ZIO[ZEnv with Logging, DOSDPError, Unit] = {
    log.locally(_.annotate(loggingContext, Map("command" -> "prototype"))) {
      val possibleFile = File(config.common.template)
      for {
        isDir <- ZIO.effect(possibleFile.isDirectory).flatMapError(e => logError(s"Unable to read input at $possibleFile", e))
        // Directory input: collect every .yaml/.yml file; file input: use it as-is.
        filenames <- if (isDir) {
          ZIO.effect {
            possibleFile.list.filter { f =>
              f.extension(false, false, true).exists(e => (e == "yaml") || (e == "yml"))
            }.map(_.toString).toSet
          }.flatMapError(e => logError(s"Couldn't list files in $possibleFile", e))
        } else ZIO.succeed(Set(config.common.template))
        dosdps <- ZIO.foreach(filenames)(f => Config.inputDOSDPFrom(f))
        axioms <- ZIO.foreach(dosdps)(dosdp => axiomsFor(dosdp, config)).map(_.flatten)
        _ <- Utilities.saveAxiomsToOntology(axioms, config.common.outfile)
      } yield ()
    }
  }

  /**
   * Renders one pattern into axioms. The filler map is assembled from the
   * pattern's declared variable maps, with the defined-class variable bound to
   * the pattern's own IRI; a dcterms:title annotation is added when the
   * pattern has a name.
   */
  private def axiomsFor(dosdp: DOSDP, config: PrototypeConfig): ZIO[Blocking with Logging, DOSDPError, Set[OWLAxiom]] =
    log.locally(_.annotate(loggingContext, dosdp.pattern_name.map(n => Map("pattern" -> n)).getOrElse(Map.empty))) {
      for {
        prefixes <- config.common.prefixesMap
        ontologyOpt <- config.common.ontologyOpt
        // The pattern IRI is mandatory here: it doubles as the defined class IRI.
        iri <- ZIO.fromOption(dosdp.pattern_iri).orElse(logErrorFail("Pattern must have pattern IRI for prototype command"))
        fillers = dosdp.vars.getOrElse(Map.empty) ++
          dosdp.list_vars.getOrElse(Map.empty) ++
          dosdp.data_vars.getOrElse(Map.empty) ++
          dosdp.data_list_vars.getOrElse(Map.empty) +
          (DOSDP.DefinedClassVariable -> iri)
        // NOTE(review): the positional boolean arguments passed to renderPattern
        // are hard to read — confirm their meaning against Generate.renderPattern.
        axioms <- Generate.renderPattern(dosdp, prefixes, fillers, ontologyOpt, true, true, None, false, OboInOwlSource, false, Map.empty)
        maybeTitleAxiom = dosdp.pattern_name.map(name => Class(iri) Annotation(DCTTitle, name))
      } yield axioms ++ maybeTitleAxiom
    }
}
| INCATools/dosdp-tools | src/main/scala/org/monarchinitiative/dosdp/cli/Prototype.scala | Scala | mit | 2,686 |
package us.feliscat.ir.fulltext.indri
import us.feliscat.m17n.MultiLingual
import us.feliscat.text.StringOption
import us.feliscat.util.LibrariesConfig
import scala.collection.mutable
/**
* <pre>
* Created on 2017/01/08.
* </pre>
*
* @author K.Sakamoto
*/
trait Retrieval extends MultiLingual {

  /** Index paths of the knowledge sources to search for the given query kind. */
  protected def selectKnowledgeSource(isKeywordQuery: Boolean): Seq[String]

  /**
   * Builds the IndriRunQuery command line: the fixed options first, then one
   * -query= argument per query, then one -index= argument per knowledge source.
   */
  protected def command(queryList: Seq[String], knowledgeSourceList: Seq[String]): Seq[String] = {
    val indexArgs = knowledgeSourceList.map(source => "-index=".concat(source))
    val queryArgs = queryList.map(query => "-query=".concat(query))
    val baseArgs = Seq(
      "IndriRunQuery",
      "-printDocuments=true",
      s"-memory=${LibrariesConfig.indriMemory}",
      "-printQuery=true",
      s"-count=${LibrariesConfig.indriCount}")
    baseArgs ++ queryArgs ++ indexArgs
  }

  /** Parses IndriRunQuery output lines into the accumulating result map. */
  protected def toIndriResultMap(lines: Iterator[String],
                                 keywordOriginalTextOpt: StringOption,
                                 expansionOnlyList: Seq[String],
                                 indriResultMap: mutable.Map[String, IndriResult]): Map[String, IndriResult]
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/ir/fulltext/indri/Retrieval.scala | Scala | apache-2.0 | 1,261 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.