code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package dhg.ccg.tag.learn
import org.junit.Test
import dhg.util._
import org.junit.Assert._
import dhg.ccg.cat._
import dhg.ccg.rule._
import dhg.ccg.prob._
import dhg.ccg.tagdict.TagDictionary
import dhg.ccg.tagdict.SimpleTagDictionary
/**
 * Tests for HMM transition-distribution initializers that take CCG category
 * combinability into account: CcgCombinabilityTransitionConditionalLogProbabilityDistribution,
 * CcgCombinabilityTrInitializer, and TagPriorTrInitializer.
 */
class CcgHmmInitializationTests {
// Category fixtures: sentence-start/end sentinels plus atomic categories used by the mocks below.
val STA = cat"<S>"
val S = cat"S".asInstanceOf[AtomCat]
val NP = cat"NP".asInstanceOf[AtomCat]
val N = cat"N".asInstanceOf[AtomCat]
val X = cat"X".asInstanceOf[AtomCat]
val END = cat"<E>"
val A = cat"A".asInstanceOf[AtomCat]
val B = cat"B".asInstanceOf[AtomCat]
val CAN = cat"CAN".asInstanceOf[AtomCat]
val CANT = cat"CANT".asInstanceOf[AtomCat]
@Test
def test_CcgCombinabilityTrInitializer_with_CcgCombinabilityTransitionConditionalLogProbabilityDistribution {
type Word = String
type Tag = Cat
// Tag dictionary actually consumed by the distribution under test; only allTags/startTag/endTag
// are expected to be called (everything else is ??? so the test fails loudly on unexpected use).
val mockTagdict2 = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???
def excludedTags: Set[Tag] = ???
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = Set(
N,
S \\ N,
(S \\ N) \\ (S \\ N))
def startTag: Tag = STA
def endTag: Tag = END
}
// Intermediate dictionary: only withTags is implemented, forwarding to mockTagdict2,
// mirroring the chain the initializer is expected to follow.
val mockTagdict1 = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???
def excludedTags: Set[Tag] = ???
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = ???
def endTag: Tag = ???
def startTag: Tag = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = {
mockTagdict2
}
}
// Base (un-adjusted) transition distribution. The hard-coded table below encodes, for each
// "given" category, probabilities that the assertions in check() expect to be re-weighted by
// combinability. Transitions from a category back to <S> must never be queried (sys.error).
// NOTE(review): the inline "COMB NC" / trailing-sum comments appear to annotate which entries
// are combinable vs not and their per-row mass — kept verbatim from the original author.
val mockTrCPD = new ConditionalLogProbabilityDistribution[Cat, Cat] {
override def apply(x: Cat, given: Cat): LogDouble = {
(given, x) match {
case (STA, STA) /* */ => sys.error("<S> -> <S>") // COMB NC
case (STA, N) /* */ => LogDouble(0.70) // X
case (STA, S \\ N) /* */ => LogDouble(0.13) // X
case (STA, (S \\ N) \\ (S \\ N)) => LogDouble(0.12) // X
case (STA, END) /* */ => LogDouble.zero //
// 0.70 0.25
case (STA, NP) /* */ => LogDouble(0.16) // X
case (STA, S \\ NP) /* */ => LogDouble(0.29) // X
case (N, /**/ STA) /* */ => sys.error("N -> <S>") // COMB NC
case (N, /**/ N) /* */ => LogDouble(0.20) // X
case (N, /**/ S \\ N) /* */ => LogDouble(0.50) // X
case (N, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.05) // X
case (N, /**/ END) /* */ => LogDouble(0.35) // X
// 0.85 0.25
case (N, S \\ NP) /* */ => LogDouble(0.33) // X
case (N, NP) /* */ => LogDouble(0.21) // X
case (S \\ N, /**/ STA) /* */ => sys.error("S\\\\N -> <S>") // COMB NC
case (S \\ N, /**/ N) /* */ => LogDouble(0.08) // X
case (S \\ N, /**/ S \\ N) /* */ => LogDouble(0.07) // X
case (S \\ N, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.45) // X
case (S \\ N, /**/ END) /* */ => LogDouble(0.20) // X
// 0.65 0.15
case (S \\ N, NP \\ (S \\ N)) /* */ => LogDouble(0.26) // X
case (S \\ N, NP) /* */ => LogDouble(0.31) // X
case ((S \\ N) \\ (S \\ N), /**/ STA) /* */ => sys.error("(S\\\\N)\\\\(S\\\\N) -> <S>") // COMB NC
case ((S \\ N) \\ (S \\ N), /**/ N) /* */ => LogDouble(0.11) // X
case ((S \\ N) \\ (S \\ N), /**/ S \\ N) /* */ => LogDouble(0.06) // X
case ((S \\ N) \\ (S \\ N), /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.23) // X
case ((S \\ N) \\ (S \\ N), /**/ END) /* */ => LogDouble(0.60) // X
// 0.83 0.17
case ((S \\ N) \\ (S \\ N), /**/ NP \\ (S \\ N)) /**/ => LogDouble(0.03) // X
case ((S \\ N) \\ (S \\ N), /**/ NP) /* */ => LogDouble(0.14) // X
case (X, /**/ STA) /* */ => sys.error("X -> <S>") // COMB NC
case (X, /**/ N) /* */ => LogDouble(0.25) // X
case (X, /**/ S \\ N) /* */ => LogDouble(0.55) // X
case (X, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.10) // X
case (X, /**/ END) /* */ => LogDouble(0.40) // X
// 0.95 0.35
case (X, S \\ NP) /* */ => LogDouble(0.38) // X
case (X, NP) /* */ => LogDouble(0.26) // X
case (A, /**/ STA) /* */ => sys.error("A -> <S>")
case (A, /**/ N) /* */ => LogDouble(0.29)
case (A, /**/ S \\ N) /* */ => LogDouble(0.01)
case (A, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.09)
case (A, /**/ END) /* */ => LogDouble(0.01)
// 0.32
case (A, S \\ NP) /* */ => LogDouble(0.41)
case (A, NP) /* */ => LogDouble(0.42)
case (B, /**/ STA) /* */ => sys.error("B -> <S>")
case (B, /**/ N) /* */ => LogDouble(0.46)
case (B, /**/ S \\ N) /* */ => LogDouble(0.13)
case (B, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.52)
case (B, /**/ END) /* */ => LogDouble(0.12)
// 0.26
case (B, S \\ NP) /* */ => LogDouble(0.43)
case (B, NP) /* */ => LogDouble(0.44)
case (CAN, /**/ STA) /* */ => sys.error("CAN -> <S>")
case (CAN, /**/ N) /* */ => LogDouble(0.23)
case (CAN, /**/ S \\ N) /* */ => LogDouble(0.02)
case (CAN, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.03)
case (CAN, /**/ END) /* */ => LogDouble(0.04)
// 0.32
case (CAN, S \\ NP) /* */ => LogDouble(0.39)
case (CAN, NP) /* */ => LogDouble(0.27)
case (CANT, /**/ STA) /* */ => sys.error("CANT -> <S>")
case (CANT, /**/ N) /* */ => LogDouble(0.05)
case (CANT, /**/ S \\ N) /* */ => LogDouble(0.06)
case (CANT, /**/ (S \\ N) \\ (S \\ N)) => LogDouble(0.07)
case (CANT, /**/ END) /* */ => LogDouble(0.08)
// 0.26
case (CANT, S \\ NP) /* */ => LogDouble(0.37)
case (CANT, NP) /* */ => LogDouble(0.28)
// COMB TOTAL = 0.75 + 0.75 + 0.85 + 0.83 = 3.18
// NCOM TOTAL = 0.25 + 0.25 + 0.15 + 0.17 = 0.82
case (END, _) => ???
}
}
override def sample(given: Cat): Cat = ???
}
// Combinability relation between adjacent categories. true entries here correspond to the
// transitions whose mass check() expects to be rescaled to combinableTransitionMass;
// CAN combines with everything, CANT (almost) nothing — edge cases exercised below.
val mockCanCombine = new CatCanCombine {
def apply(a: Cat, b: Cat): Boolean = (a, b) match {
case (STA, STA) /* */ => sys.error("<S> -> <S>")
case (STA, N) /* */ => true
case (STA, S \\ N) /* */ => false
case (STA, (S \\ N) \\ (S \\ N)) /* */ => false
case (STA, END) /* */ => false
case (STA, NP) /* */ => true
case (STA, S \\ NP) /* */ => false
case (N, /**/ STA) /* */ => sys.error("N -> <S>")
case (N, /**/ N) /* */ => false
case (N, /**/ S \\ N) /* */ => true
case (N, /**/ (S \\ N) \\ (S \\ N)) /**/ => false
case (N, /**/ END) /* */ => true
case (N, NP) /* */ => false
case (N, S \\ NP) /* */ => true
case (S \\ N, /**/ STA) /* */ => sys.error("S\\\\N -> <S>")
case (S \\ N, /**/ N) /* */ => false
case (S \\ N, /**/ S \\ N) /* */ => false
case (S \\ N, /**/ (S \\ N) \\ (S \\ N)) => true
case (S \\ N, /**/ END) /* */ => true
case (S \\ N, NP \\ (S \\ N)) /* */ => true
case (S \\ N, NP) /* */ => false
case ((S \\ N) \\ (S \\ N), /**/ STA) /* */ => sys.error("(S\\\\N)\\\\(S\\\\N) -> <S>")
case ((S \\ N) \\ (S \\ N), /**/ N) /* */ => false
case ((S \\ N) \\ (S \\ N), /**/ S \\ N) /* */ => false
case ((S \\ N) \\ (S \\ N), /**/ (S \\ N) \\ (S \\ N)) => true
case ((S \\ N) \\ (S \\ N), /**/ END) /* */ => true
case ((S \\ N) \\ (S \\ N), /**/ NP \\ (S \\ N)) /**/ => true
case ((S \\ N) \\ (S \\ N), /**/ NP) /* */ => false
case (X, /**/ STA) /* */ => sys.error("X -> <S>")
case (X, /**/ N) /* */ => false
case (X, /**/ S \\ N) /* */ => true
case (X, /**/ (S \\ N) \\ (S \\ N)) /**/ => false
case (X, /**/ END) /* */ => true
case (X, NP) /* */ => false
case (X, S \\ NP) /* */ => true
case (A, /**/ STA) /* */ => sys.error("A -> <S>")
case (A, /**/ N) /* */ => true
case (A, /**/ S \\ N) /* */ => false
case (A, /**/ (S \\ N) \\ (S \\ N)) /**/ => true
case (A, /**/ END) /* */ => false
case (A, S \\ NP) /* */ => true
case (A, NP) /* */ => false
case (B, /**/ STA) /* */ => sys.error("B -> <S>")
case (B, /**/ N) /* */ => true
case (B, /**/ S \\ N) /* */ => false
case (B, /**/ (S \\ N) \\ (S \\ N)) /**/ => true
case (B, /**/ END) /* */ => false
case (B, S \\ NP) /* */ => true
case (B, NP) /* */ => false
case (CAN, /**/ STA) /* */ => sys.error("CAN -> <S>")
case (CAN, /**/ N) /* */ => true
case (CAN, /**/ S \\ N) /* */ => true
case (CAN, /**/ (S \\ N) \\ (S \\ N)) /**/ => true
case (CAN, /**/ END) /* */ => true
case (CAN, S \\ NP) /* */ => true
case (CAN, NP) /* */ => false
case (CANT, /**/ STA) /* */ => sys.error("CANT -> <S>")
case (CANT, /**/ N) /* */ => false
case (CANT, /**/ S \\ N) /* */ => false
case (CANT, /**/ (S \\ N) \\ (S \\ N)) /**/ => false
case (CANT, /**/ END) /* */ => false
case (CANT, S \\ NP) /* */ => true
case (CANT, NP) /* */ => false
case (END, _) => ???
}
def rules: Vector[CcgRule] = ???
def startCat: Cat = ???
def endCat: Cat = ???
}
// Target mass for combinable transitions; non-combinable transitions share the remaining 0.03.
val combinableTransitionMass = 0.97
// Shared assertion suite, run against both the directly-constructed distribution and the one
// produced by the initializer. Expected values follow the pattern raw / (rowMass / targetMass):
// combinable transitions are rescaled to sum to 0.97 per row, non-combinable ones to 0.03.
def check(cctcpd: CcgCombinabilityTransitionConditionalLogProbabilityDistribution) {
def p(given: Cat, x: Cat) = cctcpd(x, given)
// /*
// * Verify sums make sense
// */
// assertEquals(4, cctcpd.combinableSplitSums.size)
// assertEqualsLog(LogDouble(0.70 / 0.8), cctcpd.combinableSplitSums(STA)._1, 1e-9)
// assertEqualsLog(LogDouble(0.25 / 0.2), cctcpd.combinableSplitSums(STA)._2, 1e-9)
// assertEqualsLog(LogDouble(0.85 / 0.8), cctcpd.combinableSplitSums(N)._1, 1e-9)
// assertEqualsLog(LogDouble(0.25 / 0.2), cctcpd.combinableSplitSums(N)._2, 1e-9)
// assertEqualsLog(LogDouble(0.65 / 0.8), cctcpd.combinableSplitSums(S \\ N)._1, 1e-9)
// assertEqualsLog(LogDouble(0.15 / 0.2), cctcpd.combinableSplitSums(S \\ N)._2, 1e-9)
// assertEqualsLog(LogDouble(0.83 / 0.8), cctcpd.combinableSplitSums((S \\ N) \\ (S \\ N))._1, 1e-9)
// assertEqualsLog(LogDouble(0.17 / 0.2), cctcpd.combinableSplitSums((S \\ N) \\ (S \\ N))._2, 1e-9)
assertEqualsLog(LogDouble(0.70 / (0.70 / 0.97)), p(STA, N), 1e-9)
assertEqualsLog(LogDouble(0.13 / (0.25 / 0.03)), p(STA, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.16 / (0.70 / 0.97)), p(STA, NP), 1e-9)
assertEqualsLog(LogDouble(0.29 / (0.25 / 0.03)), p(STA, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.50 / (0.85 / 0.97)), p(N, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.20 / (0.25 / 0.03)), p(N, N), 1e-9)
assertEqualsLog(LogDouble(0.33 / (0.85 / 0.97)), p(N, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.21 / (0.25 / 0.03)), p(N, NP), 1e-9)
assertEqualsLog(LogDouble(0.45 / (0.65 / 0.97)), p(S \\ N, (S \\ N) \\ (S \\ N)), 1e-9)
assertEqualsLog(LogDouble(0.08 / (0.15 / 0.03)), p(S \\ N, N), 1e-9)
assertEqualsLog(LogDouble(0.26 / (0.65 / 0.97)), p(S \\ N, NP \\ (S \\ N)), 1e-9)
assertEqualsLog(LogDouble(0.31 / (0.15 / 0.03)), p(S \\ N, NP), 1e-9)
assertEqualsLog(LogDouble(0.60 / (0.83 / 0.97)), p((S \\ N) \\ (S \\ N), END), 1e-9)
assertEqualsLog(LogDouble(0.06 / (0.17 / 0.03)), p((S \\ N) \\ (S \\ N), S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.03 / (0.83 / 0.97)), p((S \\ N) \\ (S \\ N), NP \\ (S \\ N)), 1e-9)
assertEqualsLog(LogDouble(0.14 / (0.17 / 0.03)), p((S \\ N) \\ (S \\ N), NP), 1e-9)
assertEqualsLog(LogDouble(0.55 / (0.95 / 0.97)), p(X, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.25 / (0.35 / 0.03)), p(X, N), 1e-9)
assertEqualsLog(LogDouble(0.38 / (0.95 / 0.97)), p(X, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.26 / (0.35 / 0.03)), p(X, NP), 1e-9)
// Edge cases below: when a row's combinable/non-combinable mass is degenerate, the raw
// probabilities are expected to pass through unscaled (the commented-out divisors show
// what the scaling would have been).
// cantTotal < 1-combinableTransitionMass
assertEqualsLog(LogDouble(0.29 /* / (0.32 / 0.97) */ ), p(A, N), 1e-9)
assertEqualsLog(LogDouble(0.01 /* / (0.02 / 0.97) */ ), p(A, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.41 /* / (0.32 / 0.97) */ ), p(A, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.42 /* / (0.02 / 0.03) */ ), p(A, NP), 1e-9)
// canTotal > combinableTransitionMass
assertEqualsLog(LogDouble(0.46 /* / (0.98 / 0.03) */ ), p(B, N), 1e-9)
assertEqualsLog(LogDouble(0.13 /* / (0.25 / 0.03) */ ), p(B, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.43 /* / (0.98 / 0.97) */ ), p(B, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.44 /* / (0.25 / 0.03) */ ), p(B, NP), 1e-9)
// cantTotal == 0
assertEqualsLog(LogDouble(0.02 /* / (0.32 / 0.97) */ ), p(CAN, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.23 /* / (0.32 / 0.97) */ ), p(CAN, N), 1e-9)
assertEqualsLog(LogDouble(0.39 /* / (0.32 / 0.97) */ ), p(CAN, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.27 /* / (0.00 / 0.03) */ ), p(CAN, NP), 1e-9)
// canTotal == 0
assertEqualsLog(LogDouble(0.06 / (0.26 / 0.03)), p(CANT, S \\ N), 1e-9)
assertEqualsLog(LogDouble(0.05 / (0.26 / 0.03)), p(CANT, N), 1e-9)
assertEqualsLog(LogDouble(0.37 /* / (0.00 / 0.97) */ ), p(CANT, S \\ NP), 1e-9)
assertEqualsLog(LogDouble(0.28 / (0.26 / 0.03)), p(CANT, NP), 1e-9)
}
/*
* Test CcgCombinabilityTransitionConditionalLogProbabilityDistribution
*/
val directCctcpd = new CcgCombinabilityTransitionConditionalLogProbabilityDistribution(
mockTrCPD, mockTagdict2, mockCanCombine, combinableTransitionMass, totalSmoothing = LogDouble.zero)
check(directCctcpd)
/*
* Test CcgCombinabilityTrInitializer
*/
val mockSentences = Vector[Vector[(Word, Set[Tag])]](Vector(("junk", Set())))
// Delegate initializer: verifies it receives the exact sentence collection, then hands back
// the same base distribution used in the direct test above.
val mockTransitionInitializer = new TransitionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
assertSame(mockSentences, sentences)
mockTrCPD
}
}
// Initial dictionary: verifies withWords is called with the sentence words, then starts the
// withWords -> withTags chain ending at mockTagdict2.
val mockInitialTagdict = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???; def startTag: Tag = ???; def endTag: Tag = ???
def excludedTags: Set[Tag] = ???
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = {
assertEquals(mockSentences.flatten.map(_._1).toSet, words)
mockTagdict1
}
}
val cctcpdInitializer = new CcgCombinabilityTrInitializer(
mockTransitionInitializer, mockCanCombine, combinableTransitionMass, totalSmoothing = LogDouble.zero)
check(cctcpdInitializer.fromKnownSupertagSets(mockSentences, mockInitialTagdict))
}
@Test
def test_TagPriorTrInitializer {
type Word = String
type Tag = Cat
val mockSentences = Vector[Vector[(Word, Set[Tag])]](Vector("junk" -> Set.empty))
val mockExcludedTag: Tag = cat"an excluded tag".asInstanceOf[AtomCat]
val mockEndTag: Tag = cat"an end tag".asInstanceOf[AtomCat]
// Final dictionary in the chain; exposes the excluded tag and the end tag used in assertions.
val mockTagdict2 = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???; def startTag: Tag = ???; def endTag: Tag = mockEndTag
def excludedTags: Set[Tag] = Set(mockExcludedTag)
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = ???
}
// Middle of the chain: only withTags is implemented, forwarding to mockTagdict2.
val mockTagdict1 = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???; def startTag: Tag = ???; def endTag: Tag = ???
def excludedTags: Set[Tag] = ???
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = {
mockTagdict2
}
}
// Entry point of the chain: verifies withWords receives the sentence words.
val mockInitialTagdict = new TagDictionary[Tag] {
def allWords: Set[Word] = ???
def startWord: Word = ???; def endWord: Word = ???; def startTag: Tag = ???; def endTag: Tag = ???
def excludedTags: Set[Tag] = ???
def reversed: Map[Tag, Set[Word]] = ???
def entries: Map[Word, Set[Tag]] = ???
def knownWordsForTag: Map[Tag, Set[Word]] = ???
def withTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def withExcludedTags(tags: Set[Tag]): TagDictionary[Tag] = ???
def apply(w: Word): Set[Tag] = ???
def allTags: Set[Tag] = ???
def withWords(words: Set[Word]): TagDictionary[Tag] = {
assertEquals(mockSentences.flatten.map(_._1).toSet, words)
mockTagdict1
}
}
// Deliberately non-exhaustive prior: only the three tags asserted on below are defined,
// so any unexpected query fails with a MatchError.
val mockTagPrior = new LogProbabilityDistribution[Tag] {
def apply(b: Tag): LogDouble = b match {
case N => LogDouble(0.11)
case `mockEndTag` => LogDouble(0.23)
case `mockExcludedTag` => LogDouble(0.35)
}
def sample(): Tag = ???
def defaultProb: LogDouble = ???
}
val mockTagPriorInitializer = new TagPriorInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
assertSame(mockSentences, sentences)
assertSame(mockTagdict2, initialTagdict)
mockTagPrior
}
}
val tpti = new TagPriorTrInitializer(mockTagPriorInitializer)
val tpTrCPD = tpti.fromKnownSupertagSets(mockSentences, mockInitialTagdict)
def p(given: Cat, x: Cat) = tpTrCPD(x, given)
// The resulting transition distribution ignores the conditioning tag (prior only) ...
assertEqualsLog(LogDouble(0.11), p(X, N), 1e-9)
assertEqualsLog(LogDouble(0.23), p(X, mockEndTag), 1e-9)
assertEqualsLog(LogDouble(0.35), p(X, mockExcludedTag), 1e-9)
assertEqualsLog(LogDouble(0.11), p(S \\ NP, N), 1e-9)
assertEqualsLog(LogDouble(0.23), p(S \\ NP, mockEndTag), 1e-9)
assertEqualsLog(LogDouble(0.35), p(S \\ NP, mockExcludedTag), 1e-9)
// ... except that nothing may follow the end tag or an excluded tag.
assertEqualsLog(LogDouble(0.00), p(mockEndTag, N), 1e-9)
assertEqualsLog(LogDouble(0.00), p(mockExcludedTag, N), 1e-9)
}
// Compares two LogDoubles in linear space with absolute tolerance c.
def assertEqualsLog(a: LogDouble, b: LogDouble, c: Double) {
assertEquals(a.toDouble, b.toDouble, c)
}
}
| dhgarrette/2015-ccg-parsing | src/test/scala/dhg/ccg/tag/learn/CcgHmmInitializationTests.scala | Scala | apache-2.0 | 21,719 |
////////////////////////////////////////////////////////////////////////////////
// //
// OpenSolid is a generic library for the representation and manipulation //
// of geometric objects such as points, curves, surfaces, and volumes. //
// //
// Copyright 2007-2015 by Ian Mackenzie //
// [email protected] //
// //
// This Source Code Form is subject to the terms of the Mozilla Public //
// License, v. 2.0. If a copy of the MPL was not distributed with this file, //
// you can obtain one at http://mozilla.org/MPL/2.0/. //
// //
////////////////////////////////////////////////////////////////////////////////
package org.opensolid.core
import scala.math
/**
 * An immutable vector in 2D space, defined by its X and Y components.
 *
 * Supports the usual vector-space operations (addition, subtraction, scaling),
 * dot and scalar cross products, projection, and conversion to a unit
 * [[Direction2d]]. Each operator method has a named alias (`plus`, `minus`,
 * `times`, `dividedBy`) and an overload lifting the operation into
 * [[VectorExpression2d]] / [[Expression1d]] parametric expressions.
 */
final case class Vector2d(x: Double, y: Double) extends VectorTransformable2d[Vector2d] {
  /** Both components as an (x, y) tuple. */
  def components: (Double, Double) =
    (x, y)

  /** Component at `index` (0 -> x, 1 -> y).
    *
    * @throws IndexOutOfBoundsException if `index` is not 0 or 1
    */
  def component(index: Int): Double = index match {
    case 0 => x
    case 1 => y
    case _ => throw new IndexOutOfBoundsException(s"Index $index is out of bounds for Vector2d")
  }

  /** Squared length of this vector; cheaper than `length` (avoids the square root). */
  def squaredLength: Double =
    x * x + y * y

  /** Euclidean length of this vector. */
  def length: Double =
    math.sqrt(squaredLength)

  /** True if this vector's length is within `tolerance` of zero.
    * Compares squared quantities so no square root is taken. */
  def isZero(tolerance: Double): Boolean =
    squaredLength.isZero(tolerance * tolerance)

  /** True if this vector's length is not within `tolerance` of zero. */
  def isNonZero(tolerance: Double): Boolean =
    squaredLength.isNonZero(tolerance * tolerance)

  override def transformedBy(transformation: Transformation2d): Vector2d =
    transformation(this)

  /** Projection of this vector onto the given direction. */
  def projectedOnto(direction: Direction2d): Vector2d =
    componentIn(direction) * direction

  /** Projection of this vector onto the direction of the given axis. */
  def projectedOnto(axis: Axis2d): Vector2d =
    projectedOnto(axis.direction)

  /** Lifts this 2D vector into 3D space using the plane's basis directions. */
  def placedOnto(plane: Plane3d): Vector3d =
    x * plane.xDirection + y * plane.yDirection

  /** Unit vector with the same direction as this one, or None for the zero vector. */
  def normalized: Option[Vector2d] = {
    val length = this.length
    if (length == 0.0) {
      None
    } else {
      Some(this / length)
    }
  }

  /** Direction of this vector, or None for the zero vector.
    * Defined via `normalized` so the zero-length check lives in one place. */
  def direction: Option[Direction2d] =
    normalized.map(unit => Direction2d(unit.x, unit.y))

  /** This vector rotated 90 degrees counterclockwise; same length, not normalized. */
  def perpendicularVector: Vector2d =
    Vector2d(-y, x)

  /** Unit direction perpendicular to this vector, or None for the zero vector. */
  def normalDirection: Option[Direction2d] =
    perpendicularVector.direction

  def unary_- : Vector2d =
    Vector2d(-x, -y)

  /** Named alias for unary negation. */
  def negated: Vector2d =
    -this

  def +(that: Vector2d): Vector2d =
    Vector2d(this.x + that.x, this.y + that.y)

  def +[P](expression: VectorExpression2d[P]): VectorExpression2d[P] =
    VectorExpression2d.Constant[P](this) + expression

  def plus(that: Vector2d): Vector2d =
    this + that

  def plus[P](expression: VectorExpression2d[P]): VectorExpression2d[P] =
    this + expression

  def -(that: Vector2d): Vector2d =
    Vector2d(this.x - that.x, this.y - that.y)

  def -[P](expression: VectorExpression2d[P]): VectorExpression2d[P] =
    VectorExpression2d.Constant[P](this) - expression

  def minus(that: Vector2d): Vector2d =
    this - that

  def minus[P](expression: VectorExpression2d[P]): VectorExpression2d[P] =
    this - expression

  def *(value: Double): Vector2d =
    Vector2d(x * value, y * value)

  def *[P](expression: Expression1d[P]): VectorExpression2d[P] =
    VectorExpression2d.Constant[P](this) * expression

  def times(value: Double): Vector2d =
    this * value

  def times[P](expression: Expression1d[P]): VectorExpression2d[P] =
    this * expression

  def /(value: Double): Vector2d =
    Vector2d(x / value, y / value)

  def /[P](expression: Expression1d[P]): VectorExpression2d[P] =
    VectorExpression2d.Constant[P](this) / expression

  def dividedBy(value: Double): Vector2d =
    this / value

  def dividedBy[P](expression: Expression1d[P]): VectorExpression2d[P] =
    this / expression

  /** Dot product of this vector with another. */
  def dot(that: Vector2d): Double =
    this.x * that.x + this.y * that.y

  def dot[P](expression: VectorExpression2d[P]): Expression1d[P] =
    VectorExpression2d.Constant[P](this).dot(expression)

  /** 2D scalar cross product (z component of the 3D cross product). */
  def cross(that: Vector2d): Double =
    this.x * that.y - this.y * that.x

  /** 2D scalar cross product with a unit direction. */
  def cross(direction: Direction2d): Double =
    x * direction.y - y * direction.x

  def cross[P](expression: VectorExpression2d[P]): Expression1d[P] =
    VectorExpression2d.Constant[P](this).cross(expression)

  /** Signed component of this vector measured along the given direction. */
  def componentIn(direction: Direction2d): Double =
    x * direction.x + y * direction.y
}
/** Factory methods and constants for [[Vector2d]]. */
object Vector2d {
  /** Builds a vector from an (x, y) tuple of components. */
  def fromComponents(components: (Double, Double)): Vector2d = {
    val (x, y) = components
    Vector2d(x, y)
  }

  /** Builds the vector with the given radius and polar angle (in radians). */
  def polar(radius: Double, angle: Double): Vector2d = {
    val x = radius * math.cos(angle)
    val y = radius * math.sin(angle)
    Vector2d(x, y)
  }

  /** The zero vector. */
  val Zero: Vector2d = Vector2d(0.0, 0.0)
}
| ianmackenzie/opensolid-core | src/main/scala/org/opensolid/core/Vector2d.scala | Scala | mpl-2.0 | 4,951 |
package jp.co.cyberagent.aeromock.core.bootstrap
import javassist.{LoaderClassPath, ClassPool}
/**
* Bootstrap trait
*/
/**
 * Bootstrap trait: mixins implement `process()` to run their startup logic.
 * Provides a javassist [[ClassPool]] wired to this trait's class loader.
 */
trait Bootstrap {
  /** Class pool backed by the loader that loaded this class. */
  val pool: ClassPool = {
    val p = new ClassPool()
    p.appendClassPath(new LoaderClassPath(getClass.getClassLoader))
    p
  }

  /** Run this bootstrap's startup process. */
  def process(): Unit
}
| CyberAgent/aeromock | aeromock-server/src/main/scala/jp/co/cyberagent/aeromock/core/bootstrap/Bootstrap.scala | Scala | mit | 315 |
package net.bhardy.braintree.scala
import net.bhardy.braintree.scala.util.EnumUtils
import net.bhardy.braintree.scala.util.NodeWrapper
import net.bhardy.braintree.scala.ValidationErrors.NoValidationErrors
/**
 * Parsed representation of a Braintree webhook notification payload.
 * All fields are extracted eagerly from the given XML node wrapper.
 */
final class WebhookNotification(node: NodeWrapper) {
// Notification kind, if the <kind> element names a known WebhookNotifications.Kind value.
val kind = EnumUtils.findByNameOpt(classOf[WebhookNotifications.Kind])(node("kind"))
val timestamp = node.findDateTime("timestamp")
// The <subject> element carries the payload; on failure it wraps an <api-error-response>,
// in which case that error node becomes the element the payload is read from.
val subjectNode = node.findFirst("subject")
val errorNode = subjectNode.findFirstOpt("api-error-response")
val wrapperNode = errorNode.getOrElse(subjectNode)
// At most one of the following payload fields is expected to be present,
// depending on the notification kind — TODO confirm against the Braintree API.
val subscription = wrapperNode.findFirstOpt("subscription") map {
new Subscription(_)
}
val merchantAccount = wrapperNode.findFirstOpt("merchant-account") map {
MerchantAccount(_)
}
val transaction = wrapperNode.findFirstOpt("transaction") map {
new Transaction(_)
}
// Validation errors accompany unsuccessful responses; otherwise the empty sentinel is used.
val errors = if (!wrapperNode.isSuccess) {
ValidationErrors.apply(wrapperNode)
} else {
NoValidationErrors
}
}
package org.scalacoin.util
import org.scalatest.{FlatSpec, MustMatchers}
/**
* Created by chris on 4/1/16.
*/
class BitcoinSUtilTest extends FlatSpec with MustMatchers {
  // TODO: empty placeholder suite — no test cases for BitcoinSUtil have been written yet.
}
| TomMcCabe/scalacoin | src/test/scala/org/scalacoin/util/BitcoinSUtilTest.scala | Scala | mit | 177 |
package ru.org.codingteam.horta.plugins
import ru.org.codingteam.horta.protocol.Protocol
import ru.org.codingteam.horta.security.{Credential, CommonAccess}
import ru.org.codingteam.horta.localization.Localization._
/** Token identifying the "fortune" command when dispatched to processCommand. */
private object FortuneCommand
/**
 * Deprecated "fortune" plugin: the command is kept registered, but invoking it
 * only replies with a localized deprecation notice pointing users elsewhere.
 */
class FortunePlugin extends BasePlugin with CommandProcessor {
override def name = "fortune"
// Registers the "fortune" command, available to all users, dispatched with the FortuneCommand token.
override def commands = List(CommandDefinition(CommonAccess, "fortune", FortuneCommand))
override def processCommand(credential: Credential,
token: Any,
arguments: Array[String]) {
// Implicit credential is required by the localize(...) call below.
implicit val c = credential
token match {
case FortuneCommand =>
// Arguments are ignored: the plugin only ever sends the deprecation message.
Protocol.sendResponse(credential.location, credential,
localize("Fortune plugin is deprecated. You may take a look at $loglist command for similar functionality."))
}
}
}
| codingteam/horta-hell | src/main/scala/ru/org/codingteam/horta/plugins/FortunePlugin.scala | Scala | mit | 866 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import com.intel.analytics.bigdl.dllib.utils.tf.Tensorflow._
import com.intel.analytics.bigdl.dllib.utils.tf.TensorflowSpecHelper
import org.tensorflow.framework.{DataType, NodeDef}
/**
 * Checks BigDL's StridedSlice loader against TensorFlow's own output for a
 * variety of mask combinations. Each compare(...) call builds a StridedSlice
 * NodeDef and feeds it (input, begin, end, strides) tensors; the trailing 0 is
 * the index of the output being compared.
 * NOTE(review): begin_mask/end_mask/shrink_axis_mask are bit fields per
 * dimension, per the TensorFlow strided_slice contract — values below (e.g. 5
 * = dims 0 and 2) are chosen to exercise different bit patterns.
 */
class StridedSliceSpec extends TensorflowSpecHelper {
// Float inputs: 1-D slices with shrink_axis on dim 0, then 3-D slices with
// various begin/end/shrink mask combinations.
"StridedSlice forward float" should "be correct" in {
compare[Float](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(0))
.putAttr("end_mask", intAttr(0))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(1)),
Seq(Tensor[Float](T(40, 128, 64)), Tensor[Int](T(0)),
Tensor[Int](T(1)), Tensor[Int](T(1))),
0
)
compare[Float](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(0))
.putAttr("end_mask", intAttr(0))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(1)),
Seq(Tensor[Float](T(40, 128, 64)), Tensor[Int](T(1)),
Tensor[Int](T(2)), Tensor[Int](T(1))),
0
)
compare[Float](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(0))
.putAttr("end_mask", intAttr(0))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(0)),
Seq(Tensor[Float](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6))
)), Tensor[Int](T(1, 0, 0)),
Tensor[Int](T(2, 1, 3)), Tensor[Int](T(1, 1, 1))),
0
)
// Negative begin index combined with begin/end masks on dims 0 and 2 (mask value 5).
compare[Float](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(5))
.putAttr("end_mask", intAttr(5))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(2)),
Seq(Tensor[Float](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6)))
), Tensor[Int](T(0, -1, 0)),
Tensor[Int](T(0, 0, 0)), Tensor[Int](T(1, 1, 1))),
0
)
compare[Float](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(5))
.putAttr("end_mask", intAttr(5))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(2)),
Seq(Tensor[Float](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6)))
), Tensor[Int](T(0, 1, 0)),
Tensor[Int](T(0, 0, 0)), Tensor[Int](T(1, 1, 1))),
0
)
}
// Same operator with Int data, covering additional mask values (5, 1, 2)
// and shrink_axis on different dimensions.
"StridedSlice forward int" should "be correct" in {
compare[Int](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_INT32))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(5))
.putAttr("end_mask", intAttr(5))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(2)),
Seq(Tensor[Int](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6)))
), Tensor[Int](T(0, -1, 0)),
Tensor[Int](T(0, 0, 0)), Tensor[Int](T(1, 1, 1))),
0
)
compare[Int](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_INT32))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(1))
.putAttr("end_mask", intAttr(1))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(2)),
Seq(Tensor[Int](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6)))
), Tensor[Int](T(0, -1, 0)),
Tensor[Int](T(0, 0, 2)), Tensor[Int](T(1, 1, 1))),
0
)
compare[Int](
NodeDef.newBuilder()
.setName("StridedSliceTest")
.setOp(s"StridedSlice")
.putAttr("T", typeAttr(DataType.DT_INT32))
.putAttr("Index", typeAttr(DataType.DT_INT32))
.putAttr("begin_mask", intAttr(2))
.putAttr("end_mask", intAttr(2))
.putAttr("ellipsis_mask", intAttr(0))
.putAttr("new_axis_mask", intAttr(0))
.putAttr("shrink_axis_mask", intAttr(4)),
Seq(Tensor[Int](T(
T(T(1, 1, 1), T(2, 2, 2)),
T(T(3, 3, 3), T(4, 4, 4)),
T(T(5, 5, 5), T(6, 6, 6)))
), Tensor[Int](T(0, 0, -1)),
Tensor[Int](T(1, 0, 0)), Tensor[Int](T(1, 1, 1))),
0
)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/StridedSliceSpec.scala | Scala | apache-2.0 | 6,378 |
package mesosphere.marathon.core
import javax.inject.Named
import akka.actor.ActorRefFactory
import com.google.inject.name.Names
import com.google.inject.{ AbstractModule, Provides, Scopes, Singleton }
import mesosphere.marathon.MarathonConf
import mesosphere.marathon.core.appinfo.{ AppInfoModule, AppInfoService, GroupInfoService }
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.launcher.OfferProcessor
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.leadership.{ LeadershipCoordinator, LeadershipModule }
import mesosphere.marathon.core.plugin.{ PluginDefinitions, PluginManager }
import mesosphere.marathon.core.readiness.ReadinessCheckExecutor
import mesosphere.marathon.core.task.bus.{ TaskStatusEmitter, TaskChangeObservables }
import mesosphere.marathon.core.task.jobs.TaskJobsModule
import mesosphere.marathon.core.task.tracker.{ TaskCreationHandler, TaskStateOpProcessor, TaskTracker }
import mesosphere.marathon.core.task.update.impl.steps.{
ContinueOnErrorStep,
NotifyHealthCheckManagerStepImpl,
NotifyLaunchQueueStepImpl,
NotifyRateLimiterStepImpl,
PostToEventStreamStepImpl,
ScaleAppUpdateStepImpl,
TaskStatusEmitterPublishStepImpl
}
import mesosphere.marathon.core.task.update.impl.{ TaskStatusUpdateProcessorImpl, ThrottlingTaskStatusUpdateProcessor }
import mesosphere.marathon.core.task.update.{ TaskStatusUpdateProcessor, TaskUpdateStep }
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.plugin.auth.{ Authenticator, Authorizer }
import mesosphere.marathon.plugin.http.HttpRequestHandler
import mesosphere.util.{ CapConcurrentExecutions, CapConcurrentExecutionsMetrics }
/**
* Provides the glue between guice and the core modules.
*/
/** Guice module that exposes components of the core modules as injectable
 *  singletons, bridging the hand-wired core with Guice-managed code.
 */
class CoreGuiceModule extends AbstractModule {
// Export classes used outside of core to guice
@Provides @Singleton
def leadershipModule(coreModule: CoreModule): LeadershipModule = coreModule.leadershipModule
@Provides @Singleton
def taskTracker(coreModule: CoreModule): TaskTracker = coreModule.taskTrackerModule.taskTracker
@Provides @Singleton
def taskCreationHandler(coreModule: CoreModule): TaskCreationHandler =
coreModule.taskTrackerModule.taskCreationHandler
@Provides @Singleton
def stateOpProcessor(coreModule: CoreModule): TaskStateOpProcessor = coreModule.taskTrackerModule.stateOpProcessor
// The two "prerequisite" parameters are never used in the body; they only
// force Guice to initialize those components before the coordinator exists.
@Provides @Singleton
def leadershipCoordinator(
leadershipModule: LeadershipModule,
// makeSureToInitializeThisBeforeCreatingCoordinator
prerequisite1: TaskStatusUpdateProcessor,
prerequisite2: LaunchQueue): LeadershipCoordinator =
leadershipModule.coordinator()
@Provides @Singleton
def offerProcessor(coreModule: CoreModule): OfferProcessor = coreModule.launcherModule.offerProcessor
@Provides @Singleton
def taskStatusEmitter(coreModule: CoreModule): TaskStatusEmitter = coreModule.taskBusModule.taskStatusEmitter
@Provides @Singleton
def taskStatusObservable(coreModule: CoreModule): TaskChangeObservables =
coreModule.taskBusModule.taskStatusObservables
@Provides @Singleton
def taskJobsModule(coreModule: CoreModule): TaskJobsModule = coreModule.taskJobsModule
@Provides @Singleton
final def launchQueue(coreModule: CoreModule): LaunchQueue = coreModule.appOfferMatcherModule.launchQueue
@Provides @Singleton
final def appInfoService(appInfoModule: AppInfoModule): AppInfoService = appInfoModule.appInfoService
@Provides @Singleton
final def groupInfoService(appInfoModule: AppInfoModule): GroupInfoService = appInfoModule.groupInfoService
@Provides @Singleton
def pluginManager(coreModule: CoreModule): PluginManager = coreModule.pluginModule.pluginManager
@Provides @Singleton
def pluginDefinitions(coreModule: CoreModule): PluginDefinitions = coreModule.pluginModule.pluginManager.definitions
@Provides @Singleton
def authorizer(coreModule: CoreModule): Authorizer = coreModule.authModule.authorizer
@Provides @Singleton
def authenticator(coreModule: CoreModule): Authenticator = coreModule.authModule.authenticator
@Provides @Singleton
def readinessCheckExecutor(coreModule: CoreModule): ReadinessCheckExecutor = coreModule.readinessModule.readinessCheckExecutor //scalastyle:ignore
// Assembles the ordered pipeline of steps applied to every task status update.
@Provides @Singleton
def taskStatusUpdateSteps(
notifyHealthCheckManagerStepImpl: NotifyHealthCheckManagerStepImpl,
notifyRateLimiterStepImpl: NotifyRateLimiterStepImpl,
notifyLaunchQueueStepImpl: NotifyLaunchQueueStepImpl,
taskStatusEmitterPublishImpl: TaskStatusEmitterPublishStepImpl,
postToEventStreamStepImpl: PostToEventStreamStepImpl,
scaleAppUpdateStepImpl: ScaleAppUpdateStepImpl): Seq[TaskUpdateStep] = {
// This is a sequence on purpose. The specified steps are executed in order for every
// task status update.
// This way we make sure that e.g. the taskTracker already reflects the changes for the update
// (updateTaskTrackerStepImpl) before we notify the launch queue (notifyLaunchQueueStepImpl).
// The task tracker is updated before any of these steps are processed.
Seq(
// Subsequent steps (for example, the health check subsystem) depend on
// task tracker lookup to determine the routable host address for running
// tasks. In case this status update is the first TASK_RUNNING update
// in IP-per-container mode, we need to store the assigned container
// address reliably before attempting to initiate health checks, or
// publish events to the bus.
// Each step is wrapped so that a failure in one step does not abort the rest.
ContinueOnErrorStep(notifyHealthCheckManagerStepImpl),
ContinueOnErrorStep(notifyRateLimiterStepImpl),
ContinueOnErrorStep(notifyLaunchQueueStepImpl),
ContinueOnErrorStep(taskStatusEmitterPublishImpl),
ContinueOnErrorStep(postToEventStreamStepImpl),
ContinueOnErrorStep(scaleAppUpdateStepImpl)
)
}
@Provides @Singleton
def pluginHttpRequestHandler(coreModule: CoreModule): Seq[HttpRequestHandler] = {
coreModule.pluginModule.httpRequestHandler
}
// Direct Guice bindings that cannot be expressed as @Provides methods.
override def configure(): Unit = {
bind(classOf[Clock]).toInstance(Clock())
bind(classOf[CoreModule]).to(classOf[CoreModuleImpl]).in(Scopes.SINGLETON)
// FIXME: Because of cycle breaking in guice, it is hard to not wire it with Guice directly
bind(classOf[TaskStatusUpdateProcessor])
.annotatedWith(Names.named(ThrottlingTaskStatusUpdateProcessor.dependencyTag))
.to(classOf[TaskStatusUpdateProcessorImpl]).asEagerSingleton()
bind(classOf[TaskStatusUpdateProcessor]).to(classOf[ThrottlingTaskStatusUpdateProcessor]).asEagerSingleton()
bind(classOf[AppInfoModule]).asEagerSingleton()
}
// Limits how many status updates are processed concurrently / queued,
// according to the configured maxima.
@Provides @Singleton @Named(ThrottlingTaskStatusUpdateProcessor.dependencyTag)
def throttlingTaskStatusUpdateProcessorSerializer(
metrics: Metrics,
config: MarathonConf,
actorRefFactory: ActorRefFactory): CapConcurrentExecutions = {
val capMetrics = new CapConcurrentExecutionsMetrics(metrics, classOf[ThrottlingTaskStatusUpdateProcessor])
CapConcurrentExecutions(
capMetrics,
actorRefFactory,
"serializeTaskStatusUpdates",
maxParallel = config.internalMaxParallelStatusUpdates(),
maxQueued = config.internalMaxQueuedStatusUpdates()
)
}
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/CoreGuiceModule.scala | Scala | apache-2.0 | 7,268 |
package com.scalding
import com.twitter.scalding._
import org.apache.hadoop.conf.Configuration
import akka.actor.{Actor, ActorLogging, Props, ActorRef}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/** Base actor for running Scalding executions either locally or on HDFS.
 *
 *  Subclasses provide the [[job]] to run and the actor's message handling
 *  via [[actorReceive]].
 */
abstract class Scalding extends Actor with ActorLogging {
  val settings = new Settings
  import settings._
  var mode: Mode = new Local(true)
  var config: Config = Config.default
  var jobConf: Configuration = new Configuration

  /** Switches the execution mode.
   *
   *  Accepts "local" or "hdfs"; any other value is logged and ignored.
   *  (Previously, "local" after "hdfs" left the HDFS mode in place, and an
   *  unknown string threw a MatchError.)
   */
  def setMode(m: String): Unit = {
    m match {
      case "local" =>
        // Reset to pristine defaults so a previous "hdfs" call does not leak through.
        jobConf = new Configuration
        config = Config.default
        mode = new Local(true)
      case "hdfs" =>
        jobConf.set("mapred.job.tracker", jobTracker)
        jobConf.set("fs.defaultFS", defaultFS)
        config = Config.hadoopWithDefaults(jobConf)
        mode = Hdfs(strict = true, jobConf)
      case other =>
        log.warning("Unknown Scalding mode '{}'; keeping current mode", other)
    }
  }
  // override this: the execution to run
  def job: Execution[Unit]
  // override: the actor's message handling
  def actorReceive: Actor.Receive
  /** Runs the execution asynchronously with the current config/mode. */
  def exec(j: Execution[Unit]): Future[Unit] = {
    j.run(config, mode)
  }
  /** Runs the execution and blocks until it completes. */
  def execBlock(j: Execution[Unit]): Unit = {
    j.waitFor(config, mode)
  }
}
| zentiment/scala-boilerplate | scalding/src/main/scala/com/scalding/Scalding.scala | Scala | mit | 1,020 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import org.apache.spark.internal.Logging
import org.apache.spark.rpc._
/**
* Endpoint which connects to a worker process and terminates the JVM if the
* connection is severed.
* Provides fate sharing between a worker and its associated child processes.
*/
/** Endpoint that connects to a worker process and terminates the JVM if the
 *  connection is severed, providing fate sharing between a worker and its
 *  associated child processes.
 */
private[spark] class WorkerWatcher(
    override val rpcEnv: RpcEnv, workerUrl: String, isTesting: Boolean = false)
  extends RpcEndpoint with Logging {
  logInfo(s"Connecting to worker $workerUrl")
  if (!isTesting) rpcEnv.asyncSetupEndpointRefByURI(workerUrl)
  // Used to avoid shutting down JVM during tests
  // In the normal case, exitNonZero will call `System.exit(-1)` to shutdown the JVM. In the unit
  // test, the user should call `setTesting(true)` so that `exitNonZero` will set `isShutDown` to
  // true rather than calling `System.exit`. The user can check `isShutDown` to know if
  // `exitNonZero` is called.
  private[deploy] var isShutDown = false
  // Only react to events originating from the watched worker's rpc system.
  private val monitoredAddress = RpcAddress.fromURIString(workerUrl)
  private def fromMonitoredWorker(address: RpcAddress): Boolean =
    address == monitoredAddress
  private def exitNonZero(): Unit = {
    if (isTesting) {
      isShutDown = true
    } else {
      System.exit(-1)
    }
  }
  override def receive: PartialFunction[Any, Unit] = {
    case msg => logWarning(s"Received unexpected message: $msg")
  }
  override def onConnected(remoteAddress: RpcAddress): Unit = {
    if (fromMonitoredWorker(remoteAddress)) {
      logInfo(s"Successfully connected to $workerUrl")
    }
  }
  override def onDisconnected(remoteAddress: RpcAddress): Unit = {
    if (fromMonitoredWorker(remoteAddress)) {
      // This log message will never be seen
      logError(s"Lost connection to worker rpc endpoint $workerUrl. Exiting.")
      exitNonZero()
    }
  }
  override def onNetworkError(cause: Throwable, remoteAddress: RpcAddress): Unit = {
    if (fromMonitoredWorker(remoteAddress)) {
      // These logs may not be seen if the worker (and associated pipe) has died
      logError(s"Could not initialize connection to worker $workerUrl. Exiting.")
      logError(s"Error was: $cause")
      exitNonZero()
    }
  }
}
| sh-cho/cshSpark | deploy/worker/WorkerWatcher.scala | Scala | apache-2.0 | 2,963 |
/*******************************************************************************
Copyright (c) 2012-2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.tests
import junit.framework.TestSuite
import _root_.java.io.File
import kr.ac.kaist.jsaf.ProjectProperties
import kr.ac.kaist.jsaf.Shell
/** JUnit entry point that runs every .js compiler test under tests/compiler_tests. */
object CompilerJUTest {
  val SEP = File.separator
  val COMPILER_FAIL_TESTS_DIR = "tests/compiler_tests"
  def main(args: String*) = junit.textui.TestRunner.run(suite)
  def suite() = {
    // false if we want to print out the test results
    val failsOnly = true
    val testSuite = new TestSuite("Test all .js files in 'tests/compiler_tests.")
    //$JUnit-BEGIN$
    testSuite.addTest(FileTests.compilerSuite(COMPILER_FAIL_TESTS_DIR, failsOnly, false))
    //$JUnit-END$
    testSuite
  }
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/tests/CompilerJUTest.scala | Scala | bsd-3-clause | 990 |
package hello
import org.springframework.context.annotation.{Configuration, ComponentScan}
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.boot;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMethod;
/**
* This config class will trigger Spring @annotation scanning and auto configure Spring context.
*
* @author Neha
* @since 1.0
*/
@Controller
@Configuration
@EnableAutoConfiguration
@ComponentScan
class HelloConfig {
// Handles GET requests to the application root ("/").
// @ResponseBody writes the returned String directly into the HTTP response body.
@RequestMapping(value=Array("/"), method=Array(RequestMethod.GET))
@ResponseBody
def home(): String = "Hello World from Neha!!!"
}
| NehaWani/hello-world | src/main/scala/hello/HelloConfig.scala | Scala | mit | 797 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.batch.sql.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.utils.TableTestBase
import org.junit.Test
class CalcValidationTest extends TableTestBase {
  @Test(expected = classOf[ValidationException])
  def testInvalidFields(): Unit = {
    // Register a table with columns a, b, c and then reference a column
    // ("foo") that does not exist: validation must fail.
    val testUtil = batchTestUtil()
    testUtil.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    testUtil.tableEnv.sqlQuery("SELECT a, foo FROM MyTable")
  }
}
| jinglining/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/batch/sql/validation/CalcValidationTest.scala | Scala | apache-2.0 | 1,332 |
//
// RemoveUnusedTypes.scala -- Scala object RemoveUnusedTypes
// Project OrcScala
//
// $Id: RemoveUnusedTypes.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by dkitchin on Jul 12, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.compile.optimize
import orc.ast.oil.named._
/** Remove unused type declarations from the AST.
*
* @author dkitchin
*/
object RemoveUnusedTypes extends NamedASTTransform {
  override def onExpression(context: List[BoundVar], typecontext: List[BoundTypevar]) = {
    case DeclareType(u, t, body) => {
      // Transform the body with the declared type variable in scope, then keep
      // the declaration only if the transformed body still refers to it.
      val transformedBody = transform(body, context, u :: typecontext)
      if (transformedBody.freetypevars contains u)
        DeclareType(u, t, transformedBody)
      else
        transformedBody
    }
  }
}
| laurenyew/cOrcS | src/orc/compile/optimize/RemoveUnusedTypes.scala | Scala | bsd-3-clause | 989 |
package com.sksamuel.scapegoat.inspections
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
/**
* @author Stephen Samuel
*/
class NoOpOverride
  extends Inspection(
    text = "Noop override",
    defaultLevel = Levels.Info,
    description = "Checks for code that overrides parent method but simply calls super.",
    explanation = "This method is overridden yet only calls super."
  ) {
  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser =
        new context.Traverser {
          import context.global._
          // True when the actual arguments are exactly the method's own
          // parameters, forwarded by name in the same order (pure delegation).
          private def argumentsMatch(signatureArgs: List[ValDef], actualArgs: List[Tree]): Boolean = {
            signatureArgs.size == actualArgs.size &&
            signatureArgs.zip(actualArgs).forall {
              case (sig, act: Ident) => sig.name == act.name
              case _ => false
            }
          }
          override def inspect(tree: Tree): Unit = {
            tree match {
              // Matches `def f(args...) = super.f(args...)`: same method name on
              // super, a single parameter list, and arguments passed through
              // unchanged. Note: only single-parameter-list methods are flagged.
              case DefDef(_, name, _, vparamss, _, Apply(Select(Super(This(_), _), name2), args))
                  if name == name2 && vparamss.size == 1 && argumentsMatch(
                    vparamss.headOption.getOrElse(List.empty),
                    args
                  ) =>
                // Report at most the first 200 characters of the offending tree.
                context.warn(tree.pos, self, tree.toString.take(200))
              case _ => continue(tree)
            }
          }
        }
    }
}
| sksamuel/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/NoOpOverride.scala | Scala | apache-2.0 | 1,489 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.ReplicationOrganizationAccess
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Replication Organization Access Repository
* @param session
* @param executionContext
*/
class ReplicationOrganizationAccessRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.ReplicationOrganizationAccessRepository[ReplicationOrganizationAccess , Int]
  with ReplicationOrganizationAccessMapping {

  /** Looks up a single entity by its primary key.
   *  The returned future fails with NoSuchElementException if no row matches.
   */
  def getById(id: Int): Future[ReplicationOrganizationAccess] = {
    Future(run(queryReplicationOrganizationAccess.filter(_.replicationOrganizationAccessId == lift(id))).headOption.get)
  }
  /** Looks up a single entity by its UUID; fails if no row matches. */
  def getByUUID(uuid: UUID): Future[ReplicationOrganizationAccess] = {
    Future(run(queryReplicationOrganizationAccess.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }
  /** Returns the entities matching the given id.
   *  FIX: previously the `id` argument was ignored and the whole table was returned.
   */
  def getByReplicationOrganizationAccessId(id : Int) : Future[List[ReplicationOrganizationAccess]] = {
    Future(run(queryReplicationOrganizationAccess.filter(_.replicationOrganizationAccessId == lift(id))))
  }
  /** Returns all entities. */
  def getAll() : Future[List[ReplicationOrganizationAccess]] = {
    Future(run(queryReplicationOrganizationAccess))
  }
  /** Returns one page of entities together with paging metadata. */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[ReplicationOrganizationAccess]] = {
    val offset = page * pageSize
    val limit = (page + 1) * pageSize
    for {
      count <- countReplicationOrganizationAccess()
      elements <- if (offset > count) Future.successful(Nil)
      else selectReplicationOrganizationAccess(offset, limit)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }
  private def countReplicationOrganizationAccess() = {
    Future(run(queryReplicationOrganizationAccess.size).toInt)
  }
  /** Returns the rows in [offset, limit) — `limit` is an exclusive end index.
   *  FIX: previously `.take(limit)` was applied after `.drop(offset)`, so each
   *  page returned up to `(page + 1) * pageSize` rows instead of `pageSize`.
   *  NOTE(review): pagination is still done in memory after fetching the whole
   *  table; pushing drop/take into the quill query would avoid that.
   */
  private def selectReplicationOrganizationAccess(offset: Int, limit: Int): Future[Seq[ReplicationOrganizationAccess]] = {
    Future(run(queryReplicationOrganizationAccess).drop(offset).take(limit - offset).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/ReplicationOrganizationAccessRepository.scala | Scala | gpl-3.0 | 3,183 |
package com.tpersson.client.common.utils
import de.saxsys.mvvmfx.ViewModel
import scala.concurrent.ExecutionContext
/** Base class for view models: makes the provider's UI execution context
 *  implicitly available to subclasses for scheduling continuations.
 */
class ViewModelBase(executionContextProvider: ExecutionContextProvider) extends ViewModel {
// presumably `ui` dispatches onto the JavaFX application thread — TODO confirm
implicit val uiExecutionContext: ExecutionContext = executionContextProvider.ui
}
| TommyPersson/scala-mvvm-example | src/main/scala/com/tpersson/client/common/utils/ViewModelBase.scala | Scala | apache-2.0 | 295 |
package com.github.edwardsmatt.fpinscala
import org.scalatest._
import OptionValues._
import EitherValues._
import Fp._
class Fp2_2Spec extends FlatSpec with Matchers {
  // Comparator that holds when consecutive elements are strictly increasing.
  val ascending = (a: Int, b: Int) => (a < b)
  "isSorted" should "return false with an unsorted array" in {
    isSorted(Array(1, 3, 2, 5, 4, 6), ascending) should be (false)
  }
  it should "return true if the array is sorted in ascending order" in {
    isSorted(Array(1, 2, 3, 4, 5, 6, 7, 8), ascending) should be (true)
  }
  // Comparator that holds when consecutive elements are strictly decreasing.
  val descending = (a: Int, b: Int) => (a > b)
  it should "return true if the array is sorted in descending order" in {
    isSorted(Array(8, 7, 6, 5, 4, 3, 1), descending) should be (true)
  }
  // Lexicographic comparator for strings.
  val alphabeticalAscending = (a: String, b: String) => (a < b)
  it should "return true if the array is sorted in alphabetical ascending order" in {
    isSorted(Array("aardvark", "apple", "banana"), alphabeticalAscending) should be (true)
  }
  it should "return false if the array is not sorted in alphabetical ascending order" in {
    isSorted(Array("zebra", "aardvark", "apple", "banana"), alphabeticalAscending) should be (false)
  }
}
class Fp2_1Spec extends FlatSpec with Matchers {
  // fib is 1-indexed here: fib(9) = 34, fib(10) = 55, fib(11) = 89.
  "Getting the 9th Fibonnacci number" should "return the 34" in {
    fib(9) should be (34)
  }
  "Getting the 10th Fibonnacci number" should "return the 55" in {
    fib(10) should be (55)
  }
  "Getting the 11th Fibonnacci number" should "return the 89" in {
    fib(11) should be (89)
  }
}
| edwardsmatt/fpins | src/test/scala/FPSpec.scala | Scala | mit | 1,743 |
package scala.macros
import com.intellij.openapi.module.Module
import com.intellij.openapi.roots.OrderEnumerator
import com.intellij.openapi.roots.libraries.Library
import com.intellij.util.Processor
import org.jetbrains.plugins.scala.lang.psi.api.base.ScPrimaryConstructor
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScAnnotation
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScMacroDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.meta.intellij.IdeaUtil
package object intellij {
implicit class ModuleExt(val module: Module) extends AnyVal {
// NOTE(review): this looks like an unfinished stub — the library processor
// always returns true (continue enumeration) without recording anything,
// and the method unconditionally returns false. Confirm intended behavior
// before relying on it.
def hasMacros2: Boolean = {
OrderEnumerator.orderEntries(module).forEachLibrary(new Processor[Library] {
override def process(t: Library): Boolean = {
true
}
})
false
}
}
object psiExt {
implicit class AnnotExt(val annotation: ScAnnotation) extends AnyVal {
// True when the annotation resolves to a definition (primary constructor's
// class or an object) that contains a macro `apply` member.
def isMacro2: Boolean = {
def isApplyMacro(m: ScMember) = m.isInstanceOf[ScMacroDefinition] && m.getName == "apply"
IdeaUtil.safeAnnotationResolve(annotation).exists {
case ScalaResolveResult(c: ScPrimaryConstructor, _) =>
c.containingClass.members.exists(isApplyMacro)
case ScalaResolveResult(o: ScTypeDefinition, _) =>
o.members.exists(isApplyMacro)
case _ => false
}
}
}
}
}
| triplequote/intellij-scala | scala/scala-impl/src/scala/macros/intellij/intellij.scala | Scala | apache-2.0 | 1,506 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js sbt plugin **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.jsenv.rhino
import org.scalajs.jsenv._
import org.scalajs.jsenv.Utils.OptDeadline
import org.scalajs.core.tools.sem.Semantics
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.jsdep.ResolvedJSDependency
import org.scalajs.core.tools.logging._
import org.scalajs.core.tools.linker.LinkingUnit
import org.scalajs.core.tools.linker.backend.OutputMode
import org.scalajs.core.tools.linker.backend.emitter.Emitter
import org.scalajs.core.tools.javascript.ESLevel
import scala.annotation.tailrec
import scala.io.Source
import scala.collection.mutable
import scala.concurrent.{Future, Promise, Await, TimeoutException}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import org.mozilla.javascript._
final class RhinoJSEnv private (
semantics: Semantics,
withDOM: Boolean,
val sourceMap: Boolean
) extends LinkingUnitComJSEnv {
import RhinoJSEnv._
  // Public auxiliary constructor: source maps are enabled by default.
  def this(semantics: Semantics = Semantics.Defaults, withDOM: Boolean = false) =
    this(semantics, withDOM, sourceMap = true)
  /** Returns a copy of this env with source map support set as given. */
  def withSourceMap(sourceMap: Boolean): RhinoJSEnv =
    new RhinoJSEnv(semantics, withDOM, sourceMap)
  /* Although RhinoJSEnv does not use the Emitter directly, it uses
   * ScalaJSCoreLib which uses the same underlying components
   * (ScalaJSClassEmitter, JSDesugaring and CoreJSLibs).
   */
  val symbolRequirements = Emitter.symbolRequirements(semantics, ESLevel.ES5)
  def name: String = "RhinoJSEnv"
  override def loadLinkingUnit(linkingUnit: LinkingUnit): ComJSEnv = {
    // Reject units this env cannot handle before delegating to the default impl.
    verifyUnit(linkingUnit)
    super.loadLinkingUnit(linkingUnit)
  }
  /** Executes code in an environment where the Scala.js library is set up to
   * load its classes lazily.
   *
   * Other .js scripts in the inputs are executed eagerly before the provided
   * `code` is called.
   */
  override def jsRunner(libs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): JSRunner = {
    // No linking unit and no post-libs in this variant.
    new Runner(libs, None, Nil, code)
  }
  override def jsRunner(preLibs: Seq[ResolvedJSDependency],
      linkingUnit: LinkingUnit, postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): JSRunner = {
    verifyUnit(linkingUnit)
    new Runner(preLibs, Some(linkingUnit), postLibs, code)
  }
  /** Synchronous runner: executes everything on the calling thread. */
  private class Runner(preLibs: Seq[ResolvedJSDependency],
      optLinkingUnit: Option[LinkingUnit], postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile) extends JSRunner {
    def run(logger: Logger, console: JSConsole): Unit =
      internalRunJS(preLibs, optLinkingUnit, postLibs,
          code, logger, console, None)
  }
  override def asyncRunner(libs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): AsyncJSRunner = {
    new AsyncRunner(libs, None, Nil, code)
  }
  override def asyncRunner(preLibs: Seq[ResolvedJSDependency],
      linkingUnit: LinkingUnit, postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): AsyncJSRunner = {
    verifyUnit(linkingUnit)
    new AsyncRunner(preLibs, Some(linkingUnit), postLibs, code)
  }
  /** Asynchronous runner: executes the JS code on a dedicated thread and
   *  completes `promise` with the outcome.
   */
  private class AsyncRunner(preLibs: Seq[ResolvedJSDependency],
      optLinkingUnit: Option[LinkingUnit], postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile) extends AsyncJSRunner {
    private[this] val promise = Promise[Unit]
    private[this] var _thread: Thread = _
    def future: Future[Unit] = promise.future
    def start(logger: Logger, console: JSConsole): Future[Unit] = {
      _thread = new Thread {
        override def run(): Unit = {
          try {
            internalRunJS(preLibs, optLinkingUnit, postLibs,
                code, logger, console, optChannel)
            promise.success(())
          } catch {
            case t: Throwable =>
              promise.failure(t)
          }
        }
      }
      _thread.start()
      future
    }
    // Interrupting the thread makes the event loop abort (see eventLoopImpl).
    def stop(): Unit = _thread.interrupt()
    // Overridden by ComRunner to supply a communication channel.
    protected def optChannel(): Option[Channel] = None
  }
  override def comRunner(libs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): ComJSRunner = {
    new ComRunner(libs, None, Nil, code)
  }
  override def comRunner(preLibs: Seq[ResolvedJSDependency],
      linkingUnit: LinkingUnit, postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile): ComJSRunner = {
    verifyUnit(linkingUnit)
    new ComRunner(preLibs, Some(linkingUnit), postLibs, code)
  }
  /** Async runner with a bidirectional JVM<->JS message channel. */
  private class ComRunner(preLibs: Seq[ResolvedJSDependency],
      optLinkingUnit: Option[LinkingUnit], postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile)
      extends AsyncRunner(preLibs, optLinkingUnit, postLibs, code)
      with ComJSRunner {
    private[this] val channel = new Channel
    override protected def optChannel(): Option[Channel] = Some(channel)
    def send(msg: String): Unit = channel.sendToJS(msg)
    def receive(timeout: Duration): String = {
      try {
        channel.recvJVM(timeout)
      } catch {
        // Translate internal channel closure into the public ComJSEnv exception.
        case _: ChannelClosedException =>
          throw new ComJSEnv.ComClosedException
      }
    }
    def close(): Unit = channel.closeJVM()
  }
  /** Runs `code` in a fresh Rhino context on the calling thread.
   *
   *  Sets up the scope (optional DOM, console, setTimeout/setInterval, and
   *  the scalaJSCom object when `optChannel` is given), evaluates pre-libs,
   *  the optional linking unit, post-libs and the main code, then drives the
   *  event loop until no tasks remain (and, with a channel, until it closes).
   */
  private def internalRunJS(preLibs: Seq[ResolvedJSDependency],
      optLinkingUnit: Option[LinkingUnit], postLibs: Seq[ResolvedJSDependency],
      code: VirtualJSFile, logger: Logger, console: JSConsole,
      optChannel: Option[Channel]): Unit = {
    val context = Context.enter()
    try {
      val scope = context.initStandardObjects()
      // Rhino has trouble optimizing some big things, e.g., env.js or ScalaTest
      context.setOptimizationLevel(-1)
      if (withDOM)
        setupDOM(context, scope)
      disableLiveConnect(context, scope)
      setupConsole(context, scope, console)
      val taskQ = setupSetTimeout(context, scope)
      // Optionally setup scalaJSCom
      var recvCallback: Option[String => Unit] = None
      for (channel <- optChannel) {
        setupCom(context, scope, channel,
            setCallback = cb => recvCallback = Some(cb),
            clrCallback = () => recvCallback = None)
      }
      try {
        // Evaluate pre JS libs
        preLibs.foreach(lib => context.evaluateFile(scope, lib.lib))
        // Load LinkingUnit (if present)
        optLinkingUnit.foreach(loadLinkingUnit(context, scope, _))
        // Evaluate post JS libs
        postLibs.foreach(lib => context.evaluateFile(scope, lib.lib))
        // Actually run the code
        context.evaluateFile(scope, code)
        // Start the event loop
        for (channel <- optChannel) {
          comEventLoop(taskQ, channel,
              () => recvCallback.get, () => recvCallback.isDefined)
        }
        // Channel is closed. Fall back to basic event loop
        basicEventLoop(taskQ)
      } catch {
        case e: RhinoException =>
          // Trace here, since we want to be in the context to trace.
          logger.trace(e)
          sys.error(s"Exception while running JS code: ${e.getMessage}")
      }
    } finally {
      // Ensure the channel is closed to release JVM side
      optChannel.foreach(_.closeJS())
      Context.exit()
    }
  }
  /** Loads the env.js DOM emulation (from its webjar) into the scope. */
  private def setupDOM(context: Context, scope: Scriptable): Unit = {
    // Fetch env.rhino.js from webjar
    val name = "env.rhino.js"
    val path = "/META-INF/resources/webjars/envjs/1.2/" + name
    val resource = getClass.getResource(path)
    assert(resource != null, s"need $name as resource")
    // Don't print envjs header
    scope.addFunction("print", args => ())
    // Pipe file to Rhino
    val reader = Source.fromURL(resource).bufferedReader
    context.evaluateReader(scope, reader, name, 1, null);
    // No need to actually define print here: It is captured by envjs to
    // implement console.log, which we'll override in the next statement
  }
  /** Make sure Rhino does not do its magic for JVM top-level packages (#364) */
  private def disableLiveConnect(context: Context, scope: Scriptable): Unit = {
    val PackagesObject =
      ScriptableObject.getProperty(scope, "Packages").asInstanceOf[Scriptable]
    val topLevelPackageIds = ScriptableObject.getPropertyIds(PackagesObject)
    // Delete every top-level Java package name (e.g. `java`, `scala`) from the
    // global scope so JS identifiers cannot accidentally resolve to them.
    for (id <- topLevelPackageIds) (id: Any) match {
      case name: String => ScriptableObject.deleteProperty(scope, name)
      case index: Int => ScriptableObject.deleteProperty(scope, index)
      case _ => // should not happen, I think, but with Rhino you never know
    }
  }
private def setupConsole(context: Context, scope: Scriptable,
console: JSConsole): Unit = {
// Setup console.log
val jsconsole = context.newObject(scope)
jsconsole.addFunction("log", _.foreach(console.log _))
ScriptableObject.putProperty(scope, "console", jsconsole)
}
  /** Installs setTimeout/setInterval/clearTimeout/clearInterval in the scope.
   *
   *  Scheduled callbacks are not run by Rhino itself; they are queued into the
   *  returned priority queue (earliest deadline first) and executed later by
   *  the event loop.
   */
  private def setupSetTimeout(context: Context,
      scope: Scriptable): TaskQueue = {
    // Reverse ordering: the priority queue's head is the *earliest* deadline.
    val ordering = Ordering.by[TimedTask, Deadline](_.deadline).reverse
    val taskQ = mutable.PriorityQueue.empty(ordering)
    def ensure[T: ClassTag](v: AnyRef, errMsg: String) = v match {
      case v: T => v
      case _ => sys.error(errMsg)
    }
    scope.addFunction("setTimeout", args => {
      val cb = ensure[Function](args(0),
          "First argument to setTimeout must be a function")
      // Missing delay argument defaults to 0 ms.
      val deadline =
        args.lift(1).fold(0)(n => Context.toNumber(n).toInt).millis.fromNow
      // Extra arguments beyond the first two are forwarded to the callback.
      val task = new TimeoutTask(deadline, () =>
        cb.call(context, scope, scope, args.slice(2, args.length)))
      taskQ += task
      // The task itself is the handle passed to clearTimeout.
      task
    })
    scope.addFunction("setInterval", args => {
      val cb = ensure[Function](args(0),
          "First argument to setInterval must be a function")
      val interval = Context.toNumber(args(1)).toInt.millis
      val firstDeadline = interval.fromNow
      val task = new IntervalTask(firstDeadline, interval, () =>
        cb.call(context, scope, scope, args.slice(2, args.length)))
      taskQ += task
      task
    })
    scope.addFunction("clearTimeout", args => {
      val task = ensure[TimeoutTask](args(0), "First argument to " +
          "clearTimeout must be a value returned by setTimeout")
      // Canceled tasks stay in the queue and are skipped by the event loop.
      task.cancel()
    })
    scope.addFunction("clearInterval", args => {
      val task = ensure[IntervalTask](args(0), "First argument to " +
          "clearInterval must be a value returned by setInterval")
      task.cancel()
    })
    taskQ
  }
  /** Installs the `scalajsCom` object backing JVM<->JS communication.
   *
   *  `init` registers the JS-side receive callback via `setCallback`;
   *  `send` forwards a message to the JVM; `close` half-closes the channel
   *  on the JS side and clears the callback via `clrCallback`.
   */
  private def setupCom(context: Context, scope: Scriptable, channel: Channel,
      setCallback: (String => Unit) => Unit, clrCallback: () => Unit): Unit = {
    val comObj = context.newObject(scope)
    comObj.addFunction("send", s =>
      channel.sendToJVM(Context.toString(s(0))))
    comObj.addFunction("init", s => s(0) match {
      case f: Function =>
        val cb: String => Unit =
          msg => f.call(context, scope, scope, Array(msg))
        setCallback(cb)
      case _ =>
        sys.error("First argument to init must be a function")
    })
    comObj.addFunction("close", _ => {
      // Tell JVM side we won't send anything
      channel.closeJS()
      // Internally register that we're done
      clrCallback()
    })
    ScriptableObject.putProperty(scope, "scalajsCom", comObj)
  }
  /** Loads a [[LinkingUnit]] with lazy loading of classes and source mapping. */
  private def loadLinkingUnit(context: Context, scope: Scriptable,
      linkingUnit: LinkingUnit): Unit = {
    val loader = new ScalaJSCoreLib(linkingUnit)
    // Setup sourceMapper
    if (sourceMap) {
      // Reuse a pre-existing __ScalaJSEnv object if user code defined one.
      val oldScalaJSenv = ScriptableObject.getProperty(scope, "__ScalaJSEnv")
      val scalaJSenv = oldScalaJSenv match {
        case Scriptable.NOT_FOUND =>
          val newScalaJSenv = context.newObject(scope)
          ScriptableObject.putProperty(scope, "__ScalaJSEnv", newScalaJSenv)
          newScalaJSenv
        case oldScalaJSenv: Scriptable =>
          oldScalaJSenv
      }
      // Hook that rewrites JS stack traces back to Scala positions.
      scalaJSenv.addFunction("sourceMapper", args => {
        val trace = Context.toObject(args(0), scope)
        loader.mapStackTrace(trace, context, scope)
      })
    }
    loader.insertInto(context, scope)
  }
private def basicEventLoop(taskQ: TaskQueue): Unit =
eventLoopImpl(taskQ, sleepWait, () => true)
/** Event loop used when a communication channel is present.
 *
 *  Alternates between running queued timed tasks and delivering messages
 *  received over `channel` to the JS-side callback, until the channel closes
 *  and no work remains. `callback` is only queried once the channel is open.
 */
private def comEventLoop(taskQ: TaskQueue, channel: Channel,
callback: () => String => Unit, isOpen: () => Boolean): Unit = {
// Note: the `if` body is the single eventLoopImpl call below the comment.
if (!isOpen())
// The channel has not been opened yet. Wait for opening.
eventLoopImpl(taskQ, sleepWait, () => !isOpen())
// Once we reach this point, we either:
// - Are done
// - The channel is open
// Guard call to `callback`
if (isOpen()) {
val cb = callback()
try {
@tailrec
def loop(): Unit = {
// Run tasks while waiting on the channel; a Some means a message arrived
// before the next task deadline.
val loopResult = eventLoopImpl(taskQ, channel.recvJS _, isOpen)
loopResult match {
case Some(msg) =>
cb(msg)
loop()
case None if isOpen() =>
// No pending tasks left, but the channel is still open:
// block indefinitely for the next message.
assert(taskQ.isEmpty)
cb(channel.recvJS())
loop()
case None =>
// No tasks left, channel closed
}
}
loop()
} catch {
case _: ChannelClosedException =>
// the JVM side closed the connection
}
}
}
/** Runs an event loop on `taskQ`, using `waitFct` to wait for each deadline.
 *
 *  If `waitFct` returns a Some, this method returns that value immediately.
 *  If `waitFct` returns a None, we assume a sufficient amount of time has
 *  been waited for the Deadline to pass. The event loop then runs the task.
 *
 *  Each iteration, `continue` is queried, whether to continue the loop.
 *
 *  @return A Some returned by `waitFct`, or None if `continue` has
 *      returned false or there are no more tasks (i.e. `taskQ` is empty)
 *  @throws InterruptedException if the thread was interrupted
 */
private def eventLoopImpl[T](taskQ: TaskQueue,
waitFct: Deadline => Option[T], continue: () => Boolean): Option[T] = {
@tailrec
def loop(): Option[T] = {
if (Thread.interrupted())
throw new InterruptedException()
if (taskQ.isEmpty || !continue()) None
else {
val task = taskQ.head
if (task.canceled) {
// Lazy cancellation: canceled tasks stay queued until they surface here.
taskQ.dequeue()
loop()
} else {
waitFct(task.deadline) match {
case result @ Some(_) => result
case None =>
// The time has actually expired
val task = taskQ.dequeue()
// Perform task
task.task()
// Interval tasks re-enqueue themselves with their next deadline
if (task.reschedule())
taskQ += task
loop()
}
}
}
}
loop()
}
/** Wait function for the basic event loop: sleeps until `deadline` has
 *  passed (no-op if it already has) and always reports "no message" (None).
 */
private val sleepWait: Deadline => Option[Nothing] = { deadline =>
  val millisRemaining = deadline.timeLeft.toMillis
  if (millisRemaining > 0)
    Thread.sleep(millisRemaining)
  None
}
// Sanity-checks that the linking unit matches this environment's settings:
// semantics must agree and only ECMAScript 5 output is supported.
private def verifyUnit(linkingUnit: LinkingUnit) = {
require(linkingUnit.semantics == semantics,
"RhinoJSEnv and LinkingUnit must agree on semantics")
require(linkingUnit.esLevel == ESLevel.ES5, "RhinoJSEnv only supports ES5")
}
}
/** Companion holding the channel and task abstractions used by the env. */
object RhinoJSEnv {
final class ClassNotFoundException(className: String) extends Exception(
s"Rhino was unable to load Scala.js class: $className")
/** Communication channel between the Rhino thread and the rest of the JVM */
private class Channel {
// All mutable state below is guarded by `synchronized` on this instance.
// _closedJS / _closedJVM record which side has stopped sending.
private[this] var _closedJS = false
private[this] var _closedJVM = false
// Message queues, one per direction.
private[this] val js2jvm = mutable.Queue.empty[String]
private[this] val jvm2js = mutable.Queue.empty[String]
// Enqueues a JVM->JS message; throws if the JVM side already closed.
def sendToJS(msg: String): Unit = synchronized {
ensureOpen(_closedJVM)
jvm2js.enqueue(msg)
notifyAll()
}
// Enqueues a JS->JVM message; throws if the JS side already closed.
def sendToJVM(msg: String): Unit = synchronized {
ensureOpen(_closedJS)
js2jvm.enqueue(msg)
notifyAll()
}
// Blocking receive on the JVM side with a timeout. Throws
// ChannelClosedException if the JS side closed while the queue is empty,
// TimeoutException if the deadline passes without a message.
def recvJVM(timeout: Duration): String = synchronized {
val deadline = OptDeadline(timeout)
while (js2jvm.isEmpty && ensureOpen(_closedJS) && !deadline.isOverdue)
wait(deadline.millisLeft)
if (js2jvm.isEmpty)
throw new TimeoutException("Timeout expired")
js2jvm.dequeue()
}
// Unbounded blocking receive on the JS side. Messages already queued are
// still delivered even after the JVM side closed (the emptiness check
// short-circuits before ensureOpen).
def recvJS(): String = synchronized {
while (jvm2js.isEmpty && ensureOpen(_closedJVM))
wait()
jvm2js.dequeue()
}
// Deadline-bounded receive on the JS side: None when the deadline expires
// before a message arrives.
def recvJS(deadline: Deadline): Option[String] = synchronized {
var expired = false
while (jvm2js.isEmpty && !expired && ensureOpen(_closedJVM)) {
val timeLeft = deadline.timeLeft.toMillis
if (timeLeft > 0)
wait(timeLeft)
else
expired = true
}
if (expired) None
else Some(jvm2js.dequeue())
}
// Marks the JS side closed and wakes all waiters.
def closeJS(): Unit = synchronized {
_closedJS = true
notifyAll()
}
// Marks the JVM side closed and wakes all waiters.
def closeJVM(): Unit = synchronized {
_closedJVM = true
notifyAll()
}
/** Throws ChannelClosedException if `closed` is set; otherwise returns
 *  true, so it can be chained into while-loop conditions above. */
private def ensureOpen(closed: Boolean): Boolean = {
if (closed)
throw new ChannelClosedException
true
}
}
private class ChannelClosedException extends Exception
// Base class for scheduled work items; cancellation is a soft flag that the
// event loop checks when the task reaches the head of the queue.
private abstract class TimedTask(val task: () => Unit) {
private[this] var _canceled: Boolean = false
def deadline: Deadline
def reschedule(): Boolean
def canceled: Boolean = _canceled
def cancel(): Unit = _canceled = true
}
// One-shot task (setTimeout): never rescheduled.
private final class TimeoutTask(val deadline: Deadline,
task: () => Unit) extends TimedTask(task) {
def reschedule(): Boolean = false
override def toString(): String =
s"TimeoutTask($deadline, canceled = $canceled)"
}
// Repeating task (setInterval): each reschedule advances the deadline by
// a fixed interval (drift-free), until canceled.
private final class IntervalTask(firstDeadline: Deadline,
interval: FiniteDuration, task: () => Unit) extends TimedTask(task) {
private[this] var _deadline = firstDeadline
def deadline: Deadline = _deadline
def reschedule(): Boolean = {
_deadline += interval
!canceled
}
override def toString(): String =
s"IntervalTask($deadline, interval = $interval, canceled = $canceled)"
}
// NOTE(review): the Ordering[TimedTask] that makes this queue yield the
// earliest deadline first is not visible in this file — confirm it is
// defined in implicit scope elsewhere.
private type TaskQueue = mutable.PriorityQueue[TimedTask]
}
| japgolly/scala-js | js-envs/src/main/scala/org/scalajs/jsenv/rhino/RhinoJSEnv.scala | Scala | bsd-3-clause | 18,462 |
package bifrost.transaction
/**
* Created by cykoz on 5/11/2017.
*/
import bifrost.{BifrostGenerators, ValidGenerators}
import org.scalatest.prop.{GeneratorDrivenPropertyChecks, PropertyChecks}
import org.scalatest.{Matchers, PropSpec}
// Property-based test suite scaffold for program method execution.
// NOTE(review): the suite body is empty — no properties are defined yet; the
// mixins only wire up the ScalaTest/ScalaCheck styles and project generators.
class ProgramMethodExecutionSpec extends PropSpec
with PropertyChecks
with GeneratorDrivenPropertyChecks
with Matchers
with BifrostGenerators
with ValidGenerators {
}
| Topl/Project-Bifrost | src/test/scala/bifrost/transaction/ProgramMethodExecutionSpec.scala | Scala | mpl-2.0 | 420 |
package icfpc2013
import java.math.BigInteger
/** Compiles BV-language expression trees into executable functions over
 *  64-bit words. Each syntactic form is translated once into a closure;
 *  evaluation then only walks closures, not the tree.
 */
object BvCompiler {

  /** Evaluation environment: bound identifiers mapped to 64-bit values. */
  type Context = Map[Id, Long]

  /** Compiles a whole program into a unary function of its argument. */
  def apply(p: Program): Long => Long =
    input => apply(p.e)(Map(p.id -> input))

  /** Compiles one expression into a function from environments to values. */
  def apply(e: Expression): Context => Long = e match {
    case Zero => _ => 0
    case One => _ => 1
    case id: Id => env => env(id)

    case If(cond, thenExp, elseExp) =>
      val fCond = apply(cond)
      val fThen = apply(thenExp)
      val fElse = apply(elseExp)
      // BV "if0": zero condition selects the then-branch.
      env => if (fCond(env) == 0L) fThen(env) else fElse(env)

    case Fold(xs, z, x, acc, exp) =>
      val fWord = apply(xs)
      val fSeed = apply(z)
      val fBody = apply(exp)
      env => {
        val word = fWord(env)
        val seed = fSeed(env)
        val byteMask = 0x00000000000000FF
        // Fold over the 8 bytes of `word`, least significant byte first,
        // rebinding the accumulator and current-byte identifiers each step.
        (0 until 8).foldLeft(seed) { (accValue, byteIndex) =>
          val shift = byteIndex * 8
          fBody(env + (acc -> accValue) + (x -> ((word >> shift) & byteMask)))
        }
      }

    case Op1(op, x) =>
      val fX = apply(x)
      op match {
        case Not => env => ~fX(env)
        case Shl1 => env => fX(env) << 1
        case Shr1 => env => fX(env) >>> 1
        case Shr4 => env => fX(env) >>> 4
        case Shr16 => env => fX(env) >>> 16
      }

    case Op2(op, x, y) =>
      val fX = apply(x)
      val fY = apply(y)
      op match {
        case And => env => fX(env) & fY(env)
        case Or => env => fX(env) | fY(env)
        case Xor => env => fX(env) ^ fY(env)
        case Plus => env => add(fX(env), fY(env))
      }
  }

  /** 64-bit addition written as explicit carry propagation over 64 rounds
   *  (deliberately avoids the native `+` operator). */
  @inline private[this] def add(a: Long, b: Long) = {
    var carry = a & b
    var sum = a ^ b
    var round = 0
    while (round < 64) {
      val shiftedCarry = carry << 1
      carry = sum & shiftedCarry
      sum ^= shiftedCarry
      round += 1
    }
    sum
  }
}
| ShiftForward/icfpc2013 | src/main/scala/icfpc2013/compiler.scala | Scala | mit | 1,756 |
/****************************************************************************
* Copyright (C) 2015 Łukasz Szpakowski. *
* *
* This software is licensed under the GNU General Public License *
* v3 or later. See the LICENSE file for the full licensing terms. *
****************************************************************************/
package pl.luckboy.issuenotifier
import android.app.Activity
import android.os.Bundle
import android.widget.TextView
import org.apache.http.HttpStatus
import org.json.JSONObject
import scala.annotation.tailrec
import scala.collection.mutable.PriorityQueue
import scala.collection.mutable.{ Map => MutableMap }
import AndroidUtils._
import DataStorage._
import LogStringUtils._
/** Activity listing issues aggregated across all stored repositories.
 *
 *  Issues are fetched page-by-page per repository and merged into a single
 *  date-ordered stream via a priority queue; an issue is only emitted once we
 *  know no earlier issue can still arrive from an unloaded page.
 */
class IssuePairListActivity extends AbstractIssueListActivity[IssuePair]
{
override protected val mTag = getClass().getSimpleName()
// Repositories whose issues are being merged.
private var mReposes: Vector[Repository] = null
// Current fetch parameters (issue state filter, sort key, next page index).
private var mState: RequestIssueState = null
private var mSorting: IssueSorting.Value = null
private var mPage = 1L
// Fetched-but-not-yet-emitted issues, ordered by created/updated date.
private var mIssuePairQueue: PriorityQueue[IssuePair] = null
// Repositories that may still have unfetched pages.
private var mUnloadedIssuePairReposes: Set[Repository] = null
// Per-repository count of fetched issues still sitting in the queue.
private var mReposIssueCounts: Map[Repository, Long] = null
override def onCreate(bundle: Bundle)
{
super.onCreate(bundle)
log(mTag, "onCreated(): created")
}
override def onDestroy()
{
log(mTag, "onDestroy(): destroying ...")
super.onDestroy()
}
// Adapters required by the abstract base class.
override protected val mRepositoryFromItem = (issuePair: IssuePair) => issuePair.repos
override protected val mIssueInfoFromItem = (issuePair: IssuePair) => issuePair.issueInfo
override protected def jsonObjectFromItem(item: IssuePair) = item.toJSONObject
override protected def itemFromJSONObject(jsonObject: JSONObject) = IssuePair.fromJSONObject(jsonObject)
// Loads the repository list and resets all merge state; returns false if the
// repositories could not be loaded. mSortingByCreated/mReposTimestampInfos/
// mCanShowReposes are inherited fields (defined in the base class).
override protected def initialize() = {
mIssueListTextView.setText(getResources().getString(R.string.issue_list_issues_of_all_reposes))
loadRepositories(this) match {
case Left(e) =>
false
case Right(reposes) =>
mReposes = reposes
val settings = Settings(this)
mState = settings.state
mSortingByCreated = settings.sortingByCreated
mSorting = if(mSortingByCreated) IssueSorting.Created else IssueSorting.Updated
mPage = 1
// Order the queue by the configured date field (created vs. updated).
val issuePairOrdering = new Ordering[IssuePair] {
override def compare(issuePair1: IssuePair, issuePair2: IssuePair) =
if(!mSortingByCreated)
issuePair1.issueInfo.updatedAt.compareTo(issuePair2.issueInfo.updatedAt)
else
issuePair1.issueInfo.createdAt.compareTo(issuePair2.issueInfo.createdAt)
}
mIssuePairQueue = PriorityQueue()(issuePairOrdering)
mUnloadedIssuePairReposes = mReposes.toSet
mReposIssueCounts = mReposes.map { _ -> 0L }.toMap
mReposTimestampInfos = log(mTag, loadPreviousOldRepositoryTimestampInfos(this)).fold(_ => Map(), identity)
mCanShowReposes = true
true
}
}
// Fetches the next page of issues for every repository whose queued issues
// are exhausted, merges them into the queue on a background thread, and
// hands the emittable prefix to `f` along with a "more to load" flag.
override protected def loadItems(f: (Vector[IssuePair], Boolean) => Unit)
{
// Snapshot mutable state before leaving the UI thread.
val tmpReposes = mReposes
val tmpState = mState
val tmpSorting = mSorting
val tmpPage = mPage
val tmpIssuePairQueue = mIssuePairQueue
val tmpUnloadedIssuePairReposes = mUnloadedIssuePairReposes
val tmpReposIssueCounts = mReposIssueCounts
log(mTag, "loadItems(): tmpState = " + tmpState)
log(mTag, "loadItems(): tmpSorting = " + tmpSorting)
log(mTag, "loadItems(): tmpPage = " + tmpPage)
for(p <- tmpReposes.zipWithIndex)
log(mTag, "loadItems(): tmpReposes(" + p._2 + ") = " + stringFromRepository(p._1))
log(mTag, "loadItems(): tmpIssuePairQueue.size = " + tmpIssuePairQueue.size)
for(p <- tmpUnloadedIssuePairReposes.zipWithIndex)
log(mTag, "loadItems(): tmpUnloadedIssuePairReposes(" + p._2 + ") = " + stringFromRepository(p._1))
for(p <- tmpReposIssueCounts)
log(mTag, "loadItems(): tmpReposIssueCounts(" + stringFromRepository(p._1) + ") = " + p._2)
startThreadAndPost(mHandler, mStopFlag) {
() =>
// Fetch one page per repository that has no queued issues left; stop at
// the first error (except 404, which is treated as "no issues").
(Vector() ++ tmpUnloadedIssuePairReposes).foldLeft(Right(Vector()): Either[Exception, Vector[(Repository, Vector[IssueInfo])]]) {
case (left @ Left(_), _) => left
case (Right(issueInfoLists), repos) =>
if(tmpReposIssueCounts.getOrElse(repos, 0L) <= 0L)
MainService.DataFetchers.get(repos.server).map {
dataFetcher =>
log(mTag, "loadItems(): fetching issues from " + stringFromRepository(repos) + " ...")
val res = log(mTag, dataFetcher.fetchIssueInfos(
repos, Some(tmpState), Some(tmpSorting), Some(Direction.Desc), None,
Some(tmpPage), Some(mPerPage), Some(30000)))
log(mTag, "loadItems(): fetched issues from " + stringFromRepository(repos) +
res.fold(_ => "", issueInfos => " (issueInfoCount = " + issueInfos.size + ")"))
res
}.getOrElse(Right(Vector())) match {
case Left(e) =>
e match {
case HttpStatusException(HttpStatus.SC_NOT_FOUND, _) => Right(issueInfoLists)
case _ => Left(e)
}
case Right(issueInfos) => Right(issueInfoLists :+ (repos -> issueInfos))
}
else
// NOTE(review): this discards the issueInfoLists accumulated so far;
// should this be Right(issueInfoLists)? Confirm intended behaviour.
Right(Vector())
} match {
case Left(e) => Left(e)
case Right(issueInfoLists) =>
// Merge the freshly fetched issues into the priority queue.
tmpIssuePairQueue ++= issueInfoLists.flatMap {
case (repos, issueInfos) => issueInfos.map { issueInfo => IssuePair(repos, issueInfo) }
}
// A full page means the repository may have further pages.
val unloadedIssuePairReposes = issueInfoLists.flatMap {
case (repos, issueInfos) => if(issueInfos.size >= mPerPage) Vector(repos) else Vector()
}.toSet
val tmpReposIssueCounts2 = issueInfoLists.map {
case (repos, issueInfos) => repos -> issueInfos.size
}.toMap
val tmpReposIssueCounts3 = tmpReposIssueCounts.map {
case (repos, issueCount) => repos -> (issueCount + tmpReposIssueCounts2.getOrElse(repos, 0))
}
// Drain the emittable prefix of the queue.
dequeueIssuePairQueue(tmpIssuePairQueue, unloadedIssuePairReposes, Vector(), MutableMap() ++ tmpReposIssueCounts3) match {
case (issuePairs, issuePairQueue, reposIssueCounts) =>
val areUnloadedItems = !unloadedIssuePairReposes.isEmpty
Right((issuePairs, issuePairQueue, unloadedIssuePairReposes, reposIssueCounts.toMap, areUnloadedItems))
}
}
} {
case Left(_) =>
showDialog(IssuePairListActivity.DialogFetchingError)
f(Vector(), false)
case Right(tuple) =>
val (issuePairs, issuePairQueue, unloadedIssuePairReposes, reposIssueCounts, areUnloadedItems) = tuple
log(mTag, "loadItems(): issuePairs.size = " + issuePairs.size)
log(mTag, "loadItems(): issuePairQueue.size = " + issuePairQueue.size)
for(p <- unloadedIssuePairReposes.zipWithIndex)
log(mTag, "loadItems(): unloadedIssuePairReposes(" + p._2 + ") = " + stringFromRepository(p._1))
for(p <- reposIssueCounts)
log(mTag, "loadItems(): reposIssueCounts(" + stringFromRepository(p._1) + ") = " + p._2)
log(mTag, "loadItems(): areUnloadedItems = " + areUnloadedItems)
mPage += 1
f(issuePairs, areUnloadedItems)
}
}
// Pops issues off the queue until it empties or we hit an issue from a
// repository whose queued issues are exhausted but which still has unloaded
// pages (a later fetch could contain issues that sort earlier).
@tailrec
private def dequeueIssuePairQueue(queue: PriorityQueue[IssuePair], unloadedIssuePairReposes: Set[Repository], issuePairs: Vector[IssuePair], reposIssueCounts: MutableMap[Repository, Long]): (Vector[IssuePair], PriorityQueue[IssuePair], MutableMap[Repository, Long]) =
if(queue.isEmpty) {
(issuePairs, queue, reposIssueCounts)
} else {
val issuePair = queue.dequeue()
val issueCount = reposIssueCounts.getOrElse(issuePair.repos, 0L) - 1L
reposIssueCounts += (issuePair.repos -> issueCount)
val newIssuePairs = issuePairs :+ issuePair
if(issueCount > 0 || !unloadedIssuePairReposes.contains(issuePair.repos))
dequeueIssuePairQueue(queue, unloadedIssuePairReposes, newIssuePairs, reposIssueCounts)
else
(newIssuePairs, queue, reposIssueCounts)
}
// Maps this activity's dialog ids to dialogs; defers unknown ids to super.
override def onCreateDialog(id: Int, bundle: Bundle) =
id match {
case IssuePairListActivity.DialogFetchingError =>
createErrorDialog(this, getResources().getString(R.string.fetching_error_message))
case IssuePairListActivity.DialogIOError =>
createErrorDialog(this, getResources().getString(R.string.io_error_message))
case _ =>
super.onCreateDialog(id, bundle)
}
}
// Dialog identifiers used with Activity.showDialog / onCreateDialog.
object IssuePairListActivity
{
private val DialogFetchingError = 0
private val DialogIOError = 1
}
| luckboy/IssueNotifier | src/main/scala/pl/luckboy/issuenotifier/IssuePairListActivity.scala | Scala | gpl-3.0 | 9,139 |
/** Positional-notation conversions between arbitrary number bases.
 *
 *  `toDecimal`/`fromDecimal` work on least-significant-digit-first lists;
 *  `x2y` converts most-significant-digit-first lists (as printed).
 *
 *  Fixes: the recursive helpers are now stack-safe (the originals recursed
 *  once per digit and could overflow the stack on long digit lists), and a
 *  stray non-code suffix on the closing brace has been removed.
 */
object Base {
  /** Interprets `li` (least significant digit first) as a number in `base`.
   *  Each digit is reduced modulo `base`, matching the original behaviour.
   */
  def toDecimal (li: List[Int], base: Int) : BigInt =
    // Equivalent to the right fold  x :: xs => (x % base) + base * rest,
    // made stack-safe by folding left over the reversed list.
    li.reverse.foldLeft(BigInt(0)) { (acc, x) =>
      BigInt(x % base) + BigInt(base) * acc
    }

  /** Expands `dec` into digits, least significant first; 0 maps to Nil. */
  def fromDecimal (dec: BigInt, base: Int) : List[Int] = {
    @annotation.tailrec
    def loop(rest: BigInt, digitsRev: List[Int]): List[Int] =
      if (rest == 0) digitsRev.reverse
      else loop(rest / base, (rest % base).toInt :: digitsRev)
    loop(dec, Nil)
  }

  /** Converts `value` (most significant digit first) from base `from` to `to`. */
  def x2y (value: List[Int], from: Int, to: Int) =
    fromDecimal (toDecimal (value.reverse, from), to).reverse

  /** Checks one conversion against its expected printed form; prints a
   *  diagnostic on mismatch (note: the "2dec" line feeds `li` to `toDecimal`
   *  un-reversed, as in the original).
   */
  def test (li: List[Int], from: Int, to: Int, s: String) = {
    val erg= "" + x2y (li, from, to)
    if (! erg.equals (s))
      println ("2dec: " + toDecimal (li, from) + "\n\terg: " + erg + "\n\texp: " + s)
  }

  /** Smoke tests: silent when all conversions match their expectations. */
  def main(args: Array[String]){
    test (List (1, 2, 3, 4), 16, 16, "List(1, 2, 3, 4)")
    test (List (1, 0), 10, 100, "List(10)")
    test (List (41, 15, 156, 123, 254, 156, 141, 2, 24), 256, 16, "List(2, 9, 0, 15, 9, 12, 7, 11, 15, 14, 9, 12, 8, 13, 0, 2, 1, 8)")
    test (List (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 2, 10, "List(1, 2, 3, 7, 9, 4, 0, 0, 3, 9, 2, 8, 5, 3, 8, 0, 2, 7, 4, 8, 9, 9, 1, 2, 4, 2, 2, 3)")
    test (List (41, 42, 43), 256, 36, "List(1, 21, 29, 22, 3)")
  }
}
package cs220.ducks
/** Demonstrates Scala trait mixins: swimming/flying behaviour layered onto
 *  an abstract duck. (Fix: removed stray non-code text that was fused onto
 *  the closing brace and broke compilation.)
 */
object Ducks {
  /** Base duck: every duck must define how it swims and how it flies. */
  abstract class Duck {
    def swim: Unit
    def fly: Unit
  }

  /** A rubber duck swims (via [[Swims]]) but leaves flying abstract. */
  abstract class RubberDuck extends Duck with Swims

  /** Mixin supplying a default swimming behaviour. */
  trait Swims extends Duck {
    def swim = println("I swim!")
  }

  /** Mixin supplying a default flying behaviour. */
  trait Flier extends Duck {
    def fly = println("I fly!")
  }

  // Example: a concrete duck assembled from the mixins at instantiation time.
  // val d = new RubberDuck with Swims with Flier
}
/**
* Copyright 2017 ZuInnoTe (Jörn Franke) <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
/**
*
* This test intregrates HDFS and Spark
*
*/
package org.zuinnote.flink.office.example.excel
import org.apache.hadoop.hdfs.MiniDFSCluster
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FSDataInputStream
import org.apache.hadoop.fs.Path
import java.io.BufferedReader
import java.io.File
import java.io.InputStream
import java.io.InputStreamReader
import java.io.IOException
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.Files
import java.nio.file.FileVisitResult
import java.nio.file.SimpleFileVisitor
import java.util.ArrayList
import java.util.List
import java.util.Locale
import java.text.SimpleDateFormat
import java.text.DecimalFormat
import java.text.NumberFormat
import java.text.DateFormat
import org.apache.hadoop.io.compress.CodecPool
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.io.compress.CompressionCodecFactory
import org.apache.hadoop.io.compress.Decompressor
import org.apache.hadoop.io.compress.SplittableCompressionCodec
import org.apache.hadoop.io.compress.SplitCompressionInputStream
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.Types
import org.apache.flink.types.Row
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.zuinnote.hadoop.office.format.common.util._
import org.zuinnote.hadoop.office.format.common.converter._
import org.zuinnote.hadoop.office.format.common.dao._
import org.zuinnote.hadoop.office.format.common.parser._
import org.zuinnote.hadoop.office.format.common._
import org.zuinnote.flink.office.excel.ExcelFlinkTableSource
import scala.collection.mutable.ArrayBuffer
import org.scalatest.flatspec.AnyFlatSpec;
import org.scalatest._
import matchers.should._
import org.scalatest.{ BeforeAndAfterAll, GivenWhenThen }
/** Integration test: reads an Excel file from a MiniDFS (HDFS) cluster with a
 *  Flink TableSource, writes it back with a TableSink, then re-reads the
 *  result and verifies every cell, column by column.
 */
class FlinkTableSourceTableSinkScalaExcelIntegrationSpec extends AnyFlatSpec with BeforeAndAfterAll with GivenWhenThen with Matchers {
private val appName: String = "example-flinktablesourcetablesink-integrationtest"
private val tmpPrefix: String = "ho-integrationtest"
// Temp directory backing the MiniDFS cluster; removed by a shutdown hook.
private var tmpPath: java.nio.file.Path = _
private val CLUSTERNAME: String ="hcl-minicluster"
private val DFS_INPUT_DIR_NAME: String = "/input"
private val DFS_OUTPUT_DIR_NAME: String = "/output"
private val DEFAULT_OUTPUT_FILENAME: String = "part-00000"
private val DFS_INPUT_DIR : Path = new Path(DFS_INPUT_DIR_NAME)
private val DFS_OUTPUT_DIR : Path = new Path(DFS_OUTPUT_DIR_NAME)
private val NOOFDATANODES: Int =4
private var dfsCluster: MiniDFSCluster = _
private var conf: Configuration = _
private var flinkEnvironment: ExecutionEnvironment = _
private var tableEnvironment: BatchTableEnvironment = _
// Decompressors borrowed from the CodecPool; returned in afterAll().
private var openDecompressors = ArrayBuffer[Decompressor]();
// Starts the MiniDFS cluster and a single-slot local Flink environment.
override def beforeAll(): Unit = {
super.beforeAll()
// Create temporary directory for HDFS base and shutdownhook
// create temp directory
tmpPath = Files.createTempDirectory(tmpPrefix)
// create shutdown hook to remove temp files (=HDFS MiniCluster) after shutdown, may need to rethink to avoid many threads are created
Runtime.getRuntime.addShutdownHook(new Thread("remove temporary directory") {
override def run(): Unit = {
try {
Files.walkFileTree(tmpPath, new SimpleFileVisitor[java.nio.file.Path]() {
override def visitFile(file: java.nio.file.Path,attrs: BasicFileAttributes): FileVisitResult = {
Files.delete(file)
return FileVisitResult.CONTINUE
}
override def postVisitDirectory(dir: java.nio.file.Path, e: IOException): FileVisitResult = {
if (e == null) {
Files.delete(dir)
return FileVisitResult.CONTINUE
}
throw e
}
})
} catch {
case e: IOException => throw new RuntimeException("Error temporary files in following path could not be deleted "+tmpPath, e)
}}})
// create DFS mini cluster
conf = new Configuration()
val baseDir = new File(tmpPath.toString()).getAbsoluteFile()
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath())
val builder = new MiniDFSCluster.Builder(conf)
dfsCluster = builder.numDataNodes(NOOFDATANODES).build()
conf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString())
// create local Flink cluster
flinkEnvironment = ExecutionEnvironment.createLocalEnvironment(1)
tableEnvironment = BatchTableEnvironment.create(flinkEnvironment)
}
// Returns borrowed decompressors and shuts the DFS cluster down.
override def afterAll(): Unit = {
// close decompressor
for ( currentDecompressor <- this.openDecompressors) {
if (currentDecompressor!=null) {
CodecPool.returnDecompressor(currentDecompressor)
}
}
// close dfs cluster
dfsCluster.shutdown()
super.afterAll()
}
// Round-trip test: copy testsimple.xlsx to HDFS, run the read/write job,
// then re-read the written Excel and assert all 9 columns of 6 rows.
"The test excel file" should "be loaded using Flink Table Source and Written to disk using Flink Table Sink" in {
Given("Excel 2013 test file on DFS")
// create input directory
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy input file to DFS
val classLoader = getClass().getClassLoader()
// put testdata on DFS
val fileName: String="testsimple.xlsx"
val fileNameFullLocal=classLoader.getResource(fileName).getFile()
val inputFile=new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("read Excel (using TableSource) and write Excel (using TableSink)")
FlinkTableSourceTableSinkExample.readwriteExcelTableAPI(flinkEnvironment,tableEnvironment,dfsCluster.getFileSystem().getUri().toString()+DFS_INPUT_DIR_NAME,dfsCluster.getFileSystem().getUri().toString()+DFS_OUTPUT_DIR_NAME)
flinkEnvironment.execute("HadoopOffice Flink TableSource/TableSink for Excel files Demonstration")
Then("Excel should be written correctly to DFS")
// Re-read the written file with German locale / US short date formats,
// header row enabled, and the explicit per-column schema below.
val hocr: HadoopOfficeReadConfiguration = new HadoopOfficeReadConfiguration()
val dateFormat: SimpleDateFormat = DateFormat.getDateInstance(DateFormat.SHORT, Locale.US).asInstanceOf[SimpleDateFormat]
val decimalFormat: DecimalFormat = NumberFormat.getInstance(Locale.GERMANY).asInstanceOf[DecimalFormat]
hocr.setReadHeader(true)
hocr.setLocale(Locale.GERMANY)
hocr.setSimpleDateFormat(dateFormat)
hocr.setSimpleDecimalFormat(decimalFormat)
val sourceReWritten: ExcelFlinkTableSource = ExcelFlinkTableSource.builder()
.path(dfsCluster.getFileSystem().getUri().toString() + DFS_OUTPUT_DIR_NAME)
.field("decimalsc1", Types.DECIMAL)
.field("booleancolumn", Types.BOOLEAN)
.field("datecolumn", Types.SQL_DATE)
.field("stringcolumn", Types.STRING)
.field("decimalp8sc3", Types.DECIMAL)
.field("bytecolumn", Types.BYTE)
.field("shortcolumn", Types.SHORT)
.field("intcolumn", Types.INT)
.field("longcolumn", Types.LONG)
.conf(hocr)
.build()
tableEnvironment.registerTableSource("testsimplerewritten", sourceReWritten)
Then("all cells should be read correctly")
// check results of written data!!
val testRewrittenSimpleScan = tableEnvironment.scan("testsimplerewritten")
val testRewrittenSimpleResult = testRewrittenSimpleScan.select("*")
val testRewrittenSimpleDS = testRewrittenSimpleResult.toDataSet[Row]
assert(6 == testRewrittenSimpleDS.count)
val allRows = testRewrittenSimpleDS.collect
// check column1
assert(new java.math.BigDecimal("1.0").compareTo(allRows(0).getField(0).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("1.5").compareTo(allRows(1).getField(0).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("3.4").compareTo(allRows(2).getField(0).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("5.5").compareTo(allRows(3).getField(0).asInstanceOf[java.math.BigDecimal]) == 0)
assert(null == allRows(4).getField(0))
assert(new java.math.BigDecimal("3.4").compareTo(allRows(5).getField(0).asInstanceOf[java.math.BigDecimal]) == 0)
// check column2
assert(true == allRows(0).getField(1).asInstanceOf[Boolean])
assert(false == allRows(1).getField(1).asInstanceOf[Boolean])
assert(false == allRows(2).getField(1).asInstanceOf[Boolean])
assert(false == allRows(3).getField(1).asInstanceOf[Boolean])
assert(null == allRows(4).getField(1))
assert(true == allRows(5).getField(1).asInstanceOf[Boolean])
// check column3
val sdf = new SimpleDateFormat("yyyy-MM-dd")
val expectedDate1 = sdf.parse("2017-01-01")
val expectedDate2 = sdf.parse("2017-02-28")
val expectedDate3 = sdf.parse("2000-02-29")
val expectedDate4 = sdf.parse("2017-03-01")
val expectedDate5 = null
val expectedDate6 = sdf.parse("2017-03-01")
assert(expectedDate1.compareTo(allRows(0).getField(2).asInstanceOf[java.sql.Date]) == 0)
assert(expectedDate2.compareTo(allRows(1).getField(2).asInstanceOf[java.sql.Date]) == 0)
assert(expectedDate3.compareTo(allRows(2).getField(2).asInstanceOf[java.sql.Date]) == 0)
assert(expectedDate4.compareTo(allRows(3).getField(2).asInstanceOf[java.sql.Date]) == 0)
assert(expectedDate5 == allRows(4).getField(2))
assert(expectedDate6.compareTo(allRows(5).getField(2).asInstanceOf[java.sql.Date]) == 0)
// check column4
assert("This is a text" == allRows(0).getField(3).asInstanceOf[String])
assert("Another String" == allRows(1).getField(3).asInstanceOf[String])
assert("10" == allRows(2).getField(3).asInstanceOf[String])
assert("test3" == allRows(3).getField(3).asInstanceOf[String])
assert("test4" == allRows(4).getField(3).asInstanceOf[String])
assert("test5" == allRows(5).getField(3).asInstanceOf[String])
// check column5
assert(new java.math.BigDecimal("10.000").compareTo(allRows(0).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("2.334").compareTo(allRows(1).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("4.500").compareTo(allRows(2).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("11.000").compareTo(allRows(3).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("100.000").compareTo(allRows(4).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
assert(new java.math.BigDecimal("10000.500").compareTo(allRows(5).getField(4).asInstanceOf[java.math.BigDecimal]) == 0)
// check column6
assert(3 == allRows(0).getField(5).asInstanceOf[Byte])
assert(5 == allRows(1).getField(5).asInstanceOf[Byte])
assert(-100 == allRows(2).getField(5).asInstanceOf[Byte])
assert(2 == allRows(3).getField(5).asInstanceOf[Byte])
assert(3 == allRows(4).getField(5).asInstanceOf[Byte])
assert(120 == allRows(5).getField(5).asInstanceOf[Byte])
// check column7
assert(3 == allRows(0).getField(6).asInstanceOf[Short])
assert(4 == allRows(1).getField(6).asInstanceOf[Short])
assert(5 == allRows(2).getField(6).asInstanceOf[Short])
assert(250 == allRows(3).getField(6).asInstanceOf[Short])
assert(3 == allRows(4).getField(6).asInstanceOf[Short])
assert(100 == allRows(5).getField(6).asInstanceOf[Short])
// check column8
assert(100 == allRows(0).getField(7).asInstanceOf[Int])
assert(65335 == allRows(1).getField(7).asInstanceOf[Int])
assert(1 == allRows(2).getField(7).asInstanceOf[Int])
assert(250 == allRows(3).getField(7).asInstanceOf[Int])
assert(5 == allRows(4).getField(7).asInstanceOf[Int])
assert(10000 == allRows(5).getField(7).asInstanceOf[Int])
// check column9
assert(65335 == allRows(0).getField(8).asInstanceOf[Long])
assert(1 == allRows(1).getField(8).asInstanceOf[Long])
assert(250 == allRows(2).getField(8).asInstanceOf[Long])
assert(10 == allRows(3).getField(8).asInstanceOf[Long])
assert(3147483647L == allRows(4).getField(8).asInstanceOf[Long])
assert(10 == allRows(5).getField(8).asInstanceOf[Long])
}
/**
* Read results from the default output directory and default outputfile name
*
* @param numOfRows number of rows to read
*
* @return the first `numOfRows` lines (fewer if the file is shorter)
*/
def readDefaultResults(numOfRows: Int): List[String] = {
val result: ArrayList[String] = new ArrayList[String]()
val defaultOutputfile = new Path(DFS_OUTPUT_DIR_NAME+"/"+DEFAULT_OUTPUT_FILENAME)
val defaultInputStream = openFile(defaultOutputfile)
val reader=new BufferedReader(new InputStreamReader(defaultInputStream))
var i=0
while((reader.ready()) && (i!=numOfRows))
{
result.add(reader.readLine())
i += 1
}
reader.close()
return result
}
/*
* Opens a file using the Hadoop API. It supports uncompressed and compressed files.
*
* @param path path to the file, e.g. file://path/to/file for a local file or hdfs://path/to/file for HDFS file. All filesystem configured for Hadoop can be used
*
* @return InputStream from which the file content can be read
*
* @throws java.io.Exception in case there is an issue reading the file
*
*
*/
def openFile(path: Path): InputStream = {
val codec=new CompressionCodecFactory(conf).getCodec(path)
val fileIn: InputStream=dfsCluster.getFileSystem().open(path)
// check if compressed
if (codec==null) { // uncompressed
return fileIn
} else { // compressed
val decompressor: Decompressor = CodecPool.getDecompressor(codec)
openDecompressors+=decompressor // to be returned later using close
if (codec.isInstanceOf[SplittableCompressionCodec]) {
// splittable codecs need an explicit byte range; read the whole file
val end : Long = dfsCluster.getFileSystem().getFileStatus(path).getLen()
val cIn =codec.asInstanceOf[SplittableCompressionCodec].createInputStream(fileIn, decompressor, 0, end,SplittableCompressionCodec.READ_MODE.CONTINUOUS)
return cIn
} else {
return codec.createInputStream(fileIn,decompressor)
}
}
}
} | ZuInnoTe/hadoopoffice | examples/scala-flinkts-excel/src/it/scala/org/zuinnote/flink/office/example/excel/FlinkTableSourceTableSinkScalaExcelIntegrationSpec.scala | Scala | apache-2.0 | 14,530 |
package zangelo.spray.json.annotation
import scala.annotation.StaticAnnotation
/** Static annotation carrying optional key prefix/suffix for a field.
 *  NOTE(review): by analogy with Jackson's @JsonUnwrapped this presumably
 *  flattens the annotated field's properties into the enclosing JSON object —
 *  confirm against the macro implementation that consumes this annotation.
 */
case class JsonUnwrapped(prefix:String = "", suffix:String = "") extends StaticAnnotation
| zackangelo/spray-json-macros | src/main/scala/zangelo/spray/json/annotation/JsonUnwrapped.scala | Scala | apache-2.0 | 171 |
/*
* Copyright (C) 2014-2015 by Nokia.
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package wookie.yql.finance
import argonaut._
import Argonaut._
import scalaz._
import wookie.yql.common.CodecsUtils
// One stock quote sample. `date` is an epoch-millis timestamp taken at decode
// time (see StockCodecs.decoder), not parsed from the payload.
// `captialization` (sic) is kept misspelled: the JSON codec serializes it
// under this exact field name, so renaming would break compatibility.
case class StockQuote(price: Double, volume: Long, symbol: String, change: Double, date: Long,
captialization: String)
/** Argonaut JSON codecs for [[StockQuote]] values returned by the YQL API. */
object StockCodecs {
// Decodes the YQL envelope query.results.quote (an array) into quotes,
// stamping each with the wall-clock time at which decoding started.
implicit val decoder: DecodeJson[List[StockQuote]] = DecodeJson {
c =>
val currentTime = System.currentTimeMillis()
val curs = c --\\ "query" --\\ "results" --\\ "quote"
CodecsUtils.loopOverArray(curs.downArray, decodeStocks(currentTime), DecodeResult(\\/-(List[StockQuote]())))
}
// Decodes one quote object; all numeric fields arrive as JSON strings.
// NOTE(review): .toDouble/.toLong throw on malformed input rather than
// producing a DecodeResult failure — confirm this is acceptable upstream.
def decodeStocks(dateTime: Long): ACursor => DecodeResult[StockQuote] = {
curs =>
for {
price <- (curs --\\ "LastTradePriceOnly").as[String]
volume <- (curs --\\ "Volume").as[String]
symbol <- (curs --\\ "Symbol").as[String]
change <- (curs --\\ "Change").as[String]
capitalization <- (curs --\\ "MarketCapitalization").as[String]
} yield {
StockQuote(price.toDouble, volume.toLong, symbol, change.toDouble,
dateTime, capitalization)
}
}
// Encoder field names mirror the case-class fields, including the
// historical "captialization" spelling (kept for wire compatibility).
implicit val encoder: EncodeJson[StockQuote] = {
casecodec6(StockQuote.apply, StockQuote.unapply)("price", "volume", "symbol",
"change", "date", "captialization")
}
}
| elyast/wookie | examples/src/main/scala/wookie/yql/finance/StockQuote.scala | Scala | apache-2.0 | 1,988 |
package org.jetbrains.plugins.scala.lang.refactoring.mock
import com.intellij.openapi.editor._
import com.intellij.openapi.editor.markup.TextAttributes
import com.intellij.openapi.editor.event.CaretListener
import java.util
/**
* Pavel Fatin
*/
class CaretModelStub extends CaretModel {
  // Movement and listener registration: deliberate no-ops for the tests.
  override def moveToOffset(offset: Int): Unit = ()
  override def moveToOffset(offset: Int, locateBeforeSoftWrap: Boolean): Unit = ()
  override def moveToVisualPosition(pos: VisualPosition): Unit = ()
  override def moveToLogicalPosition(pos: LogicalPosition): Unit = ()
  override def moveCaretRelatively(columnShift: Int, lineShift: Int, withSelection: Boolean, blockSelection: Boolean, scrollToCaret: Boolean): Unit = ()
  override def addCaretListener(listener: CaretListener): Unit = ()
  override def removeCaretListener(listener: CaretListener): Unit = ()

  // Accessors return neutral values (zero / null / false).
  override def getOffset: Int = 0
  override def getVisualLineStart: Int = 0
  override def getVisualLineEnd: Int = 0
  override def getVisualPosition: VisualPosition = null
  override def getLogicalPosition: LogicalPosition = null
  override def getTextAttributes: TextAttributes = null
  override def isUpToDate: Boolean = false
  override def supportsMultipleCarets(): Boolean = false

  // Multi-caret API: unimplemented — throws NotImplementedError if reached.
  override def setCaretsAndSelections(caretStates: util.List[CaretState]): Unit = ???
  override def getCaretsAndSelections: util.List[CaretState] = ???
  override def getCaretCount: Int = ???
  override def addCaret(pos: VisualPosition): Caret = ???
  override def removeCaret(caret: Caret): Boolean = ???
  override def removeSecondaryCarets(): Unit = ???
  override def getAllCarets: util.List[Caret] = ???
  override def getCaretAt(pos: VisualPosition): Caret = ???
  override def getPrimaryCaret: Caret = ???
  override def getCurrentCaret: Caret = ???
  override def runBatchCaretOperation(runnable: Runnable): Unit = ???
  override def runForEachCaret(action: CaretAction): Unit = ???
}
// Vim has many normal-mode keys to ease the pain of repetition, but `.` is
// the one that is (probably) used most often. `.` will repeat the last
// "action" taken. Some quick examples of what counts as "one action":
// - dropping into insert mode to insert text
// - `dd`, deleting a line
// - `di"`, deleting inner quotes
// - `cit`, changing inner tags
// - `O`, insert one empty line above current one
// Suppose you have a series of function calls, all of which took one parameter
// which is a string. You are refactoring the code, and have decided that you
// can and no longer need the string as a parameter.
// One very quick way to do it would be:
// `f(di(j.j.j.`
// - `f(` => forward to next occurrence of (
// - `di(` => delete inner (
// - `j.` => move down, repeat last action
val v1 = foo("bar 1")
val v2 = foo("bar 2")
val v3 = foo("bar 3")
val v4 = foo("foo 4")
// A new technical manager really loves hungarian notation, so you must refactor
// the following variables and append 'Int' to all of them.
// `flaInt<Esc>W.W.W.W.`
// - `fl` => forward to next occurrence of l
// - `aInt<Esc> => append 'Int' at the cursor, drop back to Normal mode
// - `W.` => move to next WORD (WORD is delimited only by space, word can be
// delimited by punctuation as well), repeat last action
foo(l, m, n, o, p)
// You are converting the following snippet from Scala to Java (WHY????). The
// first step is to insert semi-colons at the ends of lines.
// `A;<Esc>j.j.`
// - `A;<Esc>` => Append a semicolon at the end of the line, drop back to Normal mode
// - `j.` => move down, repeat last action
val v1 = foo("bar 1")
val v2 = foo("bar 222237")
val v3 = v1 + v2
| alexkuang/vim-ll | demo/move-op-repeat.scala | Scala | mit | 1,688 |
import scala.concurrent.duration._
import org.scalatest.FunSuite
import akka.actor._
import scala.concurrent._
import akka.pattern.ask
import akka.util.Timeout
import scala.util.Random
/**
 * Shared helpers for the banking actor tests: bank/account construction and a
 * blocking wait for all in-flight transactions to settle.
 */
object TestHelper {

  /** Creates a bank via [[BankManager]] and resolves its [[Bank]] instance by asking the actor. */
  def createBank(bankId: String): (ActorRef, Bank) = {
    implicit val timeout = Timeout(5 seconds)
    val ref: ActorRef = BankManager.createBank(bankId)
    val bank = Await.result((ref ? IdentifyActor).mapTo[Bank], 10 seconds)
    (ref, bank)
  }

  /** Opens an account with the given opening balance in an existing bank and resolves its [[Account]]. */
  def createBankAccount(bankId: String, amount: Double): (ActorRef, Account) = {
    implicit val timeout = Timeout(5 seconds)
    val bankRef: ActorRef = BankManager.findBank(bankId)
    val accountRef = Await.result((bankRef ? CreateAccountRequest(amount)).mapTo[ActorRef], 10 seconds)
    val account = Await.result((accountRef ? IdentifyActor).mapTo[Account], 10 seconds)
    (accountRef, account)
  }

  /**
   * Polls every 500 ms (always sleeping at least once, like the original
   * busy-wait) until every account reports all its transactions completed.
   */
  def waitUntilAllTransactionsAreCompleted(accounts: List[Account]): Unit = {
    do {
      Thread.sleep(500)
    } while (!accounts.forall(_.allTransactionsCompleted))
  }
}
class Test01 extends FunSuite {
  // Creating a bank through BankManager should yield a Bank actor carrying the requested id.
  test("Add new bank") {
    val bankRef: ActorRef = BankManager.createBank("2001")
    implicit val timeout = Timeout(5 seconds)
    // Resolve the Bank instance behind the ActorRef via the IdentifyActor protocol.
    val bank: Bank = Await.result(ask(bankRef, IdentifyActor).mapTo[Bank], 10 seconds)
    assert(bank.bankId == "2001")
  }
}
class Test02 extends FunSuite {
  // A freshly created account gets the first generated id ("1001") and carries its opening balance.
  test("Add new bank account") {
    val bank: ActorRef = BankManager.createBank("2002")
    val (accountRef, account) = TestHelper.createBankAccount("2002", 1000)
    assert(account.accountId == "1001" && account.getBalanceAmount == 1000)
  }
}
class Test03 extends FunSuite {
  // Intra-bank transfers address the receiver by plain accountId (no bank prefix).
  test("Valid transaction within same bank, accounts should have correct balance.") {
    val bank: ActorRef = BankManager.createBank("2003")
    val (accountRef1, account1) = TestHelper.createBankAccount("2003", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("2003", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.accountId, 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    // 1000 - 200 = 800 for the sender, 1000 + 200 = 1200 for the receiver.
    assert(account1.getBalanceAmount == 800 && account2.getBalanceAmount == 1200)
  }
}
class Test04 extends FunSuite {
  // Cross-bank transfers address the receiver by its full address (bank id + account id).
  test("Valid transaction between two different banks, accounts should have correct balance.") {
    val (bank1Ref, bank1): (ActorRef, Bank) = TestHelper.createBank("2010")
    val (bank2Ref, bank2): (ActorRef, Bank) = TestHelper.createBank("2011")
    val (accountRef1, account1) = TestHelper.createBankAccount("2010", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("2011", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.getFullAddress, 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    assert(account1.getBalanceAmount == 800 && account2.getBalanceAmount == 1200)
  }
}
class Test05 extends FunSuite {
  // After a successful cross-bank transfer, every entry in the sender's
  // transaction list must be marked completed and successful.
  test("Valid transaction between two different banks, sender transaction list should have the correct status information.") {
    val (bank1Ref, bank1): (ActorRef, Bank) = TestHelper.createBank("2015")
    val (bank2Ref, bank2): (ActorRef, Bank) = TestHelper.createBank("2016")
    val (accountRef1, account1) = TestHelper.createBankAccount("2015", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("2016", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.getFullAddress, 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(t.isSuccessful)
    })
  }
}
class Test06 extends FunSuite {
  // One transfer in each direction between two banks; net effect on each balance
  // is the difference of the two amounts.
  test("Two valid transactions back and forth between two banks, account balances should be correct.") {
    val (bank1Ref, bank1): (ActorRef, Bank) = TestHelper.createBank("6000")
    val (bank2Ref, bank2): (ActorRef, Bank) = TestHelper.createBank("6001")
    val (accountRef1, account1) = TestHelper.createBankAccount("6000", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("6001", 1000)
    implicit val timeout = Timeout(5 seconds)
    // (Removed unused local `sendToAddress`; transfers already use getFullAddress.)
    account1.transferTo(account2.getFullAddress, 250)
    account2.transferTo(account1.getFullAddress, 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    // 1000 - 250 + 200 = 950 and 1000 + 250 - 200 = 1050.
    assert(account1.getBalanceAmount == 950 && account2.getBalanceAmount == 1050)
  }
}
class Test07 extends FunSuite {
  // Four transfers alternating direction between two banks.
  test("Four valid transactions back and forth between two banks, account balances should be correct.") {
    val (bank1Ref, bank1): (ActorRef, Bank) = TestHelper.createBank("7000")
    val (bank2Ref, bank2): (ActorRef, Bank) = TestHelper.createBank("7001")
    val (accountRef1, account1) = TestHelper.createBankAccount("7000", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("7001", 1000)
    implicit val timeout = Timeout(5 seconds)
    // (Removed unused local `sendToAddress`; transfers already use getFullAddress.)
    account1.transferTo(account2.getFullAddress, 250)
    account2.transferTo(account1.getFullAddress, 50)
    account1.transferTo(account2.getFullAddress, 100)
    account2.transferTo(account1.getFullAddress, 150)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    // 1000 - 250 + 50 - 100 + 150 = 850 and 1000 + 250 - 50 + 100 - 150 = 1150.
    assert(account1.getBalanceAmount == 850 && account2.getBalanceAmount == 1150)
  }
}
class Test08 extends FunSuite {
  // Five cross-bank transfers among three accounts; every transaction must
  // succeed and balances must land on 980 / 1120 / 900.
  test("Several transactions between two banks and three accounts, all transaction lists should hold correct status information, and account balances should be correct.") {
    val (bank1Ref, bank1): (ActorRef, Bank) = TestHelper.createBank("8000")
    val (bank2Ref, bank2): (ActorRef, Bank) = TestHelper.createBank("8001")
    val (accountRef1, account1) = TestHelper.createBankAccount("8000", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("8001", 1000)
    val (accountRef3, account3) = TestHelper.createBankAccount("8001", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.getFullAddress, 250)
    account1.transferTo(account2.getFullAddress, 150)
    account2.transferTo(account1.getFullAddress, 130)
    account2.transferTo(account1.getFullAddress, 150)
    account3.transferTo(account1.getFullAddress, 100)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2, account3))
    account1.getTransactions.foreach(t => {
      assert(t.isSuccessful)
    })
    account2.getTransactions.foreach(t => {
      assert(t.isSuccessful)
    })
    account3.getTransactions.foreach(t => {
      assert(t.isSuccessful)
    })
    // acc1: 1000 - 250 - 150 + 130 + 150 + 100 = 980
    // acc2: 1000 + 250 + 150 - 130 - 150 = 1120
    // acc3: 1000 - 100 = 900
    assert(account1.getBalanceAmount == 980
      && account2.getBalanceAmount == 1120
      && account3.getBalanceAmount == 900)
  }
}
class Test09 extends FunSuite {
  // Every transfer below is invalid: the amount either exceeds the 1000 balance
  // or is negative (one is even a self-transfer). All transactions must complete
  // unsuccessfully and no balance may move.
  test("Failed transactions should lead to correct status information in all transaction lists, and no balances should be affected.") {
    val bank: ActorRef = BankManager.createBank("9000")
    val (accountRef1, account1) = TestHelper.createBankAccount("9000", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("9000", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.accountId, 2000)
    account2.transferTo(account2.accountId, 2000)
    account2.transferTo(account1.accountId, -2000)
    account2.transferTo(account1.accountId, 2000)
    account1.transferTo(account2.accountId, -400)
    account1.transferTo(account2.accountId, -300)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(!t.isSuccessful)
    })
    account2.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(!t.isSuccessful)
    })
    assert(account1.getBalanceAmount == 1000)
    assert(account2.getBalanceAmount == 1000)
  }
}
class Test10 extends FunSuite {
  // A successful intra-bank transfer must be marked completed+successful on both sides.
  test("Valid transactions within one bank, transaction list should have correct status information.") {
    val bank: ActorRef = BankManager.createBank("1000")
    val (accountRef1, account1) = TestHelper.createBankAccount("1000", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("1000", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.accountId, 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(t.isSuccessful)
    })
    account2.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(t.isSuccessful)
    })
  }
}
class Test11 extends FunSuite {
  // Transferring more than the available balance must complete as a failure.
  test("Invalid transaction within one bank, transaction lists should have correct status information.") {
    val bank: ActorRef = BankManager.createBank("1100")
    val (accountRef1, account1) = TestHelper.createBankAccount("1100", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("1100", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.accountId, 1500)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(!t.isSuccessful)
    })
  }
}
class Test12 extends FunSuite {
  // A failed (overdraft) transfer must leave both balances untouched.
  test("Invalid transactions within one bank, account balances should not be affected.") {
    val bank: ActorRef = BankManager.createBank("1200")
    val (accountRef1, account1) = TestHelper.createBankAccount("1200", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("1200", 1000)
    implicit val timeout = Timeout(5 seconds)
    account1.transferTo(account2.accountId, 1500)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2))
    assert(account1.getBalanceAmount == 1000)
    assert(account2.getBalanceAmount == 1000)
  }
}
class Test13 extends FunSuite {
  // BankManager must keep two banks apart and resolve each by its own id.
  test("Create and find two different banks.") {
    BankManager.createBank("1301")
    BankManager.createBank("1302")
    implicit val timeout = Timeout(5 seconds)
    val bank1: Bank = Await.result(ask(BankManager.findBank("1301"), IdentifyActor).mapTo[Bank], 10 seconds)
    val bank2: Bank = Await.result(ask(BankManager.findBank("1302"), IdentifyActor).mapTo[Bank], 10 seconds)
    assert(bank1.bankId.equals("1301") && bank2.bankId.equals("1302"))
  }
}
class Test14 extends FunSuite {
  // Cross-bank transfers among three accounts; balances are verified both via
  // the BalanceRequest protocol and via the Account instances themselves.
  test("Valid transactions between two banks using full account address, account balances should be correct. Also, sending a BalanceRequest to an account should yield the correct balance.") {
    val bank1: ActorRef = BankManager.createBank("1400")
    val bank2: ActorRef = BankManager.createBank("1401")
    val (accountRef1, account1) = TestHelper.createBankAccount("1400", 1000)
    val (accountRef2, account2) = TestHelper.createBankAccount("1401", 1000)
    val (accountRef3, account3) = TestHelper.createBankAccount("1401", 1000)
    implicit val timeout = Timeout(30 seconds)
    account1.transferTo(account2.getFullAddress, 100)
    account2.transferTo(account3.getFullAddress, 4)
    account3.transferTo(account2.getFullAddress, 100)
    account3.transferTo(account1.getFullAddress, 8)
    // Bug fix: account3 both sends and receives, so it must be part of the wait
    // list — otherwise the balance reads below can race its pending transfers.
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1, account2, account3))
    val acc1Balance: Double = Await.result(ask(accountRef1, BalanceRequest).mapTo[Double], 10 seconds)
    val acc2Balance: Double = Await.result(ask(accountRef2, BalanceRequest).mapTo[Double], 10 seconds)
    val acc3Balance: Double = Await.result(ask(accountRef3, BalanceRequest).mapTo[Double], 10 seconds)
    // Money is conserved: 3 * 1000 in total.
    assert(acc1Balance + acc2Balance + acc3Balance == 3000, "Total balance not equal to 3000")
    // acc1: 1000 - 100 + 8 = 908; acc2: 1000 + 100 - 4 + 100 = 1196; acc3: 1000 + 4 - 100 - 8 = 896.
    assert(acc1Balance == 908, "Account 1 balance should be 908 at the end")
    assert(acc1Balance == account1.getBalanceAmount, "Should be equal to ")
    assert(acc2Balance == 1196 && acc2Balance == account2.getBalanceAmount)
    // Bug fix: the original re-checked account2 here; verify account3 against
    // its own Account state instead.
    assert(acc3Balance == 896 && acc3Balance == account3.getBalanceAmount)
  }
}
class Test15 extends FunSuite {
  // Stress-style test: 4 banks x 4 accounts, 9 random transfers, then check
  // that money is conserved and that at least one balance actually moved.
  test("Several valid transactions between several accounts in several banks. All information should be correct.") {
    var accounts = List[(ActorRef, Account)]()
    val numberOfBanks = 4
    val numberOfAccountsPerBank = 4
    // Banks get ids 1501..1504; each bank's accounts get ids 1001..1004.
    for (bank <- 1 to numberOfBanks) {
      val b: ActorRef = BankManager.createBank(s"150$bank")
      for (account <- 1 to numberOfAccountsPerBank) {
        val a: (ActorRef, Account) = TestHelper.createBankAccount(s"150$bank", 1000)
        accounts = a :: accounts
      }
    }
    // Fire 9 transfers from random senders to random full addresses.
    // Bug fix: index over the whole list (accounts.length == 16) instead of the
    // hard-coded 15, which silently excluded the last account as a sender.
    for (x <- 1 until 10) {
      val randomBankId = s"150${Random.nextInt(numberOfBanks) + 1}"
      val randomAccountId = s"100${Random.nextInt(numberOfAccountsPerBank) + 1}"
      val randomAmount = Random.nextInt(1000)
      accounts(Random.nextInt(accounts.length))._2.transferTo(s"$randomBankId$randomAccountId", randomAmount)
    }
    val accountsList = accounts.map((acc: (ActorRef, Account)) => acc._2)
    TestHelper.waitUntilAllTransactionsAreCompleted(accountsList)
    val balances = accountsList.map((acc: Account) => acc.getBalanceAmount)
    // Money is only moved, never created or destroyed: 16 accounts * 1000.
    assert(balances.sum == 16000)
    // NOTE(review): assumes at least one random transfer changed a balance
    // (same assumption as the original var/foreach version it replaces).
    assert(balances.exists(_ != 1000))
  }
}
class Test16 extends FunSuite {
  // "99998888" — presumably bank id "9999" + account id "8888"; the bank does
  // not exist, so the transfer must fail and the money must be returned.
  test("Transaction to a non-existing bank should fail, and account balance should not be affected and transaction list should hold correct status information.") {
    val bank1: ActorRef = BankManager.createBank("1600")
    val (accountRef1, account1) = TestHelper.createBankAccount("1600", 1000)
    account1.transferTo("99998888", 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(!t.isSuccessful)
    })
    assert(account1.getBalanceAmount == 1000, "Money not returned when transactions failed, 1000 != " + account1.getBalanceAmount)
  }
}
class Test17 extends FunSuite {
  // "9999" is a bare account id within the same bank; no such account exists,
  // so the transfer must fail and the balance must stay intact.
  test("Transaction to a non-existing account should fail, account balance should not be affected and transaction list should hold correct status information.") {
    val bank1: ActorRef = BankManager.createBank("1700")
    val (accountRef1, account1) = TestHelper.createBankAccount("1700", 1000)
    account1.transferTo("9999", 200)
    TestHelper.waitUntilAllTransactionsAreCompleted(List(account1))
    account1.getTransactions.foreach(t => {
      assert(t.isCompleted)
      assert(!t.isSuccessful)
    })
    assert(account1.getBalanceAmount == 1000)
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.{Properties, Random}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.mockito.Matchers.{any, anyInt, anyString}
import org.mockito.Mockito.{mock, never, spy, times, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.apache.spark._
import org.apache.spark.internal.config
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.SerializerInstance
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{AccumulatorV2, ManualClock, Utils}
/**
 * DAGScheduler stand-in that records task lifecycle callbacks into the owning
 * [[FakeTaskScheduler]] instead of driving a real DAG. Executor events are
 * deliberate no-ops.
 */
class FakeDAGScheduler(sc: SparkContext, taskScheduler: FakeTaskScheduler)
  extends DAGScheduler(sc) {

  // Record the started task's index so tests can assert on scheduling order.
  override def taskStarted(task: Task[_], taskInfo: TaskInfo) {
    taskScheduler.startedTasks += taskInfo.index
  }

  // Record the end reason per task index (Success, TaskResultLost, ...).
  override def taskEnded(
      task: Task[_],
      reason: TaskEndReason,
      result: Any,
      accumUpdates: Seq[AccumulatorV2[_, _]],
      taskInfo: TaskInfo) {
    taskScheduler.endedTasks(taskInfo.index) = reason
  }

  override def executorAdded(execId: String, host: String) {}

  override def executorLost(execId: String, reason: ExecutorLossReason) {}

  // Record which task sets were aborted.
  override def taskSetFailed(
      taskSet: TaskSet,
      reason: String,
      exception: Option[Throwable]): Unit = {
    taskScheduler.taskSetsFailed += taskSet.id
  }
}
/**
 * Test-only mutable registry mapping hostnames to rack identifiers, used to
 * stub out rack resolution in the scheduler tests.
 */
object FakeRackUtil {
  private val hostToRack = mutable.HashMap.empty[String, String]

  /** Forget every recorded host-to-rack assignment. */
  def cleanUp(): Unit = hostToRack.clear()

  /** Record (or overwrite) the rack for `host`. */
  def assignHostToRack(host: String, rack: String): Unit = hostToRack.update(host, rack)

  /** The rack previously assigned to `host`, if any. */
  def getRackForHost(host: String): Option[String] = hostToRack.get(host)
}
/**
* A mock TaskSchedulerImpl implementation that just remembers information about tasks started and
* feedback received from the TaskSetManagers. Note that it's important to initialize this with
* a list of "live" executors and their hostnames for isExecutorAlive and hasExecutorsAliveOnHost
* to work, and these are required for locality in TaskSetManager.
*/
class FakeTaskScheduler(sc: SparkContext, liveExecutors: (String, String)* /* execId, host */)
  extends TaskSchedulerImpl(sc)
{
  // Bookkeeping that the tests assert against.
  val startedTasks = new ArrayBuffer[Long]
  val endedTasks = new mutable.HashMap[Long, TaskEndReason]
  val finishedManagers = new ArrayBuffer[TaskSetManager]
  val taskSetsFailed = new ArrayBuffer[String]

  // execId -> host for the "live" executors handed to the constructor.
  val executors = new mutable.HashMap[String, String]

  // Register every live executor (also populates host/executor indices).
  for ((execId, host) <- liveExecutors) {
    addExecutor(execId, host)
  }

  // Index hosts by rack for the rack-locality lookups below.
  for ((execId, host) <- liveExecutors; rack <- getRackForHost(host)) {
    hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
  }

  dagScheduler = new FakeDAGScheduler(sc, this)

  // Remove an executor and clean up every index that references it
  // (host -> executors, rack -> hosts). Empty rack entries are dropped.
  def removeExecutor(execId: String) {
    executors -= execId
    val host = executorIdToHost.get(execId)
    assert(host != None)
    val hostId = host.get
    val executorsOnHost = hostToExecutors(hostId)
    executorsOnHost -= execId
    for (rack <- getRackForHost(hostId); hosts <- hostsByRack.get(rack)) {
      hosts -= hostId
      if (hosts.isEmpty) {
        hostsByRack -= rack
      }
    }
  }

  override def taskSetFinished(manager: TaskSetManager): Unit = finishedManagers += manager

  override def isExecutorAlive(execId: String): Boolean = executors.contains(execId)

  override def hasExecutorsAliveOnHost(host: String): Boolean = executors.values.exists(_ == host)

  override def hasHostAliveOnRack(rack: String): Boolean = {
    hostsByRack.get(rack) != None
  }

  // Register an executor and keep all indices consistent.
  def addExecutor(execId: String, host: String) {
    executors.put(execId, host)
    val executorsOnHost = hostToExecutors.getOrElseUpdate(host, new mutable.HashSet[String])
    executorsOnHost += execId
    executorIdToHost += execId -> host
    for (rack <- getRackForHost(host)) {
      hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
    }
  }

  // Rack resolution is delegated to the global FakeRackUtil registry.
  override def getRackForHost(value: String): Option[String] = FakeRackUtil.getRackForHost(value)
}
/**
* A Task implementation that results in a large serialized task.
*/
class LargeTask(stageId: Int) extends Task[Array[Byte]](stageId, 0, 0) {
  // Buffer sized exactly at the "large task" warning threshold so that
  // serializing this task trips the size warning.
  val randomBuffer = new Array[Byte](TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024)
  // Fixed seed keeps the payload deterministic across runs.
  val random = new Random(0)
  random.nextBytes(randomBuffer)

  override def runTask(context: TaskContext): Array[Byte] = randomBuffer
  override def preferredLocations: Seq[TaskLocation] = Seq[TaskLocation]()
}
class TaskSetManagerSuite extends SparkFunSuite with LocalSparkContext with Logging {
import TaskLocality.{ANY, PROCESS_LOCAL, NO_PREF, NODE_LOCAL, RACK_LOCAL}
private val conf = new SparkConf
val LOCALITY_WAIT_MS = conf.getTimeAsMs("spark.locality.wait", "3s")
val MAX_TASK_FAILURES = 4
var sched: FakeTaskScheduler = null
override def beforeEach(): Unit = {
  super.beforeEach()
  // FakeRackUtil is global mutable state — reset it so tests stay independent.
  FakeRackUtil.cleanUp()
  sched = null
}
override def afterEach(): Unit = {
  super.afterEach()
  // Stop the fake scheduler (and its DAG scheduler) if the test created one.
  if (sched != null) {
    sched.dagScheduler.stop()
    sched.stop()
    sched = null
  }
}
// A single task without locality preferences is offered immediately at NO_PREF
// level, and the manager finishes once that task succeeds.
test("TaskSet with no preferences") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  val taskSet = FakeTask.createTaskSet(1)
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  val accumUpdates = taskSet.tasks.head.metrics.internalAccums

  // Offer a host with NO_PREF as the constraint,
  // we should get a nopref task immediately since that's what we only have
  val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOption.isDefined)

  clock.advance(1)
  // Tell it the task has finished
  manager.handleSuccessfulTask(0, createTaskResult(0, accumUpdates))
  assert(sched.endedTasks(0) === Success)
  assert(sched.finishedManagers.contains(manager))
}
// Three no-preference tasks are handed out on successive offers; the manager
// only counts as finished after ALL of them have completed.
test("multiple offers with no preferences") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  val taskSet = FakeTask.createTaskSet(3)
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
  val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
    task.metrics.internalAccums
  }

  // First three offers should all find tasks
  for (i <- 0 until 3) {
    val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
    assert(taskOption.isDefined)
    val task = taskOption.get
    assert(task.executorId === "exec1")
  }
  assert(sched.startedTasks.toSet === Set(0, 1, 2))

  // Re-offer the host -- now we should get no more tasks
  assert(manager.resourceOffer("exec1", "host1", NO_PREF) === None)

  // Finish the first two tasks
  manager.handleSuccessfulTask(0, createTaskResult(0, accumUpdatesByTask(0)))
  manager.handleSuccessfulTask(1, createTaskResult(1, accumUpdatesByTask(1)))
  assert(sched.endedTasks(0) === Success)
  assert(sched.endedTasks(1) === Success)
  assert(!sched.finishedManagers.contains(manager))

  // Finish the last task
  manager.handleSuccessfulTask(2, createTaskResult(2, accumUpdatesByTask(2)))
  assert(sched.endedTasks(2) === Success)
  assert(sched.finishedManagers.contains(manager))
}
// If no live executor can satisfy PROCESS_LOCAL, the manager's base locality
// level drops to NODE_LOCAL, so a node-local offer is accepted without waiting.
test("skip unsatisfiable locality levels") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execC", "host2"))
  val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "execB")))
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  // An executor that is not NODE_LOCAL should be rejected.
  assert(manager.resourceOffer("execC", "host2", ANY) === None)

  // Because there are no alive PROCESS_LOCAL executors, the base locality level should be
  // NODE_LOCAL. So, we should schedule the task on this offered NODE_LOCAL executor before
  // any of the locality wait timers expire.
  assert(manager.resourceOffer("execA", "host1", ANY).get.index === 0)
}
// Delay scheduling: locality levels relax only after LOCALITY_WAIT_MS elapses
// on the manual clock; node-local tasks are preferred over no-pref tasks.
test("basic delay scheduling") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
  val taskSet = FakeTask.createTaskSet(4,
    Seq(TaskLocation("host1", "exec1")),
    Seq(TaskLocation("host2", "exec2")),
    Seq(TaskLocation("host1"), TaskLocation("host2", "exec2")),
    Seq()   // Last task has no locality prefs
  )
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  // First offer host1, exec1: first task should be chosen
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
  assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL) == None)

  clock.advance(LOCALITY_WAIT_MS)
  // Offer host1, exec1 again, at NODE_LOCAL level: the node local (task 3) should
  // get chosen before the noPref task
  assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).get.index == 2)

  // Offer host2, exec2, at NODE_LOCAL level: we should choose task 2
  assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL).get.index == 1)

  // Offer host2, exec2 again, at NODE_LOCAL level: we should get noPref task
  // after failing to find a node_Local task
  assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL) == None)
  clock.advance(LOCALITY_WAIT_MS)
  assert(manager.resourceOffer("exec2", "host2", NO_PREF).get.index == 3)
}
// Once only no-pref tasks remain, they are handed out at NO_PREF level with no
// locality-wait delay.
test("we do not need to delay scheduling when we only have noPref tasks in the queue") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec3", "host2"))
  val taskSet = FakeTask.createTaskSet(3,
    Seq(TaskLocation("host1", "exec1")),
    Seq(TaskLocation("host2", "exec3")),
    Seq()   // Last task has no locality prefs
  )
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  // First offer host1, exec1: first task should be chosen
  assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).get.index === 0)
  assert(manager.resourceOffer("exec3", "host2", PROCESS_LOCAL).get.index === 1)
  assert(manager.resourceOffer("exec3", "host2", NODE_LOCAL) == None)
  assert(manager.resourceOffer("exec3", "host2", NO_PREF).get.index === 2)
}
// Non-local tasks are only launched after the locality wait expires; launching
// a local task resets the wait before further non-local launches are allowed.
test("delay scheduling with fallback") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc,
    ("exec1", "host1"), ("exec2", "host2"), ("exec3", "host3"))
  val taskSet = FakeTask.createTaskSet(5,
    Seq(TaskLocation("host1")),
    Seq(TaskLocation("host2")),
    Seq(TaskLocation("host2")),
    Seq(TaskLocation("host3")),
    Seq(TaskLocation("host2"))
  )
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  // First offer host1: first task should be chosen
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)

  // Offer host1 again: nothing should get chosen
  assert(manager.resourceOffer("exec1", "host1", ANY) === None)

  clock.advance(LOCALITY_WAIT_MS)

  // Offer host1 again: second task (on host2) should get chosen
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)

  // Offer host1 again: third task (on host2) should get chosen
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)

  // Offer host2: fifth task (also on host2) should get chosen
  assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 4)

  // Now that we've launched a local task, we should no longer launch the task for host3
  assert(manager.resourceOffer("exec2", "host2", ANY) === None)

  clock.advance(LOCALITY_WAIT_MS)

  // After another delay, we can go ahead and launch that task non-locally
  assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 3)
}
// Tasks whose preferred host's executor is lost fall back to non-local
// scheduling after the locality wait (doubled here to clear two levels).
test("delay scheduling with failed hosts") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"),
    ("exec3", "host3"))
  val taskSet = FakeTask.createTaskSet(3,
    Seq(TaskLocation("host1")),
    Seq(TaskLocation("host2")),
    Seq(TaskLocation("host3"))
  )
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  // First offer host1: first task should be chosen
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)

  // After this, nothing should get chosen, because we have separated tasks with unavailable
  // preference from the noPrefPendingTasks
  assert(manager.resourceOffer("exec1", "host1", ANY) === None)

  // Now mark host2 as dead
  sched.removeExecutor("exec2")
  manager.executorLost("exec2", "host2", SlaveLost())

  // nothing should be chosen
  assert(manager.resourceOffer("exec1", "host1", ANY) === None)

  clock.advance(LOCALITY_WAIT_MS * 2)

  // task 1 and 2 would be scheduled as nonLocal task
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)

  // all finished
  assert(manager.resourceOffer("exec1", "host1", ANY) === None)
  assert(manager.resourceOffer("exec2", "host2", ANY) === None)
}
// A task whose result is lost is recorded as TaskResultLost and re-offered.
test("task result lost") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  val taskSet = FakeTask.createTaskSet(1)
  val clock = new ManualClock
  clock.advance(1)
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)

  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)

  // Tell it the task has finished but the result was lost.
  manager.handleFailedTask(0, TaskState.FINISHED, TaskResultLost)
  assert(sched.endedTasks(0) === TaskResultLost)

  // Re-offer the host -- now we should get task 0 again.
  assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
}
// A single task failing maxTaskFailures times must abort the whole task set,
// and only on the final failure — not before.
test("repeated failures lead to task set abortion") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  val taskSet = FakeTask.createTaskSet(1)
  val clock = new ManualClock
  clock.advance(1)
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  // Fail the task maxTaskFailures times, and check that the task set is aborted
  // after the last failure.
  (1 to manager.maxTaskFailures).foreach { index =>
    val offerResult = manager.resourceOffer("exec1", "host1", ANY)
    assert(offerResult.isDefined,
      "Expect resource offer on iteration %s to return a task".format(index))
    assert(offerResult.get.index === 0)
    manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
    // Compare against the manager's own limit for consistency with the loop bound
    // (previously this mixed manager.maxTaskFailures with the MAX_TASK_FAILURES constant).
    if (index < manager.maxTaskFailures) {
      assert(!sched.taskSetsFailed.contains(taskSet.id))
    } else {
      assert(sched.taskSetsFailed.contains(taskSet.id))
    }
  }
}
// Exercises per-taskset blacklisting: after a task fails on an executor, that
// executor (and eventually its host / other executors) must stop receiving the
// task, regardless of locality preference, and the stage blacklist never expires.
test("executors should be blacklisted after task failure, in spite of locality preferences") {
val rescheduleDelay = 300L
val conf = new SparkConf().
set(config.BLACKLIST_ENABLED, true).
set(config.BLACKLIST_TIMEOUT_CONF, rescheduleDelay).
// don't wait to jump locality levels in this test
set("spark.locality.wait", "0")
sc = new SparkContext("local", "test", conf)
// two executors on same host, one on different.
sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec1.1", "host1"), ("exec2", "host2"))
// affinity to exec1 on host1 - which we will fail.
val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "exec1")))
val clock = new ManualClock
clock.advance(1)
// We don't directly use the application blacklist, but its presence triggers blacklisting
// within the taskset.
val mockListenerBus = mock(classOf[LiveListenerBus])
val blacklistTrackerOpt = Some(new BlacklistTracker(mockListenerBus, conf, None, clock))
val manager = new TaskSetManager(sched, taskSet, 4, blacklistTrackerOpt, clock)
// Run the task on exec1 - should work - then fail it on exec1.
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1")
// Cause exec1 to fail : failure 1
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1 fails after failure 1 due to blacklist
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", RACK_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
}
// Run the task on exec1.1 - should work, and then fail it on exec1.1
{
val offerResult = manager.resourceOffer("exec1.1", "host1", NODE_LOCAL)
assert(offerResult.isDefined,
"Expect resource offer to return a task for exec1.1, offerResult = " + offerResult)
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1.1")
// Cause exec1.1 to fail : failure 2
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1.1 fails after failure 2 due to blacklist
assert(manager.resourceOffer("exec1.1", "host1", NODE_LOCAL).isEmpty)
}
// Run the task on exec2 - should work, and then fail it on exec2
{
val offerResult = manager.resourceOffer("exec2", "host2", ANY)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec2")
// Cause exec2 to fail : failure 3
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec2 fails after failure 3 due to blacklist
assert(manager.resourceOffer("exec2", "host2", ANY).isEmpty)
}
// Despite advancing beyond the time for expiring executors from within the blacklist,
// we *never* expire from *within* the stage blacklist
clock.advance(rescheduleDelay)
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isEmpty)
}
// A fresh executor (not yet blacklisted) can still run the task: failure 4.
{
val offerResult = manager.resourceOffer("exec3", "host3", ANY)
assert(offerResult.isDefined)
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec3")
assert(manager.resourceOffer("exec3", "host3", ANY).isEmpty)
// Cause exec3 to fail : failure 4
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
}
// we have failed the same task 4 times now : task id should now be in taskSetsFailed
assert(sched.taskSetsFailed.contains(taskSet.id))
}
// myLocalityLevels must be recomputed both when executors are added
// (executorAdded) and when they are lost (executorLost).
test("new executors get added and lost") {
// Assign host2 to rack2
FakeRackUtil.assignHostToRack("host2", "rack2")
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execB")),
Seq(TaskLocation("host2", "execC")),
Seq())
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// Only ANY is valid
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
// Add a new executor
sched.addExecutor("execD", "host1")
manager.executorAdded()
// Valid locality should contain NODE_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
// Add another executor
sched.addExecutor("execC", "host2")
manager.executorAdded()
// Valid locality should contain PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(
Array(PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY)))
// test if the valid locality is recomputed when the executor is lost
sched.removeExecutor("execC")
manager.executorLost("execC", "host2", SlaveLost())
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
sched.removeExecutor("execD")
manager.executorLost("execD", "host1", SlaveLost())
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
}
// ExecutorExited with exitCausedByApp = false must not count against the task's
// failure budget (no abort), while exitCausedByApp = true must (maxFailures = 1
// here, so a single app-caused exit aborts the set).
test("Executors exit for reason unrelated to currently running tasks") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execB")),
Seq(TaskLocation("host2", "execC")),
Seq())
val clock = new ManualClock()
clock.advance(1)
// maxTaskFailures = 1 so any counted failure aborts immediately.
val manager = new TaskSetManager(sched, taskSet, 1, clock = clock)
sched.addExecutor("execA", "host1")
manager.executorAdded()
sched.addExecutor("execC", "host2")
manager.executorAdded()
// NOTE(review): this offer uses "exec1"/"host1" though the registered executors
// are execA/execC; resourceOffer does not validate the executor id, so the offer
// still succeeds — confirm this is intentional rather than a typo for "execA".
assert(manager.resourceOffer("exec1", "host1", ANY).isDefined)
sched.removeExecutor("execA")
manager.executorLost(
"execA",
"host1",
ExecutorExited(143, false, "Terminated for reason unrelated to running tasks"))
assert(!sched.taskSetsFailed.contains(taskSet.id))
assert(manager.resourceOffer("execC", "host2", ANY).isDefined)
sched.removeExecutor("execC")
manager.executorLost(
"execC", "host2", ExecutorExited(1, true, "Terminated due to issue with running tasks"))
assert(sched.taskSetsFailed.contains(taskSet.id))
}
// Rack awareness: a host in a different rack can only take a task at ANY
// locality, while a host in the same rack qualifies for RACK_LOCAL.
test("test RACK_LOCAL tasks") {
// Assign host1 to rack1
FakeRackUtil.assignHostToRack("host1", "rack1")
// Assign host2 to rack1
FakeRackUtil.assignHostToRack("host2", "rack1")
// Assign host3 to rack2
FakeRackUtil.assignHostToRack("host3", "rack2")
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
// Both tasks prefer execA on host1 (rack1).
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execA")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY)))
// Set allowed locality to ANY
clock.advance(LOCALITY_WAIT_MS * 3)
// Offer host3
// No task is scheduled if we restrict locality to RACK_LOCAL
assert(manager.resourceOffer("execC", "host3", RACK_LOCAL) === None)
// Task 0 can be scheduled with ANY
assert(manager.resourceOffer("execC", "host3", ANY).get.index === 0)
// Offer host2
// Task 1 can be scheduled with RACK_LOCAL
assert(manager.resourceOffer("execB", "host2", RACK_LOCAL).get.index === 1)
}
test("do not emit warning when serialized task is small") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  // A single trivially-small task: serializing it stays below the warning threshold.
  val smallTaskSet = FakeTask.createTaskSet(1)
  val tsManager = new TaskSetManager(sched, smallTaskSet, MAX_TASK_FAILURES)
  assert(!tsManager.emittedTaskSizeWarning)
  // The size check happens when the task is serialized for an offer.
  val launched = tsManager.resourceOffer("exec1", "host1", ANY)
  assert(launched.get.index === 0)
  // Flag must remain unset after launching the small task.
  assert(!tsManager.emittedTaskSizeWarning)
}
test("emit warning when serialized task is large") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  // LargeTask serializes past the task-size warning threshold.
  val largeTaskSet = new TaskSet(Array(new LargeTask(0)), 0, 0, 0, null)
  val tsManager = new TaskSetManager(sched, largeTaskSet, MAX_TASK_FAILURES)
  assert(!tsManager.emittedTaskSizeWarning)
  // Launching the task serializes it, which must trip the warning flag.
  val launched = tsManager.resourceOffer("exec1", "host1", ANY)
  assert(launched.get.index === 0)
  assert(tsManager.emittedTaskSizeWarning)
}
test("Not serializable exception thrown if the task cannot be serialized") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  // Tasks that cannot be serialized at all.
  val badTaskSet = new TaskSet(
    Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)), 0, 0, 0, null)
  val tsManager = new TaskSetManager(sched, badTaskSet, MAX_TASK_FAILURES)
  // Offering a resource forces serialization, which must raise ...
  intercept[TaskNotSerializableException] {
    tsManager.resourceOffer("exec1", "host1", ANY)
  }
  // ... and put the manager into zombie state so no further tasks launch.
  assert(tsManager.isZombie)
}
// Collecting results whose total size exceeds spark.driver.maxResultSize (2m
// here) must fail the job with a SparkException, whether the overflow comes
// from a single oversized result or from many medium ones.
test("abort the job if total size of results is too large") {
val conf = new SparkConf().set("spark.driver.maxResultSize", "2m")
sc = new SparkContext("local", "test", conf)
// Returns a closure producing a random byte array of the given size.
def genBytes(size: Int): (Int) => Array[Byte] = { (x: Int) =>
val bytes = Array.ofDim[Byte](size)
scala.util.Random.nextBytes(bytes)
bytes
}
// multiple 1k result
val r = sc.makeRDD(0 until 10, 10).map(genBytes(1024)).collect()
assert(10 === r.size)
// single 10M result
val thrown = intercept[SparkException] {sc.makeRDD(genBytes(10 << 20)(0), 1).collect()}
assert(thrown.getMessage().contains("bigger than spark.driver.maxResultSize"))
// multiple 1M results
val thrown2 = intercept[SparkException] {
sc.makeRDD(0 until 10, 10).map(genBytes(1 << 20)).collect()
}
assert(thrown2.getMessage().contains("bigger than spark.driver.maxResultSize"))
}
// SPARK-13931: once a TaskSetManager is a zombie (its task already completed via
// another attempt), losing an executor running a leftover attempt must not
// resubmit the task.
test("[SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie") {
val conf = new SparkConf().set("spark.speculation", "true")
sc = new SparkContext("local", "test", conf)
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
sched.initialize(new FakeSchedulerBackend() {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {}
})
// Keep track of the number of tasks that are resubmitted,
// so that the test can check that no tasks were resubmitted.
var resubmittedTasks = 0
val dagScheduler = new FakeDAGScheduler(sc, sched) {
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Seq[AccumulatorV2[_, _]],
taskInfo: TaskInfo): Unit = {
super.taskEnded(task, reason, result, accumUpdates, taskInfo)
reason match {
case Resubmitted => resubmittedTasks += 1
case _ =>
}
}
}
sched.setDAGScheduler(dagScheduler)
val singleTask = new ShuffleMapTask(0, 0, null, new Partition {
override def index: Int = 0
}, Seq(TaskLocation("host1", "execA")), new Properties, null)
val taskSet = new TaskSet(Array(singleTask), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
// Offer host1, which should be accepted as a PROCESS_LOCAL location
// by the one task in the task set
val task1 = manager.resourceOffer("execA", "host1", TaskLocality.PROCESS_LOCAL).get
// Mark the task as available for speculation, and then offer another resource,
// which should be used to launch a speculative copy of the task.
manager.speculatableTasks += singleTask.partitionId
val task2 = manager.resourceOffer("execB", "host2", TaskLocality.ANY).get
assert(manager.runningTasks === 2)
assert(manager.isZombie === false)
val directTaskResult = new DirectTaskResult[String](null, Seq()) {
override def value(resultSer: SerializerInstance): String = ""
}
// Complete one copy of the task, which should result in the task set manager
// being marked as a zombie, because at least one copy of its only task has completed.
manager.handleSuccessfulTask(task1.taskId, directTaskResult)
assert(manager.isZombie === true)
assert(resubmittedTasks === 0)
assert(manager.runningTasks === 1)
// Losing the executor running the speculative copy must clean it up without
// a Resubmitted event, since the manager is already a zombie.
manager.executorLost("execB", "host2", new SlaveLost())
assert(manager.runningTasks === 0)
assert(resubmittedTasks === 0)
}
// Scheduling order: process-local tasks first, then no-pref tasks, then (after
// the locality wait) speculative copies and finally non-local tasks.
test("speculative and noPref task should be scheduled after node-local") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(
    sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
  val taskSet = FakeTask.createTaskSet(4,
    Seq(TaskLocation("host1", "execA")),
    Seq(TaskLocation("host2"), TaskLocation("host1")),
    Seq(),
    Seq(TaskLocation("host3", "execC")))
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 0)
  // Use === consistently (these two previously used ==) so ScalaTest reports
  // both operands on failure, matching the rest of the suite.
  assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) === None)
  assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 1)
  // Mark task 1 speculatable, then let the locality wait expire.
  manager.speculatableTasks += 1
  clock.advance(LOCALITY_WAIT_MS)
  // schedule the nonPref task
  assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 2)
  // schedule the speculative task
  assert(manager.resourceOffer("execB", "host2", NO_PREF).get.index === 1)
  clock.advance(LOCALITY_WAIT_MS * 3)
  // schedule non-local tasks
  assert(manager.resourceOffer("execB", "host2", ANY).get.index === 3)
}
// When only node-local and no-preference tasks exist (no process-local level),
// node-local tasks must launch with zero delay; no-pref tasks come after.
test("node-local tasks should be scheduled right away " +
"when there are only node-local and no-preference tasks") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(
sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(),
Seq(TaskLocation("host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// node-local tasks are scheduled without delay
// NOTE(review): the offers below pair "execA" with host2/host3; resourceOffer
// keys locality on the host, so this still exercises node-locality per host —
// confirm the executor ids are intentionally unused here.
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
assert(manager.resourceOffer("execA", "host2", NODE_LOCAL).get.index === 1)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL).get.index === 3)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL) === None)
// schedule no-preference after node local ones
assert(manager.resourceOffer("execA", "host3", NO_PREF).get.index === 2)
}
// SPARK-4939: process-local tasks are preferred, but once they are exhausted,
// node-local tasks must be schedulable immediately (no locality delay).
test("SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished")
{
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
  val taskSet = FakeTask.createTaskSet(4,
    Seq(TaskLocation("host1")),
    Seq(TaskLocation("host2")),
    Seq(ExecutorCacheTaskLocation("host1", "execA")),
    Seq(ExecutorCacheTaskLocation("host2", "execB")))
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  // process-local tasks are scheduled first
  assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 2)
  assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 3)
  // node-local tasks are scheduled without delay
  assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
  assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 1)
  // Use === (these two previously used ==) so a failing comparison reports
  // both sides, consistent with the rest of the suite.
  assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) === None)
  assert(manager.resourceOffer("execB", "host2", NODE_LOCAL) === None)
}
// SPARK-4939: once all process-local tasks have been launched, no-preference
// tasks must be schedulable without waiting out the locality delay.
test("SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
  val taskSet = FakeTask.createTaskSet(3,
    Seq(),
    Seq(ExecutorCacheTaskLocation("host1", "execA")),
    Seq(ExecutorCacheTaskLocation("host2", "execB")))
  val clock = new ManualClock
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  // process-local tasks are scheduled first
  assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 1)
  assert(manager.resourceOffer("execB", "host2", PROCESS_LOCAL).get.index === 2)
  // no-pref tasks are scheduled without delay
  // (=== used consistently here; three of these asserts previously used ==,
  // which loses ScalaTest's informative failure message)
  assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL) === None)
  assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) === None)
  assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 0)
  assert(manager.resourceOffer("execA", "host1", ANY) === None)
}
// SPARK-2931 regression: adding executors (and hence new locality levels) after
// the manager was created must not leave the locality-index state out of bounds.
test("Ensure TaskSetManager is usable after addition of levels") {
// Regression test for SPARK-2931
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host2", "execB.1")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// Only ANY is valid
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
// Add a new executor
sched.addExecutor("execA", "host1")
sched.addExecutor("execB.2", "host2")
manager.executorAdded()
assert(manager.pendingTasksWithNoPrefs.size === 0)
// Valid locality should contain PROCESS_LOCAL, NODE_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
assert(manager.resourceOffer("execA", "host1", ANY) !== None)
clock.advance(LOCALITY_WAIT_MS * 4)
assert(manager.resourceOffer("execB.2", "host2", ANY) !== None)
// Remove all executors, then add a new one, forcing another level recompute.
sched.removeExecutor("execA")
sched.removeExecutor("execB.2")
manager.executorLost("execA", "host1", SlaveLost())
manager.executorLost("execB.2", "host2", SlaveLost())
clock.advance(LOCALITY_WAIT_MS * 4)
sched.addExecutor("execC", "host3")
manager.executorAdded()
// Prior to the fix, this line resulted in an ArrayIndexOutOfBoundsException:
assert(manager.resourceOffer("execC", "host3", ANY) !== None)
}
// An hdfs_cache_* location must keep PROCESS_LOCAL in myLocalityLevels for as
// long as any executor on that host remains; levels collapse to ANY only after
// all executors are gone.
test("Test that locations with HDFSCacheTaskLocation are treated as PROCESS_LOCAL.") {
// NOTE(review): "Regression test for SPARK-2931" here looks copy-pasted from
// the previous test — confirm whether this test actually relates to that issue.
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("hdfs_cache_host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
// executorAdded() is used after each removal to force a locality recompute.
sched.removeExecutor("execA")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execB")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execC")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
}
test("Test TaskLocation for different host type.") {
  // Each textual form must parse to the corresponding TaskLocation subtype.
  val expectedParses = Seq[(String, TaskLocation)](
    "host1" -> HostTaskLocation("host1"),
    "hdfs_cache_host1" -> HDFSCacheTaskLocation("host1"),
    "executor_host1_3" -> ExecutorCacheTaskLocation("host1", "3"),
    "executor_some.host1_executor_task_3" ->
      ExecutorCacheTaskLocation("some.host1", "executor_task_3"))
  for ((str, expected) <- expectedParses) {
    assert(TaskLocation(str) === expected)
  }
}
// When one attempt of a task succeeds, the scheduler backend must be told to
// kill the other still-running attempt of the same task.
test("Kill other task attempts when one attempt belonging to the same task succeeds") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set("spark.speculation.multiplier", "0.0")
sc.conf.set("spark.speculation", "true")
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start
for ((k, v) <- List(
"exec1" -> "host1",
"exec1" -> "host1",
"exec2" -> "host2",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(k, v, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === k)
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
// Complete the 3 tasks and leave 1 task in running
for (id <- Set(0, 1, 2)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
// Offer resource to start the speculative attempt for the running task
val taskOption5 = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption5.isDefined)
val task5 = taskOption5.get
// The speculative copy is task index 3 (the only running task), attempt 1.
assert(task5.index === 3)
assert(task5.taskId === 4)
assert(task5.executorId === "exec1")
assert(task5.attemptNumber === 1)
sched.backend = mock(classOf[SchedulerBackend])
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(4, createTaskResult(3, accumUpdatesByTask(3)))
// Verify that it kills other running attempt
verify(sched.backend).killTask(3, "exec2", true, "another attempt succeeded")
// Because the SchedulerBackend was a mock, the 2nd copy of the task won't actually be
// killed, so the FakeTaskScheduler is only told about the successful completion
// of the speculated task.
assert(sched.endedTasks(3) === Success)
}
// Killed speculative attempts must not count towards the task-failure limit
// that aborts a task set: a task may "fail" maxTaskFailures times in total as
// long as one of those failures is a kill of a superseded attempt.
test("Killing speculative tasks does not count towards aborting the taskset") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
  val taskSet = FakeTask.createTaskSet(5)
  // Set the speculation multiplier to be 0 so speculative tasks are launched immediately
  sc.conf.set("spark.speculation.multiplier", "0.0")
  sc.conf.set("spark.speculation.quantile", "0.6")
  sc.conf.set("spark.speculation", "true")
  val clock = new ManualClock()
  val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
  val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
    task.metrics.internalAccums
  }
  // Offer resources for 5 tasks to start
  val tasks = new ArrayBuffer[TaskDescription]()
  for ((k, v) <- List(
    "exec1" -> "host1",
    "exec1" -> "host1",
    "exec1" -> "host1",
    "exec2" -> "host2",
    "exec2" -> "host2")) {
    val taskOption = manager.resourceOffer(k, v, NO_PREF)
    assert(taskOption.isDefined)
    val task = taskOption.get
    assert(task.executorId === k)
    tasks += task
  }
  assert(sched.startedTasks.toSet === (0 until 5).toSet)
  clock.advance(1)
  // Complete 3 tasks and leave 2 tasks in running
  for (id <- Set(0, 1, 2)) {
    manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
    assert(sched.endedTasks(id) === Success)
  }
  // Finds the still-running attempt (the one not yet ended) for a task index.
  def runningTaskForIndex(index: Int): TaskDescription = {
    tasks.find { task =>
      task.index == index && !sched.endedTasks.contains(task.taskId)
    }.getOrElse {
      throw new RuntimeException(s"couldn't find index $index in " +
        s"tasks: ${tasks.map { t => t.index -> t.taskId }} with endedTasks:" +
        s" ${sched.endedTasks.keys}")
    }
  }
  // have each of the running tasks fail 3 times (not enough to abort the stage)
  (0 until 3).foreach { attempt =>
    Seq(3, 4).foreach { index =>
      val task = runningTaskForIndex(index)
      logInfo(s"failing task $task")
      val endReason = ExceptionFailure("a", "b", Array(), "c", None)
      manager.handleFailedTask(task.taskId, TaskState.FAILED, endReason)
      sched.endedTasks(task.taskId) = endReason
      assert(!manager.isZombie)
      // Plain literals here (the original's s-interpolators on these two constants
      // were no-ops).
      val nextTask = manager.resourceOffer("exec2", "host2", NO_PREF)
      assert(nextTask.isDefined, s"no offer for attempt $attempt of $index")
      tasks += nextTask.get
    }
  }
  // we can't be sure which one of our running tasks will get another speculative copy
  val originalTasks = Seq(3, 4).map { index => index -> runningTaskForIndex(index) }.toMap
  // checkSpeculatableTasks checks that the task runtime is greater than the threshold for
  // speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
  // > 0ms, so advance the clock by 1ms here.
  clock.advance(1)
  assert(manager.checkSpeculatableTasks(0))
  // Offer resource to start the speculative attempt for the running task
  val taskOption5 = manager.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOption5.isDefined)
  val speculativeTask = taskOption5.get
  assert(speculativeTask.index === 3 || speculativeTask.index === 4)
  assert(speculativeTask.taskId === 11)
  assert(speculativeTask.executorId === "exec1")
  assert(speculativeTask.attemptNumber === 4)
  sched.backend = mock(classOf[SchedulerBackend])
  // Complete the speculative attempt for the running task
  manager.handleSuccessfulTask(speculativeTask.taskId, createTaskResult(3, accumUpdatesByTask(3)))
  // Verify that it kills other running attempt
  val origTask = originalTasks(speculativeTask.index)
  verify(sched.backend).killTask(origTask.taskId, "exec2", true, "another attempt succeeded")
  // Because the SchedulerBackend was a mock, the 2nd copy of the task won't actually be
  // killed, so the FakeTaskScheduler is only told about the successful completion
  // of the speculated task.
  assert(sched.endedTasks(3) === Success)
  // also because the scheduler is a mock, our manager isn't notified about the task killed event,
  // so we do that manually
  manager.handleFailedTask(origTask.taskId, TaskState.KILLED, TaskKilled("test"))
  // this task has "failed" 4 times, but one of them doesn't count, so keep running the stage
  assert(manager.tasksSuccessful === 4)
  assert(!manager.isZombie)
  // now run another speculative task
  val taskOpt6 = manager.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOpt6.isDefined)
  val speculativeTask2 = taskOpt6.get
  assert(speculativeTask2.index === 3 || speculativeTask2.index === 4)
  assert(speculativeTask2.index !== speculativeTask.index)
  assert(speculativeTask2.attemptNumber === 4)
  // Complete the speculative attempt for the running task
  manager.handleSuccessfulTask(speculativeTask2.taskId,
    createTaskResult(3, accumUpdatesByTask(3)))
  // Verify that it kills other running attempt
  val origTask2 = originalTasks(speculativeTask2.index)
  verify(sched.backend).killTask(origTask2.taskId, "exec2", true, "another attempt succeeded")
  assert(manager.tasksSuccessful === 5)
  assert(manager.isZombie)
}
// SPARK-19868: dagScheduler.taskEnded() must only be called after the manager's
// state (e.g. isZombie) is fully updated, otherwise there is a race. The mocked
// taskEnded answer asserts the zombie flag at the moment of the call.
test("SPARK-19868: DagScheduler only notified of taskEnd when state is ready") {
// dagScheduler.taskEnded() is async, so it may *seem* ok to call it before we've set all
// appropriate state, eg. isZombie. However, this sets up a race that could go the wrong way.
// This is a super-focused regression test which checks the zombie state as soon as
// dagScheduler.taskEnded() is called, to ensure we haven't introduced a race.
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val mockDAGScheduler = mock(classOf[DAGScheduler])
sched.dagScheduler = mockDAGScheduler
val taskSet = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = new ManualClock(1))
when(mockDAGScheduler.taskEnded(any(), any(), any(), any(), any())).thenAnswer(
new Answer[Unit] {
override def answer(invocationOnMock: InvocationOnMock): Unit = {
assert(manager.isZombie)
}
})
val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
// this would fail, inside our mock dag scheduler, if it calls dagScheduler.taskEnded() too soon
manager.handleSuccessfulTask(0, createTaskResult(0))
}
test("SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names") {
  sc = new SparkContext("local", "test")
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  // Each (stageId, stageAttemptId) pair must produce a distinct manager name of
  // the form "TaskSet_<stage>.<attempt>": same stage/different attempt and
  // same attempt/different stage are both covered.
  val namingCases = Seq(
    (0, 0, "TaskSet_0.0"),
    (0, 1, "TaskSet_0.1"),
    (1, 1, "TaskSet_1.1"))
  for ((stage, attempt, expectedName) <- namingCases) {
    val ts = FakeTask.createTaskSet(numTasks = 1, stageId = stage, stageAttemptId = attempt)
    val tsm = new TaskSetManager(sched, ts, MAX_TASK_FAILURES, clock = new ManualClock)
    assert(tsm.name === expectedName)
  }
}
// Failure kinds that are not the executor's fault (fetch failure, executor
// preemption, commit denial, task kill) must never feed the task-set blacklist.
test("don't update blacklist for shuffle-fetch failures, preemption, denied commits, " +
"or killed tasks") {
// Setup a taskset, and fail some tasks for a fetch failure, preemption, denied commit,
// and killed task.
val conf = new SparkConf().
set(config.BLACKLIST_ENABLED, true)
sc = new SparkContext("local", "test", conf)
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
val tsm = new TaskSetManager(sched, taskSet, 4)
// we need a spy so we can attach our mock blacklist
val tsmSpy = spy(tsm)
val blacklist = mock(classOf[TaskSetBlacklist])
when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(blacklist))
// make some offers to our taskset, to get tasks we will fail
val taskDescs = Seq(
"exec1" -> "host1",
"exec2" -> "host1"
).flatMap { case (exec, host) =>
// offer each executor twice (simulating 2 cores per executor)
(0 until 2).flatMap{ _ => tsmSpy.resourceOffer(exec, host, TaskLocality.ANY)}
}
assert(taskDescs.size === 4)
// now fail those tasks, one per non-fault failure kind
tsmSpy.handleFailedTask(taskDescs(0).taskId, TaskState.FAILED,
FetchFailed(BlockManagerId(taskDescs(0).executorId, "host1", 12345), 0, 0, 0, "ignored"))
tsmSpy.handleFailedTask(taskDescs(1).taskId, TaskState.FAILED,
ExecutorLostFailure(taskDescs(1).executorId, exitCausedByApp = false, reason = None))
tsmSpy.handleFailedTask(taskDescs(2).taskId, TaskState.FAILED,
TaskCommitDenied(0, 2, 0))
tsmSpy.handleFailedTask(taskDescs(3).taskId, TaskState.KILLED, TaskKilled("test"))
// Make sure that the blacklist ignored all of the task failures above, since they aren't
// the fault of the executor where the task was running.
verify(blacklist, never())
.updateBlacklistForFailedTask(anyString(), anyString(), anyInt())
}
// With the external shuffle service and fetch-failure blacklisting enabled, a
// single FetchFailed must blacklist the serving node at the application level.
test("update application blacklist for shuffle-fetch") {
// Setup a taskset, and fail some one task for fetch failure.
val conf = new SparkConf()
.set(config.BLACKLIST_ENABLED, true)
.set(config.SHUFFLE_SERVICE_ENABLED, true)
.set(config.BLACKLIST_FETCH_FAILURE_ENABLED, true)
sc = new SparkContext("local", "test", conf)
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
val blacklistTracker = new BlacklistTracker(sc, None)
val tsm = new TaskSetManager(sched, taskSet, 4, Some(blacklistTracker))
// make some offers to our taskset, to get tasks we will fail
val taskDescs = Seq(
"exec1" -> "host1",
"exec2" -> "host2"
).flatMap { case (exec, host) =>
// offer each executor twice (simulating 2 cores per executor)
(0 until 2).flatMap{ _ => tsm.resourceOffer(exec, host, TaskLocality.ANY)}
}
assert(taskDescs.size === 4)
// Nothing is blacklisted before the failure.
assert(!blacklistTracker.isExecutorBlacklisted(taskDescs(0).executorId))
assert(!blacklistTracker.isNodeBlacklisted("host1"))
// Fail the task with fetch failure
tsm.handleFailedTask(taskDescs(0).taskId, TaskState.FAILED,
FetchFailed(BlockManagerId(taskDescs(0).executorId, "host1", 12345), 0, 0, 0, "ignored"))
assert(blacklistTracker.isNodeBlacklisted("host1"))
}
test("update blacklist before adding pending task to avoid race condition") {
  // When a task fails, it should apply the blacklist policy prior to
  // retrying the task otherwise there's a race condition where run on
  // the same executor that it was intended to be black listed from.
  val conf = new SparkConf().
    set(config.BLACKLIST_ENABLED, true)
  // Create a task with two executors.
  sc = new SparkContext("local", "test", conf)
  val exec = "executor1"
  val host = "host1"
  val exec2 = "executor2"
  val host2 = "host2"
  sched = new FakeTaskScheduler(sc, (exec, host), (exec2, host2))
  val taskSet = FakeTask.createTaskSet(1)
  val clock = new ManualClock
  val mockListenerBus = mock(classOf[LiveListenerBus])
  val blacklistTracker = new BlacklistTracker(mockListenerBus, conf, None, clock)
  val taskSetManager = new TaskSetManager(sched, taskSet, 1, Some(blacklistTracker))
  val taskSetManagerSpy = spy(taskSetManager)
  val taskDesc = taskSetManagerSpy.resourceOffer(exec, host, TaskLocality.ANY)
  // Assert the task has been black listed on the executor it was last executed on.
  // The check runs inside the stubbed addPendingTask, i.e. at the exact moment the
  // failed task is re-queued — which is where the race would otherwise occur.
  when(taskSetManagerSpy.addPendingTask(anyInt())).thenAnswer(
    new Answer[Unit] {
      override def answer(invocationOnMock: InvocationOnMock): Unit = {
        val task = invocationOnMock.getArgumentAt(0, classOf[Int])
        assert(taskSetManager.taskSetBlacklistHelperOpt.get.
          isExecutorBlacklistedForTask(exec, task))
      }
    }
  )
  // Simulate a fake exception
  val e = new ExceptionFailure("a", "b", Array(), "c", None)
  taskSetManagerSpy.handleFailedTask(taskDesc.get.taskId, TaskState.FAILED, e)
  // The stubbed answer must have run exactly once for the in-stub assert to count.
  verify(taskSetManagerSpy, times(1)).addPendingTask(anyInt())
}
test("SPARK-21563 context's added jars shouldn't change mid-TaskSet") {
  // A TaskSetManager must snapshot sc.addedJars when the TaskSet is created:
  // tasks launched later in the same TaskSet still see the original jar list,
  // while a new TaskSet picks up jars added in the meantime.
  sc = new SparkContext("local", "test")
  val addedJarsPreTaskSet = Map[String, Long](sc.addedJars.toSeq: _*)
  assert(addedJarsPreTaskSet.size === 0)
  sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
  val taskSet1 = FakeTask.createTaskSet(3)
  val manager1 = new TaskSetManager(sched, taskSet1, MAX_TASK_FAILURES, clock = new ManualClock)
  // all tasks from the first taskset have the same jars
  val taskOption1 = manager1.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOption1.get.addedJars === addedJarsPreTaskSet)
  val taskOption2 = manager1.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOption2.get.addedJars === addedJarsPreTaskSet)
  // even with a jar added mid-TaskSet
  val jarPath = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar")
  sc.addJar(jarPath.toString)
  val addedJarsMidTaskSet = Map[String, Long](sc.addedJars.toSeq: _*)
  assert(addedJarsPreTaskSet !== addedJarsMidTaskSet)
  val taskOption3 = manager1.resourceOffer("exec1", "host1", NO_PREF)
  // which should have the old version of the jars list
  assert(taskOption3.get.addedJars === addedJarsPreTaskSet)
  // and then the jar does appear in the next TaskSet
  val taskSet2 = FakeTask.createTaskSet(1)
  val manager2 = new TaskSetManager(sched, taskSet2, MAX_TASK_FAILURES, clock = new ManualClock)
  val taskOption4 = manager2.resourceOffer("exec1", "host1", NO_PREF)
  assert(taskOption4.get.addedJars === addedJarsMidTaskSet)
}
/**
 * Builds a [[DirectTaskResult]] whose payload is `id` serialized with the
 * serializer of the current SparkEnv, optionally carrying accumulator updates.
 */
private def createTaskResult(
    id: Int,
    accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty): DirectTaskResult[Int] =
  new DirectTaskResult[Int](SparkEnv.get.serializer.newInstance().serialize(id), accumUpdates)
}
| someorz/spark | core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala | Scala | apache-2.0 | 53,949 |
package browser
import helpers.TestDataPerTest
import org.scalatestplus.play.{HtmlUnitFactory, OneBrowserPerTest, OneServerPerTest, PlaySpec}
/**
 * Browser tests for the view item page: rendering of an item's fields and
 * round-tripping edits (including validation errors) through the save form.
 * Runs against a fresh server and HtmlUnit browser per test, with test data
 * supplied by [[TestDataPerTest]].
 */
class ViewItemPageTests extends PlaySpec with TestDataPerTest with OneServerPerTest with OneBrowserPerTest with HtmlUnitFactory {
  "The view item page" should {
    "display the item's name" in {
      go to viewItemPage
      textField("name").value mustBe item.name
    }
    "display the item's weight" in {
      go to viewItemPage
      numberField("weight").value mustBe item.weightInGrams.get.toString
    }
    "display new name after saving" in {
      go to viewItemPage
      textField("name").value = "new-name"
      click on "save-item"
      textField("name").value mustBe "new-name"
    }
    "display selected worn checkbox after saving" in {
      go to viewItemPage
      checkbox("worn").select()
      click on "save-item"
      checkbox("worn").isSelected mustBe true
    }
    "display unselected worn checkbox after saving" in {
      go to viewItemPage
      checkbox("worn").clear()
      click on "save-item"
      checkbox("worn").isSelected mustBe false
    }
    "display selected consumable checkbox after saving" in {
      go to viewItemPage
      checkbox("consumable").select()
      click on "save-item"
      checkbox("consumable").isSelected mustBe true
    }
    "display unselected consumable checkbox after saving" in {
      go to viewItemPage
      checkbox("consumable").clear()
      click on "save-item"
      checkbox("consumable").isSelected mustBe false
    }
    "display an error when a negative weight is given" in {
      go to viewItemPage
      textField("name").value = "item-with-negative-weight"
      numberField("weight").value = "-500"
      click on "save-item"
      find("save-item-form").get.text must include("Must be greater or equal to 0")
    }
    "display an error when item name is not provided" in {
      go to viewItemPage
      textField("name").value = ""
      click on "save-item"
      find("save-item-form").get.text must include("This field is required")
    }
    "save the name when there are validation errors" in {
      go to viewItemPage
      textField("name").value = "item-with-negative-weight"
      numberField("weight").value = "-500"
      click on "save-item"
      // Fix: was `telField("name")`, which looks up an <input type="tel"> and
      // fails for this text input; every other test reads "name" via textField.
      textField("name").value mustBe "item-with-negative-weight"
    }
    "save the weight when there are validation errors" in {
      go to viewItemPage
      // NOTE(review): this submission appears to have no validation errors
      // (the weight is valid and the name is untouched) — confirm whether an
      // invalid field was meant to be set here, as the test name suggests.
      numberField("weight").value = "400"
      click on "save-item"
      numberField("weight").value mustBe "400"
    }
  }
  // URL of the view page for the per-test item, on the per-test server port.
  private def viewItemPage = {
    s"http://localhost:$port/items/${item.id.get}"
  }
}
| notclive/backpack | test/browser/ViewItemPageTests.scala | Scala | mit | 2,703 |
/*
* Happy Melly Teller
* Copyright (C) 2013 - 2015, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional terms, you may contact
* by email Sergey Kotlov, [email protected] or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package security
import be.objectify.deadbolt.core.models.Subject
import be.objectify.deadbolt.scala.{DeadboltHandler, DynamicResourceHandler}
import models.ActiveUser
import models.repository.IRepositories
import play.api.i18n.{I18nSupport, Messages, MessagesApi}
import play.api.mvc.{Request, Result}
import securesocial.core.SecureSocial
import securesocial.core.SecureSocial.SecuredRequest
import services.TellerRuntimeEnvironment
import scala.concurrent.Future
/**
* Deadbolt authorisation handler.
*/
/**
 * Deadbolt authorisation handler.
 *
 * Resolves the current subject and the dynamic resource handler from the
 * authenticated [[ActiveUser]] carried by a SecureSocial [[SecuredRequest]].
 */
class AuthorisationHandler @javax.inject.Inject() (override implicit val env: TellerRuntimeEnvironment,
                                                   val messagesApi: MessagesApi,
                                                   val services: IRepositories)
  extends DeadboltHandler
  with I18nSupport
  with SecureSocial {

  /**
   * Invoked prior to a constraint's test. Always returns None so the
   * constraint is applied and the wrapped action runs.
   */
  def beforeAuthCheck[A](request: Request[A]): Future[Option[Result]] = Future.successful(None)

  /**
   * Gets the current subject e.g. the current user.
   *
   * @return an option containing the current subject's account
   */
  override def getSubject[A](request: Request[A]): Future[Option[Subject]] =
    Future.successful(activeUser(request).map(_.account))

  /**
   * Gets the handler used for dealing with resources restricted to specific users/groups.
   *
   * @return an option containing the handler for restricted resources
   */
  override def getDynamicResourceHandler[A](request: Request[A]): Future[Option[DynamicResourceHandler]] =
    Future.successful(activeUser(request).map(user => new ResourceHandler(user, services)))

  /**
   * Invoked when an authorisation failure is detected for the request:
   * redirects to the dashboard with an error flash message.
   */
  def onAuthFailure[A](request: Request[A]): Future[Result] = Future.successful {
    Redirect(controllers.core.routes.Dashboard.index()).flashing("error" -> Messages("error.authorisation"))
  }

  /**
   * Extracts the authenticated [[ActiveUser]] from the request, or None when
   * the request is not a secured request (the cast then fails).
   */
  private def activeUser[A](request: Request[A]): Option[ActiveUser] =
    try {
      request.asInstanceOf[SecuredRequest[A, ActiveUser]].user match {
        case user: ActiveUser => Some(user)
        case _ => None
      }
    } catch {
      case e: ClassCastException => None
    }
}
| HappyMelly/teller | app/security/AuthorisationHandler.scala | Scala | gpl-3.0 | 3,781 |
package org.apache.mesos.chronos.scheduler.jobs
import java.util.TimeZone
import org.joda.time.format.{ISODateTimeFormat, ISOPeriodFormat}
import org.joda.time.{DateTime, DateTimeZone, Period}
/**
* Parsing, creating and validation for Iso8601 expressions.
*
* @author Florian Leibert ([email protected])
*/
/**
 * Parsing, creating and validation for ISO 8601 repeating-interval expressions
 * of the form `R[n]/<start>/<period>` (e.g. `R5/2014-03-08T20:00:00Z/P1D`).
 *
 * @author Florian Leibert ([email protected])
 */
object Iso8601Expressions {
  // Captures repetition ("R" or "R<n>"), start instant, and period ("P...").
  val iso8601ExpressionRegex = """(R[0-9]*)/(.*)/(P.*)""".r
  private[this] val formatter = ISODateTimeFormat.dateTime

  /**
   * Verifies that the given expression is a valid Iso8601Expression. Currently not all
   * Iso8601Expression formats are supported.
   *
   * @param input       the candidate ISO8601 expression
   * @param timeZoneStr optional time zone id applied to the start instant
   * @return true when the expression parses successfully
   */
  def canParse(input: String, timeZoneStr: String = ""): Boolean =
    // Idiomatic replacement for matching Some/None on the parse result.
    parse(input, timeZoneStr).isDefined

  /**
   * Parses a ISO8601 expression into a tuple consisting of the number of repetitions
   * (or -1 for infinity), the start and the period.
   *
   * @param input       the input string which is a ISO8601 expression consisting of
   *                    Repetition, Start and Period.
   * @param timeZoneStr optional time zone id applied to the start instant
   * @return Some((repetitions, start, period)) on success, None on any parse failure
   */
  def parse(input: String, timeZoneStr: String = ""): Option[(Long, DateTime, Period)] = {
    try {
      val iso8601ExpressionRegex(repeatStr, startStr, periodStr) = input
      // A bare "R" (length 1) means repeat forever, encoded as -1.
      val repeat: Long =
        if (repeatStr.length == 1) -1L
        else repeatStr.substring(1).toLong
      // An empty start defaults to one second before the current UTC time.
      val start: DateTime =
        if (startStr.isEmpty) DateTime.now(DateTimeZone.UTC).minusSeconds(1)
        else convertToDateTime(startStr, timeZoneStr)
      val period: Period = ISOPeriodFormat.standard.parsePeriod(periodStr)
      Some((repeat, start, period))
    } catch {
      // MatchError: input didn't match the regex.
      // IllegalArgumentException (incl. NumberFormatException): unparsable
      // date/period or non-numeric repeat count.
      case e: scala.MatchError =>
        None
      case e: IllegalArgumentException =>
        None
    }
  }

  /**
   * Creates a DateTime object from an input string. This parses the object by first
   * checking for a time zone and then using a datetime formatter to format the date and time.
   *
   * @param dateTimeStr the input date time string with optional time zone
   * @param timeZoneStr the time zone id; null or empty leaves the parsed zone intact
   * @return the date time
   */
  def convertToDateTime(dateTimeStr: String, timeZoneStr: String): DateTime = {
    val dateTime = DateTime.parse(dateTimeStr)
    // Null guard kept for Java-interop callers. withZoneRetainFields keeps the
    // wall-clock fields and only swaps the zone.
    if (timeZoneStr != null && timeZoneStr.nonEmpty) {
      val timeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(timeZoneStr))
      dateTime.withZoneRetainFields(timeZone)
    } else {
      dateTime
    }
  }

  /**
   * Creates a valid Iso8601Expression based on the input parameters.
   *
   * @param recurrences number of repetitions, or -1 for unbounded ("R" without a count)
   * @param startDate   the start instant
   * @param period      the interval between runs
   * @return the formatted ISO8601 expression
   */
  def create(recurrences: Long, startDate: DateTime, period: Period): String =
    if (recurrences != -1)
      "R%d/%s/%s".format(recurrences, formatter.print(startDate), ISOPeriodFormat.standard.print(period))
    else
      "R/%s/%s".format(formatter.print(startDate), ISOPeriodFormat.standard.print(period))
}
| motusllc/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/jobs/Iso8601Expressions.scala | Scala | apache-2.0 | 3,020 |
package cromwell.database.sql
import java.sql.Connection
import java.util.UUID
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
trait SqlDatabase extends AutoCloseable {
  // Config key that holds the JDBC url (passed to withUniqueSchema).
  protected val urlKey: String
  // Raw config as supplied; the url value may contain the ${uniqueSchema} placeholder.
  protected val originalDatabaseConfig: Config

  // Config with ${uniqueSchema} in the url (if present) replaced by a fresh random schema name.
  lazy val databaseConfig = SqlDatabase.withUniqueSchema(originalDatabaseConfig, urlKey)

  /** Runs `block` with a live JDBC connection and returns its result. */
  def withConnection[A](block: Connection => A): A
}
object SqlDatabase {
  /**
   * Modifies config.getString("url") to return a unique schema, if the original url contains the text
   * "\\${uniqueSchema}".
   *
   * This allows each instance of a database object to use a clean, and different, in memory database.
   *
   * @return Config with \\${uniqueSchema} in url replaced with a unique string.
   */
  def withUniqueSchema(config: Config, urlKey: String): Config = {
    val url = config.getString(urlKey)
    if (!url.contains(s"$${uniqueSchema}")) {
      // No placeholder: hand back the config untouched.
      config
    } else {
      // Config wasn't updating with a simple withValue/withFallback.
      // So instead, do a bit of extra work to insert the generated schema name in the url.
      val schemaName = UUID.randomUUID().toString
      val substituted = url.replaceAll("""\\$\\{uniqueSchema\\}""", schemaName)
      val origin = urlKey + " with uniqueSchema=" + schemaName
      val patched = ConfigFactory
        .empty(origin)
        .withValue(urlKey, ConfigValueFactory.fromAnyRef(substituted, origin))
      patched.withFallback(config)
    }
  }
}
| ohsu-comp-bio/cromwell | database/sql/src/main/scala/cromwell/database/sql/SqlDatabase.scala | Scala | bsd-3-clause | 1,511 |
package akashic.storage.service
import akashic.storage.backend.NodePath
import akashic.storage.server
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{ETag, _}
import akka.http.scaladsl.server.Directives._
import com.google.common.net.HttpHeaders._
object HeadObject {
  // HEAD reuses GET's parameter matching (GetObject.matcherCommon); only the
  // HTTP method and the operation name differ.
  val matcher =
    head &
    GetObject.matcherCommon &
    provide("HEAD Object")
  // (from description on transparant-head-requests)
  // Note that, even when this setting is off the server will never send
  // out message bodies on responses to HEAD requests.
  val route =
    matcher.as(GetObject.t)(_.run)
}
object GetObject {
  // Parameters shared by GET and HEAD Object: the object path plus the
  // optional versionId and the response-* header override query parameters.
  val matcherCommon =
    extractObject &
    parameters(
      "versionId"?,
      "response-content-type"?,
      "response-content-language"?,
      "response-expires"?,
      "response-cache-control"?,
      "response-content-disposition"?,
      "response-content-encoding"?)
  val matcher =
    get &
    matcherCommon &
    provide("GET Object")
  val route = matcher.as(t)(_.run)
  // Request model for GET Object; runOnce produces the HTTP response.
  case class t(
    bucketName: String, keyName: String,
    versionId: Option[String], // not used yet
    responseContentType: Option[String],
    responseContentLanguage: Option[String],
    responseExpires: Option[String],
    responseCacheControl: Option[String],
    responseContentDisposition: Option[String],
    responseContentEncoding: Option[String],
    _name: String
  ) extends AuthorizedAPI {
    def name = _name
    def resource = Resource.forObject(bucketName, keyName)
    def runOnce = {
      val bucket = findBucket(server.tree, bucketName)
      val key = findKey(bucket, keyName)
      // Serve the newest version; a key without any version is reported missing.
      val version = key.findLatestVersion match {
        case Some(a) => a
        case None => failWith(Error.NoSuchKey())
      }
      // The caller must hold the Read grant on this version's ACL.
      val versionAcl = version.acl.get
      if (!versionAcl.grant(callerId, Acl.Read()))
        failWith(Error.AccessDenied())
      // TODO if this is a delete marker?
      val meta = version.meta.get
      val filePath: NodePath = version.data.filePath
      // `<+` combines the response-* override with the stored/detected value;
      // presumably the left-hand (query param) side wins — confirm <+ semantics.
      val contentType = responseContentType <+ Some(filePath.detectContentType)
      val contentDisposition = responseContentDisposition <+ meta.attrs.find("Content-Disposition")
      val lastModified = DateTime(filePath.getAttr.creationTime)
      val headers = ResponseHeaderList.builder
        .withHeader(X_AMZ_REQUEST_ID, requestId)
        .withHeader(`Last-Modified`(lastModified))
        .withHeader(ETag(meta.eTag))
        .withHeader(CONTENT_DISPOSITION, contentDisposition)
        .withHeader(meta.xattrs.unwrap)
        .build
      val ct: ContentType = ContentType.parse(contentType.get).right.get
      // Honor conditional (ETag / Last-Modified) and Range requests; the body is
      // streamed from disk (1 << 20 is presumably the chunk size — confirm getSource).
      conditional(EntityTag(meta.eTag), lastModified) {
        withRangeSupport {
          complete(StatusCodes.OK, headers, HttpEntity.Default(ct, filePath.getAttr.length, filePath.getSource(1 << 20)))
        }
      }
    }
  }
}
| akiradeveloper/fss3 | src/main/scala/akashic/storage/service/GetObject.scala | Scala | apache-2.0 | 2,890 |
package almhirt.aggregates
import org.scalatest._
import _root_.java.time.{ ZonedDateTime, LocalDateTime }
import scalaz._, Scalaz._
import almhirt.common._
import almhirt.almvalidation.kit._
class UpdateRecorderWithUpdaterTests extends FlatSpec with Matchers with Inside
  with UserEventHandler with UserUpdater with RebuildsAggregateRootFromTimeline[User, UserEvent] {
  import aggregatesforthelazyones._

  // Deterministic id/timestamp source so recorded events compare equal across
  // runs; members the tests never exercise are left unimplemented (???).
  implicit val ccuad = {
    val dt = LocalDateTime.of(1: Int, 1: Int, 1: Int, 1: Int, 1: Int)
    new CanCreateUuidsAndDateTimes {
      override def getUuid(): java.util.UUID = ???
      override def getUniqueString(): String = "unique"
      override def getDateTime(): ZonedDateTime = ???
      override def getUtcTimestamp(): LocalDateTime = dt
    }
  }

  behavior of "UpdateRecorder used with an AggregateRootUpdater"

  it should "create an aggregate root" in {
    val (ar, events) = create("a", "hans", "meier").recordings.forceResult
    // Creation yields a living (Vivus) aggregate at version 1 with one event recorded.
    inside(ar) {
      case Vivus(User(id, version, surname, lastname, age)) ⇒
        id should equal(arid("a"))
        version should equal(arv(1))
        surname should equal("hans")
        lastname should equal("meier")
        age should equal(None)
    }
    events should equal(List(
      UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")))
  }
  it should "create an aggregate root and modify it" in {
    val (ar, events) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (changeAge(_: User, 18)).liftWith(a)
      } yield b).recordings.forceResult
    // Each applied operation bumps the version by one.
    inside(ar) {
      case Vivus(User(id, version, surname, lastname, age)) ⇒
        id should equal(arid("a"))
        version should equal(arv(2))
        surname should equal("hans")
        lastname should equal("meier")
        age should equal(Some(18))
    }
    val expectedEvents = List(UserCreated(EventHeader(), "a", 0L, "hans", "meier"), UserAgeChanged(EventHeader(), "a", 1L, 18))
    events should equal(expectedEvents)
  }
  it should "create an aggregate root, modify and then kill it" in {
    val (ar, events) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (changeAge(_: User, 18)).liftWith(a)
        c ← (die(_: User)).liftWith(b)
      } yield c).recordings.forceResult
    // Death leaves a Mortuus marker carrying the final id and version.
    inside(ar) {
      case Mortuus(id, version) ⇒
        id should equal(arid("a"))
        version should equal(arv(3))
    }
    val expectedEvents = List(
      UserCreated(EventHeader(), "a", 0L, "hans", "meier"),
      UserAgeChanged(EventHeader(), "a", 1L, 18),
      UserDied(EventHeader(), "a", 2L))
    events should equal(expectedEvents)
  }
  it should "create an aggregate root, modify it many times and then kill it" in {
    val (ar, events) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (changeAge(_: User, 18)).liftWith(a)
        c ← (changeFullName(_: User, "Hans", "Meier")).liftWith(b)
        d ← (leave(_: User)).liftWith(c)
      } yield d).recordings.forceResult
    // changeFullName records two events (surname + lastname), hence version 5.
    inside(ar) {
      case Mortuus(id, version) ⇒
        id should equal(arid("a"))
        version should equal(arv(5))
    }
    val expectedEvents = List(
      UserCreated(EventHeader(), "a", 0L, "hans", "meier"),
      UserAgeChanged(EventHeader(), "a", 1L, 18),
      UserSurnameChanged(EventHeader(), "a", 2L, "Hans"),
      UserLastnameChanged(EventHeader(), "a", 3L, "Meier"),
      UserLeft(EventHeader(), "a", 4L))
    events should equal(expectedEvents)
  }
  it should "allow a lifecycle transition from Vacat directly to Mortuus" in {
    val (ar, events) = doNotAccept("a", "hans", "meier").recordings.forceResult
    inside(ar) {
      case Mortuus(id, version) ⇒
        id should equal(arid("a"))
        version should equal(arv(1))
    }
    events should equal(List(UserNotAccepted(EventHeader(), arid("a"), arv(0L), "hans", "meier")))
  }
  it should "create a timeline that leads to the same result" in {
    // Replaying the recorded events must reconstruct the same aggregate state.
    val (ar, timeline) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (changeAge(_: User, 18)).liftWith(a)
        c ← (changeFullName(_: User, "Hans", "Meier")).liftWith(b)
      } yield c).recordings.forceResult
    ar should equal(rebuildFromTimeline(timeline))
  }
  it should "not allow modification after death(Mortuus)" in {
    val ur =
      (for {
        a ← create("a", "hans", "meier")
        b ← (die(_: User)).liftWith(a)
        c ← (changeAge(_: User, 18)).liftWith(b)
      } yield c)
    ur.isRejected should equal(true)
  }
  it should "return the events until the invalid operation happened(excluding the invalid ones)" in {
    val ur =
      (for {
        a ← create("a", "hans", "meier")
        b ← (die(_: User)).liftWith(a)
        c ← (changeAge(_: User, 18)).liftWith(b)
      } yield c)
    val expectedEvents = List(
      UserCreated(EventHeader(), "a", 0L, "hans", "meier"),
      UserDied(EventHeader(), "a", 1L))
    ur.events should equal(expectedEvents)
  }
  it should "return events up to an invalid operation that form a valid timeline up to the last state before the invalid operation" in {
    // changeSurname(" ") fails; the rebuilt state reflects only the events
    // recorded before that failure (version 2, age set, names unchanged).
    val timeline =
      (for {
        a ← create("a", "hans", "meier")
        a ← (changeAge(_: User, 18)).liftWith(a)
        a ← (changeSurname(_: User, " ")).liftWith(a)
        a ← (changeFullName(_: User, "Hans", "Meier")).liftWith(a)
      } yield a).events
    inside(rebuildFromTimeline(timeline)) {
      case Vivus(User(id, version, surname, lastname, age)) ⇒
        id should equal(arid("a"))
        version should equal(arv(2))
        surname should equal("hans")
        lastname should equal("meier")
        age should equal(Some(18))
    }
  }
  it should "ALLOW recreation(Vacat→Vivus→Mortuus→Vivus) even though the timeline becomes invalid" in {
    val (ar, events) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (die(_: User)).liftWith(a)
        c ← create("a", "hans", "meier") // See, that there is no b here?
      } yield c).recordings.forceResult
    inside(ar) {
      case Vivus(User(id, version, surname, lastname, age)) ⇒
        id should equal(arid("a"))
        version should equal(arv(1))
        surname should equal("hans")
        lastname should equal("meier")
        age should equal(None)
    }
    // Note the version resets to 0 on the second UserCreated — the event list is
    // recorded as-is even though it no longer forms a replayable timeline.
    val expectedEvents = List(
      UserCreated(EventHeader(), "a", 0L, "hans", "meier"),
      UserDied(EventHeader(), "a", 1L),
      UserCreated(EventHeader(), "a", 0L, "hans", "meier"))
    events should equal(expectedEvents)
  }
  it should "create an invalid timeline when the aggregate root is recreated(Vacat→Vivus→Mortuus→Vivus)" in {
    val (ar, timeline) =
      (for {
        a ← create("a", "hans", "meier")
        b ← (die(_: User)).liftWith(a)
        c ← create("a", "hans", "meier")
      } yield c).recordings.forceResult
    // Replaying the recreation timeline is expected to blow up.
    intercept[Exception] {
      rebuildFromTimeline(timeline)
    }
  }
}
| chridou/almhirt | almhirt-common/src/test/scala/almhirt/aggregates/UpdateRecorderWithUpdaterTests.scala | Scala | apache-2.0 | 6,958 |
package database.common
/**
* Created by cf on 9/27/2016.
*/
object Common {
  /** Kinds of database write actions. Ids are 0..3 in declaration order. */
  object ActionType extends Enumeration {
    // Enumeration assigns ids automatically in declaration order, matching the
    // original explicit Value(0)..Value(3).
    val INSERT, UPDATE, DELETE, REBUILD = Value
  }
}
| astrofed/akka-rest-slick-flyway-auth-token | db-api/src/main/scala/database/common/Common.scala | Scala | mit | 235 |
package slick.test.codegen
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import slick.codegen.SourceCodeGenerator
import slick.jdbc.JdbcProfile
/** Generates code for CodeGenRoundTripTest.
*
* This is generated using Derby currently because Derby strips column size of some columns, which
* works with all backends. If the code was generated using model data where the size is included
* it would fail in derby and hsqldb. The code is tested using all enabled profiles. We should also
* diversify generation as well at some point. */
object GenerateRoundtripSources {
  // args(0) is the target source folder handed to writeToFile.
  def main(args: Array[String]) {
    val profile = slick.jdbc.H2Profile
    val url = "jdbc:h2:mem:test4"
    val jdbcDriver = "org.h2.Driver"
    object Tables extends Tables(profile)
    import Tables._
    import Tables.profile.api._
    // DDL for every test table defined in Tables.
    val ddl = posts.schema ++ categories.schema ++ typeTest.schema ++ large.schema ++ `null`.schema ++ X.schema ++ SingleNonOptionColumn.schema ++ SelfRef.schema
    // First generator: custom table naming so that "null" survives as a table name.
    val a1 = profile.createModel(ignoreInvalidDefaults=false).map(m => new SourceCodeGenerator(m) {
      override def tableName = {
        case n if n.toLowerCase == "null" => "null" // testing null as table name
        case n => super.tableName(n)
      }
    })
    // Second generator: auto-increment columns are moved last and mapped to Option.
    val a2 = profile.createModel(ignoreInvalidDefaults=false).map(m => new SourceCodeGenerator(m) {
      override def Table = new Table(_){
        override def autoIncLast = true
        override def Column = new Column(_){
          override def asOption = autoInc
        }
      }
    })
    val db = Database.forURL(url=url, driver=jdbcDriver, keepAliveConnection=true)
    // Create the schema, then build both generators from the resulting model;
    // the connection is closed even if generation fails.
    val (gen,gen2) = try Await.result(db.run(ddl.create >> (a1 zip a2)), Duration.Inf) finally db.close
    val pkg = "slick.test.codegen.roundtrip"
    // The second generator's output goes to a sibling package with suffix "2".
    gen.writeToFile( "slick.jdbc.H2Profile", args(0), pkg )
    gen2.writeToFile( "slick.jdbc.H2Profile", args(0), pkg+"2" )
  }
}
class Tables(val profile: JdbcProfile){
import profile.api._
/** Tests single column table, scala keyword type name, non-dentifier column name and all nullable columns table*/
class `null`(tag: Tag) extends Table[Option[String]](tag, "null") {
def name = column[Option[String]]("na me")
def * = name
}
val `null` = TableQuery[`null`]
/** Tests table with self-referring foreign key */
class SelfRef(tag: Tag) extends Table[(Int,Option[Int])](tag, "SELF_REF") {
def id = column[Int]("id",O.AutoInc)
def parent = column[Option[Int]]("parent")
def parentFK = foreignKey("parent_fk", parent, SelfRef)(_.id.?)
def * = (id,parent)
}
val SelfRef = TableQuery[SelfRef]
/** Tests single column table, scala keyword type name and all nullable columns table*/
class SingleNonOptionColumn(tag: Tag) extends Table[String](tag, "SingleNonOptionColumn") {
def name = column[String]("name")
def * = name
}
val SingleNonOptionColumn = TableQuery[SingleNonOptionColumn]
/** Tests single column table and collision with generated names */
class all(tag: Tag) extends Table[String](tag, "all") {
def dynamic = column[String]("dynamic")
def * = dynamic
}
val all = TableQuery[all]
/** Tests slick term name collision */
class X(tag: Tag) extends Table[(Int,Int,Option[Int],Int,Double,String,Option[Int],Option[Int],Option[String],Option[String],Option[String])](tag, "X") {
def pk = column[Int]("pk")
def pk2 = column[Int]("pk2")
def pkpk = primaryKey( "", (pk,pk2) ) // pk column collision
def i1 = column[Option[Int]]("index_1") // scala keyword collision
def c = column[Int]("column") // slick Table method with args collision
def p = column[Option[Int]]("posts")
def a = column[Option[Int]]("val") // scala keyword collision
def s = column[Double]("schema_name") // slick Table no-arg method collision
def sx = column[String]("schema_name_x") // column name collision after disambiguation
def t_ag = column[Option[String]]("tag") // column name collision after disambiguation
def tt = column[Option[String]]("_table_tag") // column name collision after disambiguation
def _underscore = column[Option[String]]("_underscore") // column name collision after disambiguation
def * = (pk,pk2,a,c,s,sx,i1,p,t_ag,tt,_underscore)
def idx1 = index("",i1) // idx column collision
def idx2 = index("i2",i1) // idx column collision
def idx3 = index("foo",c,unique=true)
def idx4 = index("bar",p,unique=true)
def categoryFK1 = foreignKey("fk1", pk, categories)(_.id) // dup FK collision
def categoryFK2 = foreignKey("fk2", pk2, categories)(_.id)
def postsFK = foreignKey("fk_to_posts", p, posts)(_.id.?) // fk column name collision
}
val X = TableQuery[X]
case class Category(id: Int, name: String)
class Categories(tag: Tag) extends Table[Category](tag, "categories") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name", O.Length(254))
def * = (id, name) <> (Category.tupled,Category.unapply)
def idx = index("IDX_NAME",name)
}
val categories = TableQuery[Categories]
class Posts(tag: Tag) extends Table[(Int, String, Option[Int])](tag, "POSTS") {
def id = column[Int]("id")
def title = column[String]("title")
def category = column[Option[Int]]("category")
def * = (id, title, category)
def categoryFK = foreignKey("_", category, categories)(_.id.?)
}
val posts = TableQuery[Posts]
// Clob disabled because it fails in postgres and mysql, see https://github.com/slick/slick/issues/637
class TypeTest(tag: Tag) extends Table[(
String,Boolean,Byte,Short,Int,Long,Float,Double,String,java.sql.Date,java.sql.Time,java.sql.Timestamp,java.util.UUID,java.sql.Blob//,java.sql.Clob
,Option[Int]
,(
Option[Boolean],Option[Byte],Option[Short],Option[Int],Option[Long],Option[Float],Option[Double],Option[String],Option[java.sql.Date],Option[java.sql.Time],Option[java.sql.Timestamp],Option[java.util.UUID],Option[java.sql.Blob]//,Option[java.sql.Clob]
)
)](tag, "TYPE_TEST") {
def `type` = column[String]("type") // <- test escaping of keywords
def Boolean = column[Boolean]("Boolean",O.Default(true), O.Unique)
def Byte = column[Byte]("Byte")
def Short = column[Short]("Short")
def Int = column[Int]("Int",O.Default(-5))
def Long = column[Long]("Long",O.Default(5L))
//def java_math_BigInteger = column[java.math.BigInteger]("java_math_BigInteger")
def Float = column[Float]("Float",O.Default(9.999F))
def Double = column[Double]("Double",O.Default(9.999))
//def java_math_BigDecimal = column[java.math.BigDecimal]("java_math_BigDecimal")
def String = column[String]("String",O.Default("someDefaultString"),O.Length(254))
def java_sql_Date = column[java.sql.Date]("java_sql_Date")
def java_sql_Time = column[java.sql.Time]("java_sql_Time")
def java_sql_Timestamp = column[java.sql.Timestamp]("java_sql_Timestamp")
def java_util_UUID = column[java.util.UUID]("java_util_UUID")
def java_sql_Blob = column[java.sql.Blob]("java_sql_Blob")
//def java_sql_Clob = column[java.sql.Clob]("java_sql_Clob")
def None_Int = column[Option[Int]]("None_Int",O.Default(None))
def Option_Boolean = column[Option[Boolean]]("Option_Boolean",O.Default(Some(true)))
def Option_Byte = column[Option[Byte]]("Option_Byte")
def Option_Short = column[Option[Short]]("Option_Short")
def Option_Int = column[Option[Int]]("Option_Int",O.Default(Some(5)))
def Option_Long = column[Option[Long]]("Option_Long",O.Default(Some(5L)))
//def java_math_BigInteger = column[Option[java.math.BigInteger]]("java_math_BigInteger")
def Option_Float = column[Option[Float]]("Option_Float",O.Default(Some(9.999F)))
def Option_Double = column[Option[Double]]("Option_Double",O.Default(Some(9.999)))
//def java_math_BigDecimal = column[Option[java.math.BigDecimal]]("java_math_BigDecimal")
def Option_String = column[Option[String]]("Option_String",O.Default(Some("someDefaultString")),O.Length(254))
def Option_java_sql_Date = column[Option[java.sql.Date]]("Option_java_sql_Date")
def Option_java_sql_Time = column[Option[java.sql.Time]]("Option_java_sql_Time")
def Option_java_sql_Timestamp = column[Option[java.sql.Timestamp]]("Option_java_sql_Timestamp")
def Option_java_util_UUID = column[Option[java.util.UUID]]("Option_java_util_UUID")
def Option_java_sql_Blob = column[Option[java.sql.Blob]]("Option_java_sql_Blob")
def Option_java_sql_Option_Blob = column[Option[Option[java.sql.Blob]]]("Option_java_sql_Blob")
//def Option_java_sql_Clob = column[Option[java.sql.Clob]]("Option_java_sql_Clob")
def * = (
`type`,
Boolean,Byte,Short,Int,Long,Float,Double,String,java_sql_Date,java_sql_Time,java_sql_Timestamp,java_util_UUID,java_sql_Blob//,java_sql_Clob
,None_Int
,(
Option_Boolean,Option_Byte,Option_Short,Option_Int,Option_Long,Option_Float,Option_Double,Option_String,Option_java_sql_Date,Option_java_sql_Time,Option_java_sql_Timestamp,Option_java_util_UUID,Option_java_sql_Blob//,Option_java_sql_Clob
)
)
def pk = primaryKey("PK", (Int,Long))
}
val typeTest = TableQuery[TypeTest]
// testing table larger 22 columns (code gen round trip does not preserve structure of the * projection or names of mapped to classes)
case class Part(i1: Int, i2: Int, i3: Int, i4: Int, i5: Int, i6: Int)
case class Whole(id: Long, p1: Part, p2: Part, p3: Part, p4: Part, p5: Part, p6: Part)
// 37-column table mapped onto nested case classes via a shaped projection.
// Each column pXiY defaults to the two-digit value XY, making mismatches easy to spot.
class Large(tag: Tag) extends Table[Whole](tag, "LARGE") {
  def id = column[Long]("id", O.PrimaryKey)
  def p1i1 = column[Int]("p1i1",O.Default(11))
  def p1i2 = column[Int]("p1i2",O.Default(12))
  def p1i3 = column[Int]("p1i3",O.Default(13))
  def p1i4 = column[Int]("p1i4",O.Default(14))
  def p1i5 = column[Int]("p1i5",O.Default(15))
  def p1i6 = column[Int]("p1i6",O.Default(16))
  def p2i1 = column[Int]("p2i1",O.Default(21))
  def p2i2 = column[Int]("p2i2",O.Default(22))
  def p2i3 = column[Int]("p2i3",O.Default(23))
  def p2i4 = column[Int]("p2i4",O.Default(24))
  def p2i5 = column[Int]("p2i5",O.Default(25))
  def p2i6 = column[Int]("p2i6",O.Default(26))
  def p3i1 = column[Int]("p3i1",O.Default(31))
  def p3i2 = column[Int]("p3i2",O.Default(32))
  def p3i3 = column[Int]("p3i3",O.Default(33))
  def p3i4 = column[Int]("p3i4",O.Default(34))
  def p3i5 = column[Int]("p3i5",O.Default(35))
  def p3i6 = column[Int]("p3i6",O.Default(36))
  def p4i1 = column[Int]("p4i1",O.Default(41))
  def p4i2 = column[Int]("p4i2",O.Default(42))
  def p4i3 = column[Int]("p4i3",O.Default(43))
  def p4i4 = column[Int]("p4i4",O.Default(44))
  def p4i5 = column[Int]("p4i5",O.Default(45))
  def p4i6 = column[Int]("p4i6",O.Default(46))
  def p5i1 = column[Int]("p5i1",O.Default(51))
  def p5i2 = column[Int]("p5i2",O.Default(52))
  def p5i3 = column[Int]("p5i3",O.Default(53))
  def p5i4 = column[Int]("p5i4",O.Default(54))
  def p5i5 = column[Int]("p5i5",O.Default(55))
  def p5i6 = column[Int]("p5i6",O.Default(56))
  def p6i1 = column[Int]("p6i1",O.Default(61))
  def p6i2 = column[Int]("p6i2",O.Default(62))
  def p6i3 = column[Int]("p6i3",O.Default(63))
  def p6i4 = column[Int]("p6i4",O.Default(64))
  def p6i5 = column[Int]("p6i5",O.Default(65))
  def p6i6 = column[Int]("p6i6",O.Default(66))
  // The projection nests the columns into six 6-tuples so each tuple stays under
  // the 22-element limit, then maps the nested shape to Whole/Part.
  def * = (
    id,
    (p1i1, p1i2, p1i3, p1i4, p1i5, p1i6),
    (p2i1, p2i2, p2i3, p2i4, p2i5, p2i6),
    (p3i1, p3i2, p3i3, p3i4, p3i5, p3i6),
    (p4i1, p4i2, p4i3, p4i4, p4i5, p4i6),
    (p5i1, p5i2, p5i3, p5i4, p5i5, p5i6),
    (p6i1, p6i2, p6i3, p6i4, p6i5, p6i6)
  ).shaped <> ({ case (id, p1, p2, p3, p4, p5, p6) =>
    // We could do this without .shaped but then we'd have to write a type annotation for the parameters
    Whole(id, Part.tupled.apply(p1), Part.tupled.apply(p2), Part.tupled.apply(p3), Part.tupled.apply(p4), Part.tupled.apply(p5), Part.tupled.apply(p6))
  }, { w: Whole =>
    // Inverse mapping: flatten a Whole back into the nested column tuples.
    def f(p: Part) = Part.unapply(p).get
    Some((w.id, f(w.p1), f(w.p2), f(w.p3), f(w.p4), f(w.p5), f(w.p6)))
  })
}
val large = TableQuery[Large] // query entry point for the LARGE table
}
| xavier-fernandez/slick | slick-testkit/src/codegen/scala/slick/test/codegen/GenerateRoundtripSources.scala | Scala | bsd-2-clause | 12,238 |
package com.github.mdr.mash.ns.core
import com.github.mdr.mash.evaluator.EvaluationInterruptedException
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, Parameter, ParameterModel }
import com.github.mdr.mash.inference._
import com.github.mdr.mash.ns.core.NoArgFunction.NoArgValue
import com.github.mdr.mash.runtime.{ MashUnit, MashValue }
import scala.util.control.NonFatal
/** `core.try`: runs a lazily-evaluated body with optional `catch` and `finally` blocks. */
object TryFunction extends MashFunction("core.try") {

  object Params {
    // All three parameters are lazy, so their code only runs when this function
    // explicitly invokes it below.
    val Body = Parameter(
      nameOpt = Some("body"),
      summaryOpt = Some("Code to execute"),
      isLazy = true)
    val Catch = Parameter(
      nameOpt = Some("catch"),
      summaryOpt = Some("Code to execute if an exception is thrown in the body"),
      defaultValueGeneratorOpt = Some(NoArgValue),
      isLazy = true)
    val Finally = Parameter(
      nameOpt = Some("finally"),
      summaryOpt = Some("Code to execute after execution of the body, regardless of whether an exception is thrown or not"),
      defaultValueGeneratorOpt = Some(NoArgValue),
      isLazy = true)
  }
  import Params._

  val params = ParameterModel(Body, Catch, Finally)

  def call(boundParams: BoundParams): MashValue = {
    // Lazy parameters are bound as nullary functions.
    val body = boundParams(Body).asInstanceOf[MashFunction]
    try
      body.callNullary()
    catch {
      // User interruption is rethrown rather than handled by the catch block.
      // (Matches the companion as a stable identifier — presumably
      // EvaluationInterruptedException is an object/case object; TODO confirm.)
      case EvaluationInterruptedException ⇒ throw EvaluationInterruptedException
      case NonFatal(_) ⇒
        // Run the catch block if supplied; otherwise the expression evaluates to unit.
        NoArgFunction.option(boundParams(Catch)) match {
          case Some(catchBlock) ⇒ catchBlock.asInstanceOf[MashFunction].callNullary()
          case None ⇒ MashUnit
        }
    } finally
      // The finally block (if supplied) runs whether or not the body threw.
      for (finallyBlock ← NoArgFunction.option(boundParams(Finally)))
        finallyBlock.asInstanceOf[MashFunction].callNullary()
  }

  // The result type of `try` is the inferred type of its body.
  override def typeInferenceStrategy = new TypeInferenceStrategy {
    def inferTypes(inferencer: Inferencer, arguments: TypedArguments): Option[Type] =
      params.bindTypes(arguments).getType(Body)
  }

  override def summaryOpt = Some("Execute the given code, catching any exceptions")
} | mdr/mash | src/main/scala/com/github/mdr/mash/ns/core/TryFunction.scala | Scala | mit | 2,070 |
package com.twitter.server.view
import com.twitter.finagle.http.Status
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.io.{Buf, Charsets}
import com.twitter.server.util.HttpUtils._
import com.twitter.util.Future
object NotFoundView {

  // Static HTML body served in place of empty 404 responses (browser clients only).
  // Note: declared as an s-interpolated literal although it contains no interpolations.
  private val NotFoundHtml: String =
    s"""<html>
<head>
<title>404 · Twitter Server Admin</title>
<link type="text/css" href="/admin/files/css/bootstrap.min.css" rel="stylesheet"/>
</head>
<style>
body { margin: 20px; }
</style>
<body>
<h3>404 <small>The page you requested could not be found.</small></h3>
<hr/>
<h6>
<em>
Some endpoints are only available when the correct dependencies are added
to your class path.
<br/>
For more information, please see
<a href="http://twitter.github.io/twitter-server/Features.html#http-admin-interface">
the twitter-server docs
</a> or return to <a href="/admin">/admin</a>.
</em>
</h6>
</body>
</html>"""
}
class NotFoundView extends SimpleFilter[Request, Response] {
  import NotFoundView._

  /**
   * Replaces empty 404 responses with a friendly HTML page, but only when the
   * client looks like a web browser; non-browser clients get the raw response.
   */
  def apply(req: Request, svc: Service[Request, Response]) = {
    val rep = svc(req)
    if (!isWebBrowser(req)) rep
    else rep.flatMap { res =>
      if (res.getStatus != Status.NotFound) Future.value(res)
      else newResponse(
        contentType = "text/html;charset=UTF-8",
        status = Status.NotFound,
        content = Buf.Utf8(NotFoundHtml)
      )
    }
  }
} | cogitate/twitter-server-uuid | src/main/scala/com/twitter/server/view/NotFoundView.scala | Scala | apache-2.0 | 1,659 |
package com.liangdp.graphviz4s.examples
import com.liangdp.graphviz4s.Graph
import scala.collection.mutable.Map
/**
 * Scala port of the graphviz Python example:
 * https://github.com/xflr6/graphviz/blob/master/examples/er.py
 * Builds an entity-relation diagram and renders it with the NEATO engine.
 * @author Depeng Liang
 */
object Er {

  def main(args: Array[String]): Unit = {
    val e = new Graph("ER")

    // Entities are boxes.
    e.attr("node", Map("shape" -> "box"))
    Seq("course", "institute", "student").foreach(n => e.node(n))

    // Attributes are ellipses; the three name* nodes all display as "name".
    e.attr("node", Map("shape" -> "ellipse"))
    Seq("name0", "name1", "name2").foreach(n => e.node(n, attrs = Map("label" -> "name")))
    Seq("code", "grade", "number").foreach(n => e.node(n))

    // Relations are grey diamonds; their names carry their own quote characters.
    e.attr("node", attrs = Map("shape" -> "diamond",
      "style" -> "filled", "color" -> "lightgrey"))
    Seq("C-I", "S-C", "S-I").foreach(n => e.node(n))

    // (tail, head, optional cardinality label); labelled edges get a fixed length.
    val edges = Seq(
      ("name0", "course", None),
      ("code", "course", None),
      ("course", """"C-I"""", Some("n")),
      (""""C-I"""", "institute", Some("1")),
      ("institute", "name1", None),
      ("institute", """"S-I"""", Some("1")),
      (""""S-I"""", "student", Some("n")),
      ("student", "grade", None),
      ("student", "name2", None),
      ("student", "number", None),
      ("student", """"S-C"""", Some("m")),
      (""""S-C"""", "course", Some("n"))
    )
    edges.foreach {
      case (tail, head, Some(lbl)) => e.edge(tail, head, label = lbl, attrs = Map("len" -> "1.00"))
      case (tail, head, None)      => e.edge(tail, head)
    }

    e.body += """label = "\\n\\nEntity Relation Diagram\\ndrawn by NEATO""""
    e.body += "fontsize=20"

    e.view(engine = "neato", fileName = "er.gv", directory = ".")
  }
}
| Ldpe2G/Graphviz4S | src/main/scala/com/liangdp/graphviz4s/examples/Er.scala | Scala | mit | 1,700 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.app.routing
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.FileInfo
import akka.pattern.ask
import akka.stream.scaladsl.{Source => StreamSource}
import akka.util.ByteString
import se.nimsa.dicom.streams.DicomStreamException
import se.nimsa.sbx.app.GeneralProtocol.SourceType.IMPORT
import se.nimsa.sbx.app.GeneralProtocol._
import se.nimsa.sbx.app.SliceboxBase
import se.nimsa.sbx.dicom.Contexts
import se.nimsa.sbx.dicom.DicomHierarchy.Image
import se.nimsa.sbx.importing.ImportProtocol._
import se.nimsa.sbx.log.SbxLog
import se.nimsa.sbx.metadata.MetaDataProtocol.GetImage
import se.nimsa.sbx.user.UserProtocol.ApiUser
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
 * HTTP routes for DICOM import sessions: listing, creating and deleting
 * sessions, and adding images to a session.
 */
trait ImportRoutes {
  this: SliceboxBase =>

  def importRoutes(apiUser: ApiUser): Route =
    // POST /import/sessions/{id}/images — accepts either a multipart "file"
    // upload or raw entity bytes; size limit is lifted for large DICOM files.
    path("import" / "sessions" / LongNumber / "images") { id =>
      post {
        withoutSizeLimit {
          fileUpload("file") {
            case (fileInfo, bytes) => addImageToImportSessionRoute(Some(fileInfo), bytes, id)
          } ~ extractDataBytes { bytes =>
            addImageToImportSessionRoute(None, bytes, id)
          }
        }
      }
    } ~ pathPrefix("import") {
      pathPrefix("sessions") {
        pathEndOrSingleSlash {
          // GET /import/sessions — paged listing of sessions.
          get {
            parameters((
              'startindex.as(nonNegativeFromStringUnmarshaller) ? 0,
              'count.as(nonNegativeFromStringUnmarshaller) ? 20)) { (startIndex, count) =>
              onSuccess(importService.ask(GetImportSessions(startIndex, count))) {
                case ImportSessions(importSessions) =>
                  complete(importSessions)
              }
            }
          } ~ post {
            // POST /import/sessions — create a session owned by the calling user
            // and announce the new IMPORT source on the event stream.
            entity(as[ImportSession]) { importSession =>
              onSuccess(importService.ask(AddImportSession(importSession.copy(user = apiUser.user, userId = apiUser.id)))) {
                case importSession: ImportSession => {
                  system.eventStream.publish(SourceAdded(SourceRef(IMPORT, importSession.id)))
                  complete((Created, importSession))
                }
              }
            }
          }
        } ~ pathPrefix(LongNumber) { id =>
          pathEndOrSingleSlash {
            // GET /import/sessions/{id} — rejectEmptyResponse turns None into 404.
            (get & rejectEmptyResponse) {
              complete(importService.ask(GetImportSession(id)).mapTo[Option[ImportSession]])
            } ~ delete {
              // DELETE also announces removal of the corresponding IMPORT source.
              complete(importService.ask(DeleteImportSession(id)).map(_ => {
                system.eventStream.publish(SourceDeleted(SourceRef(IMPORT, id)))
                NoContent
              }))
            }
          } ~ path("images") {
            // GET /import/sessions/{id}/images — resolve full image metadata for
            // each image id in the session; missing images are silently dropped.
            get {
              onSuccess(importService.ask(GetImportSessionImages(id))) {
                case ImportSessionImages(importSessionImages) =>
                  complete {
                    Future.sequence {
                      importSessionImages.map { importSessionImage =>
                        metaDataService.ask(GetImage(importSessionImage.imageId)).mapTo[Option[Image]]
                      }
                    }.map(_.flatten)
                  }
              }
            }
          }
        }
      }
    }

  /**
   * Stores an incoming DICOM byte stream under the given import session.
   * Responds 201 for newly added images, 200 for overwrites, 404 when the
   * session does not exist, 400 on DICOM stream errors and 500 otherwise;
   * failures are logged and recorded as rejections on the session.
   */
  def addImageToImportSessionRoute(fileInfo: Option[FileInfo], bytes: StreamSource[ByteString, Any], importSessionId: Long): Route = {
    onSuccess(importService.ask(GetImportSession(importSessionId)).mapTo[Option[ImportSession]]) {
      case Some(importSession) =>
        val source = Source(SourceType.IMPORT, importSession.name, importSessionId)
        val futureImport = storeDicomData(bytes, source, storage, Contexts.imageDataContexts, reverseAnonymization = true)
        onComplete(futureImport) {
          case Success(metaData) =>
            onSuccess(importService.ask(AddImageToSession(importSession.id, metaData.image, !metaData.imageAdded)).mapTo[ImageAddedToSession]) { _ =>
              // imageAdded == false means an existing image was overwritten.
              val overwrite = !metaData.imageAdded
              system.eventStream.publish(ImageAdded(metaData.image.id, source, overwrite))
              val httpStatus = if (metaData.imageAdded) Created else OK
              complete((httpStatus, metaData.image))
            }
          case Failure(failure) =>
            failure.printStackTrace()
            val status = failure match {
              case _: DicomStreamException => BadRequest
              case _ => InternalServerError
            }
            // Include the uploaded file name in the log/response when we have one.
            fileInfo match {
              case Some(fi) =>
                SbxLog.error("Import", s"${failure.getClass.getSimpleName} during import of ${fi.fileName}: ${failure.getMessage}")
                onComplete(importService.ask(UpdateSessionWithRejection(importSession.id))) {
                  _ => complete((status, s"${fi.fileName}: ${failure.getMessage}"))
                }
              case None =>
                SbxLog.error("Import", s"${failure.getClass.getSimpleName} during import: ${failure.getMessage}")
                onComplete(importService.ask(UpdateSessionWithRejection(importSession.id))) {
                  _ => complete((status, failure.getMessage))
                }
            }
        }
      case None =>
        complete(NotFound)
    }
  }
}
| slicebox/slicebox | src/main/scala/se/nimsa/sbx/app/routing/ImportRoutes.scala | Scala | apache-2.0 | 5,887 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ittest.org.apache.geode.spark
import org.scalatest.Tag
package object connector {
  // ScalaTest tags used to selectively run groups of integration tests.
  object OnlyTest extends Tag("OnlyTest")
  // NOTE(review): the tag string "FetchDateTest" does not match the object name
  // FetchDataTest ("Date" vs "Data") — looks like a typo, but CI scripts may
  // filter on the literal string, so confirm before changing it.
  object FetchDataTest extends Tag("FetchDateTest")
  object FilterTest extends Tag("FilterTest")
  object JoinTest extends Tag("JoinTest")
  object OuterJoinTest extends Tag("OuterJoinTest")
}
| prasi-in/geode | geode-spark-connector/geode-spark-connector/src/it/scala/ittest/org/apache/geode/spark/connector/package.scala | Scala | apache-2.0 | 1,137 |
package com.twitter.server.handler
import com.twitter.finagle.http.HttpMuxer
import com.twitter.finagle.{Service, httpx}
import com.twitter.io.Buf
import com.twitter.server.util.HttpUtils._
import com.twitter.util.Future
/**
 * Serves an index of mux endpoints under `prefix` as HTML anchors for browser
 * clients, or as a plain-text list otherwise. By default the patterns come
 * from the global [[com.twitter.finagle.http.HttpMuxer]] and
 * [[com.twitter.finagle.httpx.HttpMuxer]].
 */
class IndexHandler(
  prefix: String = "/",
  patterns: Seq[String] = HttpMuxer.patterns ++ httpx.HttpMuxer.patterns)
  extends Service[Request, Response] {

  def apply(req: Request): Future[Response] = {
    val matching = patterns.filter(_.startsWith(prefix))
    if (expectsHtml(req)) {
      val anchors = matching.map(p => s"<a href='$p'>$p</a>")
      newResponse(
        contentType = "text/html;charset=UTF-8",
        content = Buf.Utf8(anchors.mkString("<br />\\n"))
      )
    } else {
      newOk(matching.mkString("\\n"))
    }
  }
} | travisbrown/twitter-server | src/main/scala/com/twitter/server/handler/IndexHandler.scala | Scala | apache-2.0 | 938 |
// IntelliJ Scala plugin type-inference test fixture: the /*start*/…/*end*/ markers
// delimit the expression under test; its expected type is given on the file's last line.
object CompoundTypeUnapply {
  // Extractor typed as a structural (compound) type: anything exposing a
  // matching `unapply` can be used in patterns.
  val Length: {def unapply(s: String): Option[Int]} = new {
    def unapply(s: String): Option[Int] = Some(s.length)
  }

  "text" match {
    case Length(length) => /*start*/length/*end*/
  }
}
//Int | LPTK/intellij-scala | testdata/typeInference/bugs5/CompoundTypeUnapply.scala | Scala | apache-2.0 | 229 |
package 四法

package 测试1 {
  // (translated from the original Chinese notes)
  // Subtraction: flows in both directions, both sides decrease
  // Addition: one-way flow, I decrease while you increase
  // Position: reversed one-way flow, I decrease and you decrease (uncertain)

  trait P7Num1 {
    def method2(num7: P7Num2): P7Result
  }

  case class P7Num1S(tail: P7Num1) extends P7Num1 {
    // Successor node: hand control to the other number, passing our predecessor.
    override def method2(num7: P7Num2): P7Result = num7.method1(tail)
  }

  case class P7Num1T(tail: () => P7Num1) extends P7Num1 {
    // "Knot" node: defer to the lazily-referenced head, closing the cycle.
    override def method2(num7: P7Num2): P7Result = tail().method2(num7)
  }

  trait P7Num2 {
    def method1(num7: P7Num1): P7Result
  }

  case class P7Num2S(tail: P7Num2) extends P7Num2 {
    override def method1(num7: P7Num1): P7Result = {
      // Count each time this successor case fires before the mutual recursion
      // exhausts the stack.
      Counter.count += 1
      P7ResultP(tail.method1(num7))
    }
  }

  case class P7Num2T(tail: () => P7Num2) extends P7Num2 {
    override def method1(num7: P7Num1): P7Result = num7.method2(tail())
  }

  object Counter {
    var count: Int = 0 // number of P7Num2S.method1 invocations observed
  }

  trait P7Result
  case class P7ResultP(tail: P7Result) extends P7Result

  object Test1 extends App {
    // Two cyclic lazy numbers (periods 5 and 3); by construction the mutual
    // recursion never reaches a base case, so a StackOverflowError is expected
    // and deliberately swallowed — the interesting output is the counter value.
    lazy val num1: P7Num1 = P7Num1S(P7Num1S(P7Num1S(P7Num1S(P7Num1S(num1Zero)))))
    lazy val num1Zero: P7Num1 = P7Num1T(() => num1)
    lazy val num2: P7Num2 = P7Num2S(P7Num2S(P7Num2S(num2Zero)))
    lazy val num2Zero: P7Num2 = P7Num2T(() => num2)
    try {
      num1.method2(num2)
    } catch {
      case e: StackOverflowError =>
    }
    println(Counter.count)
  }
}
| djx314/ubw | a60-四/src/main/scala/四法/Counter7.scala | Scala | bsd-3-clause | 1,415 |
// The MIT License (MIT)
//
// Copyright (c) 2015 AT&T
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import java.io.{File, PrintWriter, IOException}
import java.util.Calendar
import java.text.SimpleDateFormat
import org.json4s.jackson.JsonMethods
/**
 * Queries an InfluxDB series and saves the result as a timestamped CSV file.
 * Mutable settings are populated from command-line flags (see `usage`).
 */
object ResultCollector {

  val usage =
    """
Usage: java -jar resultcollector-assembly-1.0.jar -db "database" -query "InfluxDBQuery" -name "NameForTheSavedResult" [-dir "OutputDirectory"]
"""

  val INFLUXDB_HOST = "172.31.2.11"
  val INFLUXDB_PORT = 31410

  // Parsed command-line state; defaults apply when a flag is absent.
  var database = "foo"
  var query = "foo"
  var resultname = "foo"
  var dir = "."

  /** Populates the mutable settings above from -db/-query/-name/-dir flags. */
  def handleCmdlineArguments(args: Array[String]) = {
    // Index of the value following a flag; safe because callers check `contains` first.
    def getFlagIndex(flag: String) : Int = args.indexOf(flag) + 1
    if (args.contains("-db")) database = args(getFlagIndex("-db"))
    if (args.contains("-query")) query = args(getFlagIndex("-query"))
    if (args.contains("-name")) resultname = args(getFlagIndex("-name"))
    if (args.contains("-dir")) dir = args(getFlagIndex("-dir"))
    println("Cmdline Arguments: db: " + database + ", query: " + query + ", resultname: " + resultname + ", dir: " + dir)
  }

  /**
   * Runs `query` against the given database and returns the last line of the
   * HTTP response body (InfluxDB answers with a single JSON line), or "" on
   * any I/O failure.
   */
  def sendQueryToInfluxDB(databaseName: String, query: String): String = try {
    val url = "http://" +
      INFLUXDB_HOST +
      ":" +
      INFLUXDB_PORT +
      "/db/" + databaseName + "/series?u=root&p=root&q=" +
      // Explicit charset: the 1-arg URLEncoder.encode overload is deprecated
      // and depends on the platform default encoding.
      java.net.URLEncoder.encode(query, "utf-8")
    val in = scala.io.Source.fromURL(url, "utf-8")
    try {
      var data = ""
      for (line <- in.getLines)
        data = line // keep the last line, as before
      data
    } finally in.close() // previously the connection's source was never closed
  } catch {
    // java.net.ConnectException is a subclass of IOException, so one case covers
    // both (the original had a second, unreachable ConnectException case).
    case e: IOException => ""
  }

  /**
   * Exports the query result to `fileName` as CSV (header row of column names,
   * then one line per point). A failed or empty query still produces an empty file,
   * preserving the original behavior.
   */
  def exportDataToCSV(databaseName: String, sqlQuery: String, fileName: String) = {
    val writer = new PrintWriter(new File(fileName))
    try {
      val rawData = sendQueryToInfluxDB(databaseName, sqlQuery)
      if (rawData.length != 0) {
        val json = JsonMethods.parse(rawData)
        val columns = (json \\\\ "columns").values
        val points = (json \\\\ "points").values
        writer.println(columns.asInstanceOf[List[String]].mkString(","))
        for (point <- points.asInstanceOf[List[Any]])
          writer.println(point.asInstanceOf[List[Any]].map(_.toString).mkString(","))
      }
    } finally writer.close() // close even if parsing throws; previously the handle could leak
  }

  /**
   * Current time as yyyyMMddkkmm. NOTE(review): `kk` is the 1-24 hour field
   * (midnight renders as "24"); kept as-is for filename compatibility.
   */
  def getTimestamp(): String = {
    val now = Calendar.getInstance().getTime()
    val dataFormat = new SimpleDateFormat("yyyyMMddkkmm")
    dataFormat.format(now)
  }

  def main(args: Array[String]): Unit = {
    if (args.length < 6) println(usage)
    else handleCmdlineArguments(args)
    // NOTE(review): with too few arguments we still fall through and export using
    // the default settings; exiting here instead would change existing behavior.
    val outputFilePath = dir + "/" + getTimestamp() + "-" + resultname + ".csv"
    exportDataToCSV(database, query, outputFilePath)
  }
} | att-innovate/charmander-experiment-nessy | analytics/resultcollector/src/main/scala/resultcollector.scala | Scala | mit | 3,934 |
package com.sksamuel.scrimage.filter
import java.util.Random
import com.sksamuel.scrimage.ImmutableImage
import com.sksamuel.scrimage.nio.PngWriter
import org.scalatest.{FunSuite, Matchers}
class SwimFilterTest extends FunSuite with Matchers {

  // Max compression keeps the generated PNGs small when comparing against fixtures.
  implicit val writer: PngWriter = PngWriter.MaxCompression

  private val bird = ImmutableImage.fromResource("/bird_small.png")
  private val love = ImmutableImage.fromResource("/love.jpg")
  private val masks = ImmutableImage.fromResource("/masks.jpg")

  // A fixed Random(0) seed makes the filter output reproducible.
  // NOTE(review): both tests are `ignore`d; the reason is not recorded here.
  ignore("swim filter output matches expected") {
    bird.filter(new SwimFilter(new Random(0))) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/bird_small_swim.png")
    love.filter(new SwimFilter(new Random(0))) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/love_swim.png")
    masks.filter(new SwimFilter(new Random(0))) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/masks_swim.png")
  }

  ignore("swim filter should support amount") {
    masks.filter(new SwimFilter(new Random(0), 10, 2)) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/masks_swim_10_2.png")
    masks.filter(new SwimFilter(new Random(0), 16, 2)) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/masks_swim_16_2.png")
    masks.filter(new SwimFilter(new Random(0), 32, 2)) shouldBe ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/swim/masks_swim_32_2.png")
  }
}
| sksamuel/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/SwimFilterTest.scala | Scala | apache-2.0 | 1,485 |
package margn.ast
import margn.types.DType
// Root of the abstract syntax tree hierarchy.
abstract class ASTree
// A program is a sequence of statements.
case class ASTProgram(children: Seq[ASTStatement]) extends ASTree
abstract class ASTStatement extends ASTree
abstract class ASTExpr extends ASTree {
  // Mutable type slot, null until assigned — presumably filled in by a later
  // type-checking pass; TODO confirm. Callers must not read it before then.
  var _type_ : DType = null
}
| 193s/margn | src/main/scala/margn/ast/ASTCore.scala | Scala | gpl-2.0 | 249 |
/*This file is part of BinaryReader.
BinaryReader is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
BinaryReader is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with BinaryReader. If not, see <http://www.gnu.org/licenses/>.*/
package binaryreader.structure
object Padding {
  /** Factory mirroring the class constructor. */
  def apply(name: String = "Padding", size: Int): Padding = new Padding(name, size)
}

/** A structure element that occupies `size` bytes without interpreting them. */
class Padding(val name: String = "Padding", val size: Int) extends DataStructure {

  // Padding's length is exactly its declared size.
  val length = size

  /** Copy with a different display name but the same size. */
  def copy(name: String): Padding = new Padding(name, size)

  // Records our position and reports how many bytes we consume.
  override def propagate(pos: Int): Int = {
    currentPosition = pos
    size
  }

  // Indented one tab per nesting level.
  override def debugString(level: Int): Unit = {
    println("\t"* level + name + ": " + size + " Position: " + position)
  }
}
| Supermanu/BinaryReader | src/main/scala/binaryreader/structure/Padding.scala | Scala | gpl-2.0 | 1,175 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Mon Sep 28 11:18:16 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation.stat
import language.implicitConversions
import scalation.linalgebra.mem_mapped.{VectorC, VectorD, VectorI, VectorL, VectorQ, VectorR, VectorS}
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Conversions` object provide implicit conversion from memory mapped
* vectors to `StatVectors`.
*/
object Conversions
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Implicit conversions from memory-mapped vectors to `MM_StatVector`, which
     *  supports more advanced statistical operations on vectors (e.g., covariance).
     *  Non-Double vector types require conversion to `VectorD` via 'toDouble' first.
     *  Caveat: won't work for vectors of string numbers (`VectorS`).
     *  @param x  the vector to be enriched
     */
    implicit def vectorC2StatVector (x: VectorC) = new MM_StatVector (x.toDouble)
    implicit def vectorD2StatVector (x: VectorD) = new MM_StatVector (x)
    implicit def vectorI2StatVector (x: VectorI) = new MM_StatVector (x.toDouble)
    implicit def vectorL2StatVector (x: VectorL) = new MM_StatVector (x.toDouble)
    implicit def vectorQ2StatVector (x: VectorQ) = new MM_StatVector (x.toDouble)
    implicit def vectorR2StatVector (x: VectorR) = new MM_StatVector (x.toDouble)
| mvnural/scalation | src/main/scala/scalation/stat/Conversions.scala | Scala | mit | 1,532 |
import stainless.lang._
import stainless.collection._
import stainless.lang.Option._
import stainless.annotation._
import stainless.proof._
import stainless.lang.StaticChecks._
object ArraySliceExample {

  /** Lemma (verified by induction on `xs`): List.updated preserves length. */
  def updateSizeLemma[T](xs: List[T], i: BigInt, y: T): Boolean = {
    require(0 <= i && i < xs.size)
    decreases(xs)
    // The match result is discarded; it only instantiates the recursive lemma
    // occurrences the verifier needs.
    xs match {
      case Nil() => true
      case Cons(x, xs) => if (i == 0) true else updateSizeLemma[T](xs, i - 1, y)
    }
    xs.updated(i, y).size == xs.size
  }.holds

  /** A mutable array backed by an immutable list; a Stainless heap reference. */
  final case class SArray[T](var content: List[T]) extends AnyHeapRef {

    /** Replaces the contents with `n` copies of `default`. */
    def fill(n: BigInt)(default: T): Unit = {
      // should become a constructor eventually instead of a method
      // Creates visible sharing, so needs support for allocator
      reads(Set(this))
      require(0 <= n)
      modifies(Set(this))
      content = List.fill(n)(default)
    } ensuring { _ => content.size == n }

    def apply(i: BigInt): T = {
      reads(Set(this))
      require(0 <= i && i < content.size)
      content(i)
    }

    def update(i: BigInt, v: T): Unit = {
      reads(Set(this))
      require(0 <= i && i < content.size)
      modifies(Set(this))
      // Instantiate the lemma so the size-preservation postcondition verifies.
      updateSizeLemma(content, i, v)
      content = content.updated(i, v)
    } ensuring { _ => content.size == old(content.size) }
  }

  /** A view over a[from, until); element indices keep their original meaning. */
  final case class ArraySlice[T](a: SArray[T], from: BigInt, until: BigInt) {
    // these slices retain their original indices but preclude access outside of range
    require(0 <= from && from <= until)

    def valid: Boolean = { // this method aspires to become part of the above `require`
      reads(Set(a))
      until <= a.content.size
    }

    def apply(i: BigInt): T = {
      reads(Set(a))
      require(from <= i && i < until && valid)
      a(i)
    }

    def update(i: BigInt, v: T): Unit = {
      reads(Set(a))
      require(from <= i && i < until && valid)
      modifies(Set(a))
      a(i) = v
    } ensuring { _ => valid }

    /** Narrows the slice to [from1, until1), which must lie within this slice. */
    def reSlice(from1: BigInt, until1: BigInt): ArraySlice[T] = {
      reads(Set(a))
      require(from <= from1 && from1 <= until1 && until1 <= until && valid)
      ArraySlice[T](a, from1, until1)
    }
  }

  // Printing is extern: not modelled by the verifier.
  @extern
  def pr(s: String): Unit = {
    println(s)
  }

  // @extern // remove the `@extern` for some hard VCs
  /** Demonstrates that a write through one slice is visible through an overlapping one. */
  def testSlices(a: SArray[String]): Unit = {
    reads(Set(a))
    modifies(Set(a))
    a.fill(5)("")
    a(3) = "s3"
    a(4) = "s4"
    a(0) = "s0"
    a(2) = "s2"
    a(1) = "s1"
    val slice14 = ArraySlice(a, 1, 4)
    assert(slice14.valid)
    val slice23 = slice14.reSlice(2, 3)
    pr("slice23(2) = " + slice23(2))
    slice14(2) = "42"
    pr("slice23(2) = " + slice23(2))
  }

  @extern
  def main(args: Array[String]): Unit = {
    testSlices(SArray[String](List[String]()))
  }
}
| epfl-lara/stainless | frontends/benchmarks/full-imperative/valid/ArraySlice.scala | Scala | apache-2.0 | 2,766 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary.api
import com.eevolution.context.dictionary.domain.model.PrintFont
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 10/11/17.
*/
/**
* Print Font Service
*/
trait PrintFontService extends api.Service[PrintFont, Int] {
  // Definition only — no operations beyond the generic Service[PrintFont, Int] contract.
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/PrintFontService.scala | Scala | gpl-3.0 | 1,217 |
package org.jetbrains.plugins.scala
package lang.surroundWith.surrounders.scaladoc
import com.intellij.lang.surroundWith.Surrounder
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
/**
* User: Dmitry Naydanov
* Date: 3/3/12
*/
/**
 * Base for scaladoc wiki-syntax surrounders: wraps the current editor selection
 * in the tag returned by `getSyntaxTag` and rebuilds the affected PSI elements.
 */
trait ScalaDocWithSyntaxSurrounder extends Surrounder {
  def isApplicable(elements: Array[PsiElement]): Boolean = elements != null && elements.length >= 1

  def surroundElements(project: Project, editor: Editor, elements: Array[PsiElement]): TextRange = {
    val startOffset = editor.getSelectionModel.getSelectionStart
    val endOffset = editor.getSelectionModel.getSelectionEnd
    // Offset of the first element; selection offsets are made relative to it below.
    val offset = elements(0).getTextOffset

    // Rebuilds the combined text with the syntax tag inserted at both selection bounds.
    def getNewExprText(expr: String): String = expr.substring(0, startOffset - offset) + getSyntaxTag +
      expr.substring(startOffset - offset, endOffset - offset) + getSyntaxTag + expr.substring(endOffset - offset)

    val surroundedText = new StringBuilder()
    elements.foreach(surroundedText append _.getText)

    // Parse the tagged text into scaladoc PSI and splice the resulting siblings
    // in before the first original element, stopping at the comment-end token.
    var newExpr = ScalaPsiElementFactory.createDocSimpleData(getNewExprText(surroundedText.toString()), elements(0).getManager)
    while (newExpr != null && newExpr.getNode.getElementType != ScalaDocTokenType.DOC_COMMENT_END) {
      elements(0).getParent.addBefore(newExpr, elements(0))
      newExpr = newExpr.getNextSibling
    }
    elements.foreach(_.delete())

    // Empty range just past the closing tag — where the caret should land.
    new TextRange(endOffset + 2*getSyntaxTag.length, endOffset + 2*getSyntaxTag.length)
  }

  /** The wiki-syntax tag inserted on both sides of the selection. */
  def getSyntaxTag: String
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/surroundWith/surrounders/scaladoc/ScalaDocWithSyntaxSurrounder.scala | Scala | apache-2.0 | 1,739 |
package io.youi
import scala.scalajs.js
// Scala.js facade for the browser's DOMRectReadOnly interface:
// an origin (x, y), a size (width, height) and the derived edge coordinates.
@js.native
trait DOMRectReadOnly extends js.Object {
  def x: Double
  def y: Double
  def width: Double
  def height: Double
  def top: Double
  def right: Double
  def bottom: Double
  def left: Double
}
| outr/youi | dom/src/main/scala/io/youi/DOMRectReadOnly.scala | Scala | mit | 248 |
package xyz.suchanek.parsers
import org.htmlcleaner.{HtmlCleaner, TagNode}
import scala.util.Try
object Xroxy {

  /**
   * Parses an xroxy.com proxy-list HTML page into (host, port) pairs.
   * Rows of both alternating CSS classes are considered; rows with fewer than
   * four cells are skipped. Returns None when any cell fails to parse.
   */
  def getProxyList(data: String) = {
    val root = new HtmlCleaner().clean(data)
    val evenRows = root.evaluateXPath("//tr[@class='row0']").toList
    val oddRows = root.evaluateXPath("//tr[@class='row1']").toList
    Try {
      (evenRows ++ oddRows)
        .map(_.asInstanceOf[TagNode])
        .map(_.getAllElements(false))
        .filter(_.length > 3)
        .map { cells =>
          (cells(1).getText.toString.trim(), cells(2).getText.toString.toInt)
        }
    }.toOption
  }
}
| wsuchy/ProxyPool | src/main/scala/xyz/suchanek/parsers/Xroxy.scala | Scala | gpl-2.0 | 597 |
package scalikejdbc.bigquery
import scalikejdbc._
// Value class wrapping the tag's integer id.
case class TagId(value: Int) extends AnyVal

/** A tag attached to a post. */
case class Tag(
  id: TagId,
  postId: PostId,
  name: String
)

object Tag extends SQLSyntaxSupport[Tag] {

  // Column list must match the underlying table schema.
  override val columns = Seq("id", "post_id", "name")

  import Post.postIdBinders

  // Bind/extract TagId as a plain integer column.
  implicit val tagIdBinders: Binders[TagId] = Binders.int.xmap(TagId.apply, _.value)

  val t = this.syntax("t")

  /** Extracts a Tag from the current result-set row using syntax `t`. */
  def apply(rs: WrappedResultSet): Tag = new Tag(
    id = rs.get[TagId](t.resultName.id),
    postId = rs.get[PostId](t.resultName.postId),
    name = rs.get[String](t.resultName.name)
  )

  /** Like apply, but yields None when the row's id column is NULL (e.g. after an outer join). */
  def opt(rs: WrappedResultSet): Option[Tag] = rs.intOpt(t.resultName.id).map(_ => apply(rs))
}
| ocadaruma/scalikejdbc-bigquery | src/it/scala/scalikejdbc/bigquery/Tag.scala | Scala | apache-2.0 | 689 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.{Log => BLog}
import com.intel.analytics.bigdl.dllib.keras.layers.{Log => ZLog}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.ZooSpecHelper
import com.intel.analytics.bigdl.dllib.keras.serializer.ModuleSerializationTest
/** Checks that the Zoo Log layer matches BigDL's Log on 3D and 4D inputs. */
class LogSpec extends ZooSpecHelper {

  "Log 3D Zoo" should "be the same as BigDL" in {
    val bigdlLog = BLog[Float]()
    val zooLog = ZLog[Float](inputShape = Shape(3, 4))
    zooLog.build(Shape(-1, 3, 4))
    val inferredShape = zooLog.getOutputShape().toSingle().toArray
    inferredShape should be (Array(-1, 3, 4))
    val randomInput = Tensor[Float](Array(2, 3, 4)).rand()
    compareOutputAndGradInput(bigdlLog, zooLog, randomInput)
  }

  "Log 4D Zoo" should "be the same as BigDL" in {
    val bigdlLog = BLog[Float]()
    val zooLog = ZLog[Float](inputShape = Shape(4, 8, 8))
    zooLog.build(Shape(-1, 4, 8, 8))
    val inferredShape = zooLog.getOutputShape().toSingle().toArray
    inferredShape should be (Array(-1, 4, 8, 8))
    val randomInput = Tensor[Float](Array(3, 4, 8, 8)).rand()
    compareOutputAndGradInput(bigdlLog, zooLog, randomInput)
  }
}
/** Round-trips a built Log layer through module serialization. */
class LogSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val logLayer = ZLog[Float](inputShape = Shape(4, 8, 8))
    logLayer.build(Shape(2, 4, 8, 8))
    val sampleInput = Tensor[Float](2, 4, 8, 8).rand()
    runSerializationTest(logLayer, sampleInput)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/keras/layers/LogSpec.scala | Scala | apache-2.0 | 2,062 |
package org.taylorbrown.randomforest
import org.scalatest._
/**
 * Smoke test for the random forest: trained on a tiny, mostly-separable
 * data set, the ensemble should classify the large majority of its own
 * training examples correctly.
 */
class ForestTest extends FlatSpec with Matchers {

  // Feature value 3f always carries label 1; values 1f and 2f are split
  // between labels 0 and 1, so perfect accuracy is not expected.
  val examples = List(
    Example(List(1f), Label(0)),
    Example(List(2f), Label(0)),
    Example(List(1f), Label(0)),
    Example(List(1f), Label(1)),
    Example(List(2f), Label(1)),
    Example(List(2f), Label(1)),
    Example(List(3f), Label(1)),
    Example(List(3f), Label(1)),
    Example(List(3f), Label(1)),
    Example(List(3f), Label(1))
  )

  "The forest " should "classify things mostly right" in {
    val forest = new Forest(examples, nTrees = 100)
    // Count training examples the forest gets right; require more than 7 of 10.
    val correct = examples.count(e => forest.classify(List(e)).head == e.label)
    correct should be > 7
  }
}
| taylor-brown/random-forest-scala | src/test/scala/org/taylorbrown/randomforest/ForestTest.scala | Scala | apache-2.0 | 860 |
/*
* Copyright 2009-2017. DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.mrgeo.mapalgebra.raster
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import org.apache.spark.{SparkConf, SparkContext}
import org.mrgeo.data.ProviderProperties
import org.mrgeo.data.rdd.RasterRDD
import org.mrgeo.image.MrsPyramidMetadata
import org.mrgeo.job.JobArguments
import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.{MapAlgebra, MapOp}
import scala.collection.JavaConverters._
/**
 * Map-algebra operation that saves an input raster pyramid under a new name.
 *
 * Parsed form takes two arguments: the raster to save and the output name.
 * On execute, the input's metadata is cloned, re-pointed at the output name,
 * and the pyramid is saved through the data-provider layer.
 */
class SaveRasterMapOp extends RasterMapOp with Externalizable {
  var providerProperties: ProviderProperties = null
  private var rasterRDD: Option[RasterRDD] = None
  private var input: Option[RasterMapOp] = None
  private var output: String = null

  override def rdd(): Option[RasterRDD] = rasterRDD

  override def execute(context: SparkContext): Boolean = {
    input match {
      case Some(pyramid) =>
        rasterRDD = pyramid.rdd()
        // Clone the input's metadata so the saved pyramid carries the new name.
        val meta = new MrsPyramidMetadata(
          pyramid.metadata().getOrElse(
            throw new IOException("Can't load metadata! Ouch! " + pyramid.getClass.getName)))
        // set the pyramid name to the output
        meta.setPyramid(output)
        metadata(meta)
        pyramid.save(output, providerProperties, context)
      case None => throw new IOException("Error saving raster")
    }
    true
  }

  override def setup(job: JobArguments, conf: SparkConf): Boolean = {
    providerProperties = ProviderProperties.fromDelimitedString(job.getSetting(MapAlgebra.ProviderProperties, ""))
    true
  }

  // Delegate to the input raster; it is an error to ask before the input is bound.
  override def getZoomLevel(): Int =
    input.getOrElse(throw new IOException("Unable to get zoom level from input raster")).getZoomLevel()

  override def teardown(job: JobArguments, conf: SparkConf): Boolean = true

  // No state of this op itself needs Java serialization.
  override def readExternal(in: ObjectInput): Unit = {}

  override def writeExternal(out: ObjectOutput): Unit = {}

  private[mapalgebra] def this(inputMapOp: Option[RasterMapOp], name: String) = {
    this()
    input = inputMapOp
    output = name
  }

  private[mapalgebra] def this(node: ParserNode, variables: String => Option[ParserNode]) = {
    this()
    if (node.getNumChildren < 2) {
      throw new ParserException(node.getName + " takes 2 arguments")
    }
    input = RasterMapOp.decodeToRaster(node.getChild(0), variables)
    output = MapOp.decodeString(node.getChild(1)) match {
      case Some(s) => s
      case _ => throw new ParserException("Error decoding String")
    }
  }
}
| ngageoint/mrgeo | mrgeo-core/src/main/scala/org/mrgeo/mapalgebra/raster/SaveRasterMapOp.scala | Scala | apache-2.0 | 3,057 |
import scala.reflect.{ClassTag, classTag}
// /scala/trac/5452/a.scala
// Mon Feb 13 22:52:36 PST 2012
// import scala.reflect.runtime.universe._
trait Tree  // minimal type used only to disambiguate (or not) the overloads below
object Bip {
  def ??? = sys.error("")  // local stand-in predating Predef.???
}
import Bip._
case class Queryable[T]() {
  def treeFilter( t:Tree ) : Queryable[T] = ???
}
object Queryable {
  def apply[T:ClassTag] = ???  // two overloaded applies with a context bound
  def apply[T:ClassTag]( t:Tree ) = ???
}
trait CoffeesTable{
  def sales : Int
}
object Test extends App{
  val q = new Queryable[CoffeesTable]
  Queryable[CoffeesTable]( q.treeFilter(null) )  // neg test: this call is expected to be rejected
}
| AlexSikia/dotty | tests/untried/neg/t5452-new.scala | Scala | bsd-3-clause | 542 |
package scala.reflect.macros
package runtime
import scala.reflect.internal.util.Position
import scala.util.control.ControlThrowable
class AbortMacroException(val pos: Position, val msg: String) extends Throwable(msg) with ControlThrowable | felixmulder/scala | src/compiler/scala/reflect/macros/runtime/AbortMacroException.scala | Scala | bsd-3-clause | 240 |
class S { new J }
| yusuke2255/dotty | tests/pos/java-interop/t1176/S.scala | Scala | bsd-3-clause | 18 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.mockito.Mockito.when
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.BoxValidationFixture
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.computations.Validators.LossesPreviousToCurrentFixture
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
class CP283aSpec extends WordSpec with Matchers with MockitoSugar with BoxValidationFixture[ComputationsBoxRetriever] with LossesPreviousToCurrentFixture {

  override val boxRetriever = makeBoxRetriever()

  // Shared validation fixtures: mandatory-when, non-negative, and the global
  // brought-forward-greater-than-total-profit checks.
  testMandatoryWhen("CP283a", CP283a.apply) {
    makeBoxRetriever()
  }

  testBoxIsZeroOrPositive("CP283a", CP283a.apply)

  testGlobalErrorsForBroughtForwardGtTotalProfit(b => b.cp283a()) {
    makeBoxRetriever()
  }

  "when cp117 is zero, cp283a should pass when no value is entered" in {
    setUpMocks()
    val zeroProfitRetriever = makeBoxRetriever(cp281aValue = None, cp117 = 0)
    CP283a(None).validate(zeroProfitRetriever) shouldBe Set()
  }

  // Builds a retriever stubbed with just the boxes that CP283a validation reads.
  private def makeBoxRetriever(cp281aValue: Option[Int] = Some(1), cp117: Int = 1) = {
    val br = mock[ComputationsBoxRetriever]
    when(br.cp117()).thenReturn(CP117(cp117))
    when(br.cp281a()).thenReturn(CP281a(cp281aValue))
    when(br.cp283a()).thenReturn(CP283a(None))
    when(br.cp283b()).thenReturn(CP283b(None))
    br
  }
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP283aSpec.scala | Scala | apache-2.0 | 1,963 |
package io.github.interestinglab.waterdrop.filter
import io.github.interestinglab.waterdrop.config.{Config, ConfigFactory}
import io.github.interestinglab.waterdrop.apis.BaseFilter
import io.github.interestinglab.waterdrop.core.RowConstant
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import scala.collection.JavaConversions._
/**
 * Filter that splits a delimited string column into multiple named fields.
 *
 * Configuration:
 *  - fields:       required, the names of the resulting fields
 *  - delimiter:    optional, defaults to a single space
 *  - source_field: optional, defaults to "raw_message"
 *  - target_field: optional; the row root (each part becomes a top-level
 *    column) or a named column holding a map of field name -> part
 */
class Split extends BaseFilter {

  var conf: Config = ConfigFactory.empty()

  /** Set Config. */
  override def setConfig(config: Config): Unit = {
    this.conf = config
  }

  /** Get Config. */
  override def getConfig(): Config = {
    this.conf
  }

  // TODO: check fields.length == field_types.length if field_types exists
  override def checkConfig(): (Boolean, String) = {
    if (conf.hasPath("fields") && conf.getStringList("fields").size() > 0) {
      (true, "")
    } else {
      (false, "please specify [fields] as a non-empty string list")
    }
  }

  override def prepare(spark: SparkSession): Unit = {
    // Defaults: split on a single space, read "raw_message", write to the row root.
    val defaultConfig = ConfigFactory.parseMap(
      Map(
        "delimiter" -> " ",
        "source_field" -> "raw_message",
        "target_field" -> RowConstant.ROOT
      )
    )
    conf = conf.withFallback(defaultConfig)
  }

  override def process(spark: SparkSession, df: Dataset[Row]): Dataset[Row] = {
    val srcField = conf.getString("source_field")
    val keys = conf.getStringList("fields")

    // https://stackoverflow.com/a/33345698/1145750
    conf.getString("target_field") match {
      case RowConstant.ROOT => {
        // Expand each split part into its own top-level column.
        val func = udf((s: String) => {
          split(s, conf.getString("delimiter"), keys.size())
        })
        var filterDf = df.withColumn(RowConstant.TMP, func(col(srcField)))
        for (i <- 0 until keys.size()) {
          filterDf = filterDf.withColumn(keys.get(i), col(RowConstant.TMP)(i))
        }
        filterDf.drop(RowConstant.TMP)
      }
      case targetField: String => {
        // Pack all parts into a single map-typed column keyed by field name.
        val func = udf((s: String) => {
          val values = split(s, conf.getString("delimiter"), keys.size)
          val kvs = (keys zip values).toMap
          kvs
        })
        df.withColumn(targetField, func(col(srcField)))
      }
    }
  }

  /**
   * Split string by delimiter into exactly fillLength trimmed parts:
   * missing parts are filled with "" and extra parts are truncated.
   *
   * NOTE(review): String.split treats the delimiter as a regex and drops
   * trailing empty fields -- confirm this matches the intended config
   * semantics (e.g. a "|" delimiter would need escaping by the user).
   */
  private def split(str: String, delimiter: String, fillLength: Int): Seq[String] = {
    val parts = str.split(delimiter).map(_.trim)
    parts.toSeq.padTo(fillLength, "").take(fillLength)
  }
}
| InterestingLab/waterdrop | waterdrop-core/src/main/scala/io/github/interestinglab/waterdrop/filter/Split.scala | Scala | apache-2.0 | 2,776 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.util.Locale
import scala.util.control.NonFatal
import com.google.common.util.concurrent.Striped
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.{QualifiedTableName, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat, ParquetOptions}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.HiveCaseSensitiveInferenceMode._
import org.apache.spark.sql.types._
/**
 * Legacy catalog for interacting with the Hive metastore.
 *
 * This is still used for things like creating data source tables, but in the future will be
 * cleaned up to integrate more nicely with [[HiveExternalCatalog]].
 */
private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Logging {
  // these are def_s and not val/lazy val since the latter would introduce circular references
  private def sessionState = sparkSession.sessionState
  private def catalogProxy = sparkSession.sessionState.catalog
  import HiveMetastoreCatalog._

  /** These locks guard against multiple attempts to instantiate a table, which wastes memory. */
  private val tableCreationLocks = Striped.lazyWeakLock(100)

  /** Acquires a lock on the table cache for the duration of `f`. */
  private def withTableCreationLock[A](tableName: QualifiedTableName, f: => A): A = {
    val lock = tableCreationLocks.get(tableName)
    lock.lock()
    try f finally {
      lock.unlock()
    }
  }

  // For testing only
  private[hive] def getCachedDataSourceTable(table: TableIdentifier): LogicalPlan = {
    val key = QualifiedTableName(
      // scalastyle:off caselocale
      table.database.getOrElse(sessionState.catalog.getCurrentDatabase).toLowerCase,
      table.table.toLowerCase)
    // scalastyle:on caselocale
    catalogProxy.getCachedTable(key)
  }

  /**
   * Returns the cached LogicalRelation for this table if it is still valid:
   * same file format, same root paths, same schema type, no bucketing, and
   * the same partition schema. Any mismatch invalidates the cache entry and
   * returns None so the caller rebuilds the relation.
   */
  private def getCached(
      tableIdentifier: QualifiedTableName,
      pathsInMetastore: Seq[Path],
      schemaInMetastore: StructType,
      expectedFileFormat: Class[_ <: FileFormat],
      partitionSchema: Option[StructType]): Option[LogicalRelation] = {
    catalogProxy.getCachedTable(tableIdentifier) match {
      case null => None // Cache miss
      case logical @ LogicalRelation(relation: HadoopFsRelation, _, _, _) =>
        val cachedRelationFileFormatClass = relation.fileFormat.getClass
        expectedFileFormat match {
          case `cachedRelationFileFormatClass` =>
            // If we have the same paths, same schema, and same partition spec,
            // we will use the cached relation.
            val useCached =
              relation.location.rootPaths.toSet == pathsInMetastore.toSet &&
                logical.schema.sameType(schemaInMetastore) &&
                // We don't support hive bucketed tables. This function `getCached` is only used for
                // converting supported Hive tables to data source tables.
                relation.bucketSpec.isEmpty &&
                relation.partitionSchema == partitionSchema.getOrElse(StructType(Nil))
            if (useCached) {
              Some(logical)
            } else {
              // If the cached relation is not updated, we invalidate it right away.
              catalogProxy.invalidateCachedTable(tableIdentifier)
              None
            }
          case _ =>
            logWarning(s"Table $tableIdentifier should be stored as $expectedFileFormat. " +
              s"However, we are getting a ${relation.fileFormat} from the metastore cache. " +
              "This cached entry will be invalidated.")
            catalogProxy.invalidateCachedTable(tableIdentifier)
            None
        }
      case other =>
        logWarning(s"Table $tableIdentifier should be stored as $expectedFileFormat. " +
          s"However, we are getting a $other from the metastore cache. " +
          "This cached entry will be invalidated.")
        catalogProxy.invalidateCachedTable(tableIdentifier)
        None
    }
  }

  // Return true for Apache ORC and Hive ORC-related configuration names.
  // Note that Spark doesn't support configurations like `hive.merge.orcfile.stripe.level`.
  private def isOrcProperty(key: String) =
    key.startsWith("orc.") || key.contains(".orc.")

  private def isParquetProperty(key: String) =
    key.startsWith("parquet.") || key.contains(".parquet.")

  /**
   * Converts a Hive table relation to a data-source LogicalRelation, choosing
   * the Parquet or (native/Hive) ORC file format based on the table's serde.
   */
  def convert(relation: HiveTableRelation): LogicalRelation = {
    val serde = relation.tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)

    // Consider table and storage properties. For properties existing in both sides, storage
    // properties will supersede table properties.
    if (serde.contains("parquet")) {
      val options = relation.tableMeta.properties.filterKeys(isParquetProperty) ++
        relation.tableMeta.storage.properties + (ParquetOptions.MERGE_SCHEMA ->
        SQLConf.get.getConf(HiveUtils.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING).toString)
      convertToLogicalRelation(relation, options, classOf[ParquetFileFormat], "parquet")
    } else {
      val options = relation.tableMeta.properties.filterKeys(isOrcProperty) ++
        relation.tableMeta.storage.properties
      if (SQLConf.get.getConf(SQLConf.ORC_IMPLEMENTATION) == "native") {
        convertToLogicalRelation(
          relation,
          options,
          classOf[org.apache.spark.sql.execution.datasources.orc.OrcFileFormat],
          "orc")
      } else {
        convertToLogicalRelation(
          relation,
          options,
          classOf[org.apache.spark.sql.hive.orc.OrcFileFormat],
          "orc")
      }
    }
  }

  /**
   * Builds (or reuses from cache) a LogicalRelation for the given Hive table,
   * handling partitioned and unpartitioned tables separately, then re-maps the
   * output attributes to preserve the original relation's expression ids.
   */
  private def convertToLogicalRelation(
      relation: HiveTableRelation,
      options: Map[String, String],
      fileFormatClass: Class[_ <: FileFormat],
      fileType: String): LogicalRelation = {
    val metastoreSchema = relation.tableMeta.schema
    val tableIdentifier =
      QualifiedTableName(relation.tableMeta.database, relation.tableMeta.identifier.table)

    val lazyPruningEnabled = sparkSession.sqlContext.conf.manageFilesourcePartitions
    val tablePath = new Path(relation.tableMeta.location)
    val fileFormat = fileFormatClass.getConstructor().newInstance()

    val result = if (relation.isPartitioned) {
      val partitionSchema = relation.tableMeta.partitionSchema
      val rootPaths: Seq[Path] = if (lazyPruningEnabled) {
        Seq(tablePath)
      } else {
        // By convention (for example, see CatalogFileIndex), the definition of a
        // partitioned table's paths depends on whether that table has any actual partitions.
        // Partitioned tables without partitions use the location of the table's base path.
        // Partitioned tables with partitions use the locations of those partitions' data
        // locations, _omitting_ the table's base path.
        val paths = sparkSession.sharedState.externalCatalog
          .listPartitions(tableIdentifier.database, tableIdentifier.name)
          .map(p => new Path(p.storage.locationUri.get))

        if (paths.isEmpty) {
          Seq(tablePath)
        } else {
          paths
        }
      }

      withTableCreationLock(tableIdentifier, {
        val cached = getCached(
          tableIdentifier,
          rootPaths,
          metastoreSchema,
          fileFormatClass,
          Some(partitionSchema))

        val logicalRelation = cached.getOrElse {
          val sizeInBytes = relation.stats.sizeInBytes.toLong
          val fileIndex = {
            val index = new CatalogFileIndex(sparkSession, relation.tableMeta, sizeInBytes)
            if (lazyPruningEnabled) {
              index
            } else {
              index.filterPartitions(Nil) // materialize all the partitions in memory
            }
          }

          val updatedTable = inferIfNeeded(relation, options, fileFormat, Option(fileIndex))

          val fsRelation = HadoopFsRelation(
            location = fileIndex,
            partitionSchema = partitionSchema,
            dataSchema = updatedTable.dataSchema,
            bucketSpec = None,
            fileFormat = fileFormat,
            options = options)(sparkSession = sparkSession)
          val created = LogicalRelation(fsRelation, updatedTable)
          catalogProxy.cacheTable(tableIdentifier, created)
          created
        }

        logicalRelation
      })
    } else {
      val rootPath = tablePath
      withTableCreationLock(tableIdentifier, {
        val cached = getCached(
          tableIdentifier,
          Seq(rootPath),
          metastoreSchema,
          fileFormatClass,
          None)
        val logicalRelation = cached.getOrElse {
          val updatedTable = inferIfNeeded(relation, options, fileFormat)
          val created =
            LogicalRelation(
              DataSource(
                sparkSession = sparkSession,
                paths = rootPath.toString :: Nil,
                userSpecifiedSchema = Option(updatedTable.dataSchema),
                bucketSpec = None,
                options = options,
                className = fileType).resolveRelation(),
              table = updatedTable)

          catalogProxy.cacheTable(tableIdentifier, created)
          created
        }

        logicalRelation
      })
    }
    // The inferred schema may have different field names as the table schema, we should respect
    // it, but also respect the exprId in table relation output.
    assert(result.output.length == relation.output.length &&
      result.output.zip(relation.output).forall { case (a1, a2) => a1.dataType == a2.dataType })
    val newOutput = result.output.zip(relation.output).map {
      case (a1, a2) => a1.withExprId(a2.exprId)
    }
    result.copy(output = newOutput)
  }

  /**
   * Infers a case-sensitive data schema from the underlying files when the
   * metastore schema does not preserve case, per the configured inference
   * mode. With INFER_AND_SAVE the inferred schema is also written back to
   * the metastore; on inference failure the metastore schema is used as-is.
   */
  private def inferIfNeeded(
      relation: HiveTableRelation,
      options: Map[String, String],
      fileFormat: FileFormat,
      fileIndexOpt: Option[FileIndex] = None): CatalogTable = {
    val inferenceMode = sparkSession.sessionState.conf.caseSensitiveInferenceMode
    val shouldInfer = (inferenceMode != NEVER_INFER) && !relation.tableMeta.schemaPreservesCase
    val tableName = relation.tableMeta.identifier.unquotedString
    if (shouldInfer) {
      logInfo(s"Inferring case-sensitive schema for table $tableName (inference mode: " +
        s"$inferenceMode)")
      val fileIndex = fileIndexOpt.getOrElse {
        val rootPath = new Path(relation.tableMeta.location)
        new InMemoryFileIndex(sparkSession, Seq(rootPath), options, None)
      }

      val inferredSchema = fileFormat
        .inferSchema(
          sparkSession,
          options,
          fileIndex.listFiles(Nil, Nil).flatMap(_.files))
        .map(mergeWithMetastoreSchema(relation.tableMeta.dataSchema, _))

      inferredSchema match {
        case Some(dataSchema) =>
          if (inferenceMode == INFER_AND_SAVE) {
            updateDataSchema(relation.tableMeta.identifier, dataSchema)
          }
          val newSchema = StructType(dataSchema ++ relation.tableMeta.partitionSchema)
          relation.tableMeta.copy(schema = newSchema)
        case None =>
          logWarning(s"Unable to infer schema for table $tableName from file format " +
            s"$fileFormat (inference mode: $inferenceMode). Using metastore schema.")
          relation.tableMeta
      }
    } else {
      relation.tableMeta
    }
  }

  // Persists the inferred case-sensitive schema; failures are logged, not fatal.
  private def updateDataSchema(identifier: TableIdentifier, newDataSchema: StructType): Unit = try {
    logInfo(s"Saving case-sensitive schema for table ${identifier.unquotedString}")
    sparkSession.sessionState.catalog.alterTableDataSchema(identifier, newDataSchema)
  } catch {
    case NonFatal(ex) =>
      logWarning(s"Unable to save case-sensitive schema for table ${identifier.unquotedString}", ex)
  }
}
private[hive] object HiveMetastoreCatalog {
  /**
   * Merges the metastore schema with the file-inferred one: nullable fields
   * known only to the metastore are kept, and fields present in both take the
   * inferred (case-sensitive) names. Throws SparkException when the two
   * schemas conflict.
   */
  def mergeWithMetastoreSchema(
      metastoreSchema: StructType,
      inferredSchema: StructType): StructType = try {
    // scalastyle:off caselocale
    // Find any nullable fields in metastore schema that are missing from the inferred schema.
    val metastoreFields = metastoreSchema.map(f => f.name.toLowerCase -> f).toMap
    val missingNullables = metastoreFields
      .filterKeys(!inferredSchema.map(_.name.toLowerCase).contains(_))
      .values
      .filter(_.nullable)
    // Merge missing nullable fields to inferred schema and build a case-insensitive field map.
    val inferredFields = StructType(inferredSchema ++ missingNullables)
      .map(f => f.name.toLowerCase -> f).toMap
    StructType(metastoreSchema.map(f => f.copy(name = inferredFields(f.name.toLowerCase).name)))
    // scalastyle:on caselocale
  } catch {
    case NonFatal(_) =>
      val msg = s"""Detected conflicting schemas when merging the schema obtained from the Hive
         | Metastore with the one inferred from the file format. Metastore schema:
         |${metastoreSchema.prettyJson}
         |
         |Inferred schema:
         |${inferredSchema.prettyJson}
       """.stripMargin
      throw new SparkException(msg)
  }
}
| kiszk/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | Scala | apache-2.0 | 14,127 |
package io.github.binaryfoo.lagotto.reader
import java.io._
import java.util.concurrent.ArrayBlockingQueue
import io.github.binaryfoo.lagotto._
import scala.annotation.tailrec
import scala.collection.AbstractIterator
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.io.{BufferedSource, Source}
import scala.util.Try
/**
 * Common reading surface for log readers: resolves files / stdin / Sources
 * into a ProgressInputStream and delegates actual record iteration to
 * readWithProgress.
 */
trait SkeletonLogReader[T <: LogEntry] {

  def progressMeter: ProgressMeter

  /** Reads the named files, or stdin when no names were given. */
  def readFilesOrStdIn(args: Iterable[String], follow: Boolean = false, stdin: InputStream = System.in): Iterator[T] = {
    if (args.isEmpty)
      read(stdin, StdInRef())
    else
      read(args.map(new File(_)), follow)
  }

  def read(files: File*): Iterator[T] = read(files.toIterable, follow = false)

  // With follow=true the stream tails the file (tail -f style) instead of
  // stopping at EOF.
  def read(files: Iterable[File], follow: Boolean): Iterator[T] = {
    progressMeter.startRun(files.size)
    files.toIterator.flatMap { f =>
      val in = if (follow) TailInputStream(f) else FileIO.open(f)
      read(in, FileRef(f))
    }
  }

  /**
   * @deprecated Use read(InputStream,String) instead.
   */
  def read(source: Source): Iterator[T] = read(source, "")

  /**
   * @deprecated Use read(InputStream,String) instead.
   * Reaches into BufferedSource's private "inputStream" field via reflection
   * (brittle across Scala versions); any non-BufferedSource Source will hit
   * a MatchError since the match has a single case.
   */
  def read(source: Source, sourceName: String): Iterator[T] = {
    source match {
      case s: BufferedSource =>
        val field = classOf[BufferedSource].getDeclaredField("inputStream")
        field.setAccessible(true)
        read(field.get(s).asInstanceOf[InputStream], FileRef(new File(sourceName)))
    }
  }

  final def read(in: InputStream, sourceName: SourceRef = StdInRef()): Iterator[T] = {
    readWithProgress(new ProgressInputStream(in, progressMeter, sourceName))
  }

  // Implemented by concrete readers to iterate over parsed records.
  def readWithProgress(in: ProgressInputStream): Iterator[T]
}
/**
 * Kicks off a daemon thread to read lines. Parsing is delegated to the default ExecutionContext.
 *
 * @param strict Whinge with an exception on unexpected input
 * @param keepFullText If false keep only the parsed fields. If true keep the full text of every record. Maybe this should be removed.
 * @param logType
 * @tparam T
 */
case class LogReader[T <: LogEntry](strict: Boolean = false, keepFullText: Boolean = true, progressMeter: ProgressMeter = NullProgressMeter, logType: LogType[T] = JposLog) extends SkeletonLogReader[T] {

  override def readWithProgress(source: ProgressInputStream): Iterator[T] = new LogEntryIterator(source)

  private val processors = Runtime.getRuntime.availableProcessors()

  /**
   * Iterator over parsed records. A dedicated daemon thread slices the stream
   * into raw records and enqueues a parse Future per record; the bounded
   * queue provides backpressure between the reader thread and the consumer.
   */
  class LogEntryIterator(source: ProgressInputStream) extends AbstractIterator[T] {

    private val queue = new ArrayBlockingQueue[Future[T]](processors * processors * 2)
    private var current: T = null.asInstanceOf[T]
    private var started = false
    private var done = false
    private val reader = new Thread(new Runnable {
      private val lines = new LineIterator(source, strict, keepFullText)

      override def run(): Unit = {
        var more = true
        do {
          val f = try {
            val entry = logType.readLinesForNextRecord(lines)
            if (entry != null) {
              parseInTheFuture(entry)
            } else {
              // End of stream: a successful null Future acts as the sentinel.
              more = false
              Future.successful(null.asInstanceOf[T])
            }
          }
          catch {
            case e: Exception => Future.failed(e)
          }
          queue.put(f)
        } while (more)
      }
    }, s"${source.sourceRef.name}-reader")
    reader.setDaemon(true)

    override def hasNext: Boolean = {
      ensureStartedAndCurrent()
      !done
    }

    override def next(): T = {
      ensureStartedAndCurrent()
      consumeNext
    }

    private def readNext() = {
      if (!done) {
        current = readNextWithRetry()
        done = current == null
        source.publishProgress(done)
        if (done) {
          source.close()
        }
      }
    }

    private def consumeNext: T = {
      val v = current
      current = null.asInstanceOf[T]
      v
    }

    // Blocks up to a minute per record. In strict mode a failed parse is
    // rethrown; otherwise failed records are silently skipped and the next
    // queued result is tried.
    @tailrec
    private def readNextWithRetry(): T = {
      val future = queue.take()
      Await.ready(future, 1.minute)
      val maybe = future.value.get
      if (strict && maybe.isFailure) {
        maybe.get
      } else {
        if (maybe.isSuccess) maybe.get else readNextWithRetry()
      }
    }

    @inline
    private def ensureStartedAndCurrent() = {
      // The reader thread is started lazily on first use.
      if (!started) {
        reader.start()
        started = true
      }
      if (current == null)
        readNext()
    }

    override def foreach[U](f: (T) => U): Unit = {
      try {
        super.foreach(f)
      }
      finally {
        close()
      }
    }

    // NOTE(review): finalize() is deprecated on modern JVMs; close() is also
    // reachable via foreach's finally block.
    override def finalize(): Unit = close()

    def close(): Unit = {
      reader.interrupt()
      source.close()
    }

    @inline
    private def parseInTheFuture(entry: logType.P): Future[T] = {
      Future {
        try {
          logType.parse(entry)
        }
        catch {
          case e: Exception => throw new IAmSorryDave(s"Failed record ending at ${entry.source}", e)
        }
      }
    }
  }
}
/** Single-threaded alternative to LogReader: reads and parses records inline. */
case class SingleThreadLogReader[T <: LogEntry](strict: Boolean = false, keepFullText: Boolean = true, progressMeter: ProgressMeter = NullProgressMeter, logType: LogType[T] = JposLog) extends SkeletonLogReader[T] {
  override def readWithProgress(source: ProgressInputStream): Iterator[T] = new EntryIterator[T](source, strict, keepFullText, logType)
}
/**
 * Each entry may consume more than one line.
 * One-element-lookahead iterator: `current` always holds the next record to
 * hand out (null once exhausted). The source is closed at end of stream.
 */
class EntryIterator[T <: LogEntry](val source: ProgressInputStream, val strict: Boolean = false, val keepFullText: Boolean = true, logType: LogType[T] = JposLog) extends AbstractIterator[T] {

  private val lines = new LineIterator(source, strict, keepFullText)
  private var current = readNext()

  override def next(): T = {
    val c = current
    current = readNext()
    c
  }

  override def hasNext: Boolean = current != null

  private def readNext(): T = {
    val next = readNextWithRetry()
    val done = next == null
    source.publishProgress(done)
    if (done)
      source.close()
    next
  }

  // In strict mode a failed parse propagates; otherwise failed records are
  // skipped and parsing retries with the following record.
  @tailrec
  private def readNextWithRetry(): T = {
    val maybe = Try(logType.apply(lines))
    if (strict && maybe.isFailure || maybe.isSuccess) {
      maybe.get
    } else {
      readNextWithRetry()
    }
  }
}
/**
 * Adds a line number, name and a single line push back over Source.getLines().
 * `current` buffers the next unconsumed line (acting as the one-line
 * lookahead exposed via head); linesRead counts lines pulled from the stream
 * while currentLineNo tracks the line most recently returned by next().
 */
class LineIterator(in: ProgressInputStream, val strict: Boolean = false, val keepFullText: Boolean = true) extends AbstractIterator[String] with BufferedIterator[String] {

  private val lines = new BufferedReader(new InputStreamReader(in))
  private var linesRead = 0
  private var currentLineNo = 0
  private var current: String = null

  readNext()

  /**
   * Zero when next() has not been called.
   * After next() has been called, the line number for the most recently returned value of next().
   */
  def lineNumber: Int = currentLineNo

  /**
   * @return Line number and file name for most recently returned value of next().
   */
  def sourceRef: SourceRef = in.sourceRef.at(currentLineNo)

  /**
   * @return Line number and file name for most recently returned value of head.
   */
  def headRef: SourceRef = in.sourceRef.at(linesRead)

  def hasNext = current != null || readNext()

  def next(): String = {
    if (current == null) readNext()
    val c = current
    currentLineNo = linesRead
    current = null
    c
  }

  /** Peek at the next line without consuming it. */
  def head: String = current

  private def readNext(): Boolean = {
    current = lines.readLine()
    val readOne = current != null
    if (readOne) {
      linesRead += 1
    }
    readOne
  }
}
| binaryfoo/lagotto | src/main/scala/io/github/binaryfoo/lagotto/reader/LogReader.scala | Scala | mit | 7,604 |
package ca.hyperreal.scalgorithms
import math._
object Matrices
{
  /** Enrichment allowing a ring element to left-multiply a matrix: r * m scales every entry of m by r. */
  final class _RaisedToMatrix[R <: Ring[R]]( r: R )
  {
    def *( m: Matrix[R] ): Matrix[R] = m.operation( (i: Int, j: Int) => r*m(i, j) )
  }

  implicit def _raiseToMatrix[R <: Ring[R]]( r: R ): _RaisedToMatrix[R] = new _RaisedToMatrix[R]( r )

  // True when the determinant of the pairwise dot-product (Gramian-style)
  // matrix is non-zero -- presumably a linear-independence test for the given
  // matrices viewed as vectors; TODO confirm DotProductMatrix's semantics.
  def independent[R <: Ring[R]]( ms: Matrix[R]* ) = !new DotProductMatrix[R]( ms: _* ).det.isZero
  //	def gram[F <: Field[F]]( vs: Vector[F]* ) = new GramianMatrix[F]( vs: _* ).det
} | edadma/scalgorithms | src/main/scala/Matrices.scala | Scala | mit | 496 |
package filodb.memory.format.vectors
import java.nio.ByteBuffer
import debox.Buffer
import filodb.memory.{BlockMemFactory, MemoryStats, PageAlignedBlockManager}
import filodb.memory.format._
class IntBinaryVectorTest extends NativeVectorTest {
  describe("IntAppendingVector") {
    // Round-trips a mix of positive and negative ints through freeze and read-back.
    it("should append a mix of Ints and read them all back") {
      val orig = Seq(1, 2, -5, 101)
      val builder = IntBinaryVector(memFactory, orig)
      builder.length shouldEqual 4
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList shouldEqual orig
    }

    // Values spanning the full 16-bit signed range must survive append/freeze/read.
    it("should append 16-bit Ints and read them back") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 5)
      val orig = Seq(1, 0, -127, Short.MaxValue, Short.MinValue)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      builder.length should equal (5)
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList should equal (orig)
    }

    // Values in the signed byte range (-128..127) round-trip as well.
    it("should append bytes and read them back") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 4)
      val orig = Seq(1, 0, -128, 127)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      builder.length should equal (4)
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList should equal (orig)
    }

    // The optimized off-heap bytes must be readable via all three on-heap accessor kinds.
    it("should be able to read from on-heap IntBinaryVector") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 4)
      val orig = Seq(1, 0, -128, 127)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      val optimized = builder.optimize(memFactory)
      val bytes = IntBinaryVector(acc, optimized).toBytes(acc, optimized)
      val onHeapAcc = Seq(MemoryReader.fromArray(bytes),
        MemoryReader.fromByteBuffer(BinaryVector.asBuffer(optimized)),
        MemoryReader.fromByteBuffer(ByteBuffer.wrap(bytes)))
      onHeapAcc.foreach { a =>
        IntBinaryVector(a, 0).toBuffer(a, 0).toList shouldEqual orig
      }
    }

    // toBuffer with a non-zero start offset must skip exactly the leading elements.
    it("should iterate with startElement > 0") {
      val orig = Seq(1, 0, -128, 127, 2, 4, 3, 7)
      val builder = IntBinaryVector(memFactory, orig)
      builder.length shouldEqual orig.length
      val frozen = builder.freeze(memFactory)
      (2 to 5).foreach { start =>
        IntBinaryVector(acc, frozen).toBuffer(acc, frozen, start).toList shouldEqual orig.drop(start)
      }
    }

    // A partially-filled, over-allocated vector must still freeze down to its actual size.
    it("should be able to create new FiloVector from frozen appending vector") {
      // Make sure it can freeze when primaryMaxBytes is much greater.
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 1000)
      val orig = Seq(1, 0, -128, 127)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      val readVect = IntBinaryVector(acc, builder.addr)
      readVect.length(acc, builder.addr) shouldEqual 4
      readVect.toBuffer(acc, builder.addr).toList shouldEqual orig
      builder.frozenSize should equal (24)
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).length(acc, frozen) shouldEqual 4
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList shouldEqual orig
    }

    // Appending past the fixed capacity must fail with VectorTooSmall, not corrupt state.
    it("should return VectorTooSmall if not enough space to add new items") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 4)
      val orig = Seq(1, 2, -5, 101)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      builder.addNA() shouldEqual VectorTooSmall(25, 24)
      builder.length shouldEqual 4
    }
  }
  describe("IntBinaryVector 2/4 bit") {
    // nbits=4 packs two values per byte; numBytes/frozenSize assert the packed layout.
    it("should append and read back list with nbits=4") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 10, nbits = 4, signed = false)
      builder.length should equal (0)
      builder.addData(2) shouldEqual Ack
      builder.numBytes should equal (8 + 1)
      builder.reader.toBuffer(acc, builder.addr).toList shouldEqual Seq(2)
      builder.addData(4) shouldEqual Ack
      builder.addData(3) shouldEqual Ack
      builder.length should equal (3)
      builder.reader.toBuffer(acc, builder.addr).toList shouldEqual Seq(2, 4, 3)
      builder.frozenSize should equal (8 + 2)
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).length(acc, frozen) shouldEqual 3
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList shouldEqual Seq(2, 4, 3)
    }

    // nbits=2 packs four values per byte; only values 0..3 are used here.
    it("should append and read back list with nbits=2") {
      val builder = IntBinaryVector.appendingVectorNoNA(memFactory, 10, nbits = 2, signed = false)
      val orig = Seq(0, 2, 1, 3, 2)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      builder.reader.toBuffer(acc, builder.addr).toList shouldEqual orig
      builder.numBytes shouldEqual 10
      val frozen = builder.freeze(memFactory)
      IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList shouldEqual orig
    }

    // Sub-byte appends must fully overwrite memory: blocks are pre-filled with 0x55
    // so any bits not rewritten would corrupt the read-back values.
    it("should append correctly when memory has previous values / was not zeroed") {
      import collection.JavaConverters._
      val blockStore = new PageAlignedBlockManager(10 * 1024 * 1024,
        new MemoryStats(Map("test"-> "test")), null, 16) {
        freeBlocks.asScala.foreach(_.set(0x55)) // initialize blocks to nonzero value
      }
      val blockFactory = new BlockMemFactory(blockStore, None, 24, Map("foo" -> "bar"), true)
      // original values will get mixed with nonzero contents if append does not overwrite original memory
      val builder = IntBinaryVector.appendingVectorNoNA(blockFactory, 10, nbits = 4, signed = false)
      val orig = Seq(0, 1, 1, 3, 4)
      orig.foreach(x => builder.addData(x) shouldEqual Ack)
      builder.reader.toBuffer(acc, builder.addr).toList shouldEqual orig
      // original values will get mixed with nonzero contents if append does not overwrite original memory
      val builder2 = IntBinaryVector.appendingVectorNoNA(blockFactory, 10, nbits = 2, signed = false)
      val orig2 = Seq(0, 1, 1, 0, 2)
      orig2.foreach(x => builder2.addData(x) shouldEqual Ack)
      builder2.reader.toBuffer(acc, builder2.addr).toList shouldEqual orig2
      blockStore.releaseBlocks()
    }

    // optimize() must shrink nbits even for NoNA vectors; both paths land on 10 bytes.
    it("should optimize even with NoNA vectors to less nbits") {
      val orig = Seq(0, 2, 1, 3, 2)
      val builder1 = IntBinaryVector(memFactory, orig)
      val intVect = builder1.optimize(memFactory)
      IntBinaryVector(acc, intVect).toBuffer(acc, intVect).toList shouldEqual orig
      BinaryVector.totalBytes(acc, intVect) shouldEqual 10
      val builder2 = IntBinaryVector.appendingVectorNoNA(memFactory, 10)
      orig.foreach(x => builder2.addData(x) shouldEqual Ack)
      BinaryVector.totalBytes(acc, builder2.optimize(memFactory)) shouldEqual 10
    }
  }
describe("MaskedIntAppendingVector") {
it("should append a list of all NAs and read all NAs back") {
val builder = IntBinaryVector.appendingVector(memFactory, 100)
builder.addNA shouldEqual Ack
builder.isAllNA should be (true)
builder.noNAs should be (false)
val sc = builder.optimize(memFactory)
IntBinaryVector(acc, sc).length(acc, sc) should equal (1)
IntBinaryVector(acc, sc)(acc, sc, 0) // Just to make sure this does not throw an exception
// IntBinaryVector(sc).isAvailable(0) should equal (false)
IntBinaryVector(acc, sc).toBuffer(acc, sc) shouldEqual Buffer.empty[Int]
// IntBinaryVector(sc).optionIterator.toSeq should equal (Seq(None))
}
it("should encode a mix of NAs and Ints and decode iterate and skip NAs") {
val cb = IntBinaryVector.appendingVector(memFactory, 5)
cb.addNA shouldEqual Ack
cb.addData(101) shouldEqual Ack
cb.addData(102) shouldEqual Ack
cb.addData(103) shouldEqual Ack
cb.addNA shouldEqual Ack
cb.isAllNA should be (false)
cb.noNAs should be (false)
val sc = cb.optimize(memFactory)
val reader = IntBinaryVector(acc, sc)
reader.length(acc, sc) shouldEqual 5
// reader.isAvailable(0) should equal (false)
// reader.isAvailable(1) should equal (true)
// reader.isAvailable(4) should equal (false)
reader(acc, sc, 1) should equal (101)
// reader.get(0) should equal (None)
// reader.get(-1) should equal (None)
// reader.get(2) should equal (Some(102))
reader.toBuffer(acc, sc) shouldEqual Buffer(101, 102, 103)
}
it("should be able to append lots of ints off-heap and grow vector") {
val numInts = 1000
val builder = IntBinaryVector.appendingVector(memFactory, numInts / 2)
(0 until numInts).foreach(x => builder.addData(x) shouldEqual Ack)
builder.length should equal (numInts)
builder.isAllNA should be (false)
builder.noNAs should be (true)
}
it("should be able to grow vector even if adding all NAs") {
val numInts = 1000
val builder = IntBinaryVector.appendingVector(memFactory, numInts / 2)
builder shouldBe a[GrowableVector[_]]
(0 until numInts).foreach(i => builder.addNA shouldEqual Ack)
builder.length should equal (numInts)
builder.isAllNA should be (true)
builder.noNAs should be (false)
}
it("should be able to return minMax accurately with NAs") {
val cb = IntBinaryVector.appendingVector(memFactory, 5)
cb.addNA shouldEqual Ack
cb.addData(101) shouldEqual Ack
cb.addData(102) shouldEqual Ack
cb.addData(103) shouldEqual Ack
cb.addNA shouldEqual Ack
val inner = cb.asInstanceOf[GrowableVector[Int]].inner.asInstanceOf[MaskedIntAppendingVector]
inner.minMax should equal ((101, 103))
}
it("should be able to freeze() and minimize bytes used") {
val builder = IntBinaryVector.appendingVector(memFactory, 100)
// Test numBytes to make sure it's accurate
builder.numBytes should equal (12 + 16 + 8) // 2 long words needed for 100 bits
(0 to 4).foreach(x => builder.addData(x) shouldEqual Ack)
builder.numBytes should equal (12 + 16 + 8 + 20)
val frozen = builder.freeze(memFactory)
BinaryVector.totalBytes(acc, frozen) should equal (12 + 8 + 8 + 20) // bitmask truncated
IntBinaryVector(acc, frozen).length(acc, frozen) shouldEqual 5
IntBinaryVector(acc, frozen).toBuffer(acc, frozen).toList should equal (0 to 4)
}
it("should optimize and parse back using IntBinaryVector.apply") {
val cb = IntBinaryVector.appendingVector(memFactory, 5)
cb.addNA shouldEqual Ack
cb.addData(101) shouldEqual Ack
cb.addData(102) shouldEqual Ack
cb.addData(103) shouldEqual Ack
cb.addNA shouldEqual Ack
val buffer = cb.optimize(memFactory)
val readVect = IntBinaryVector(acc, buffer)
readVect.toBuffer(acc, buffer) shouldEqual Buffer(101, 102, 103)
}
it("should support resetting and optimizing AppendableVector multiple times") {
val cb = IntBinaryVector.appendingVector(memFactory, 5)
// Use large numbers on purpose so cannot optimized to less than 32 bits
val orig = Seq(100000, 200001, 300002)
cb.addNA() shouldEqual Ack
orig.foreach(x => cb.addData(x) shouldEqual Ack)
cb.copyToBuffer.toList shouldEqual orig
val optimized = cb.optimize(memFactory)
val readVect1 = IntBinaryVector(acc, optimized)
readVect1.toBuffer(acc, optimized).toList shouldEqual orig
// Now the optimize should not have damaged original vector
cb.copyToBuffer shouldEqual Buffer.fromIterable(orig)
cb.reset()
val orig2 = orig.map(_ * 2)
orig2.foreach(x => cb.addData(x) shouldEqual Ack)
val frozen2 = cb.optimize(memFactory)
val readVect2 = IntBinaryVector(BinaryVector.asBuffer(frozen2))
readVect2.toBuffer(acc, frozen2).toList shouldEqual orig2
cb.copyToBuffer.toList shouldEqual orig2
}
it("should be able to optimize a 32-bit appending vector to smaller size") {
val builder = IntBinaryVector.appendingVector(memFactory, 100)
(0 to 4).foreach(x => builder.addData(x) shouldEqual Ack)
val optimized = builder.optimize(memFactory)
IntBinaryVector(acc, optimized).length(acc, optimized) shouldEqual 5
IntBinaryVector(acc, optimized).toBuffer(acc, optimized) shouldEqual Buffer.fromIterable(0 to 4)
BinaryVector.totalBytes(acc, optimized) shouldEqual (8 + 3) // nbits=4, so only 3 extra bytes
}
it("should be able to optimize constant ints to an IntConstVector") {
val builder = IntBinaryVector.appendingVector(memFactory, 100)
(0 to 4).foreach(n => builder.addData(999))
val buf = builder.optimize(memFactory)
val readVect = IntBinaryVector(acc, buf)
readVect shouldEqual IntConstVector
readVect.toBuffer(acc, buf) shouldEqual Buffer(999, 999, 999, 999, 999)
}
}
} | tuplejump/FiloDB | memory/src/test/scala/filodb.memory/format/vectors/IntBinaryVectorTest.scala | Scala | apache-2.0 | 12,816 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.it
import java.util.Collections
import java.util.function.{ Function => JFunction }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import org.scalatest.{ Inside, Matchers, WordSpecLike }
import play.api.{ Application, Configuration, Environment }
import play.inject.guice.GuiceApplicationBuilder
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import akka.japi.function.Procedure
import com.google.inject.{ Binder, Module, TypeLiteral }
import com.lightbend.lagom.javadsl.testkit.ServiceTest
import com.lightbend.lagom.javadsl.testkit.ServiceTest.TestServer
import play.api.routing.Router
import java.util
import com.lightbend.lagom.internal.testkit.EmptyAdditionalRoutersModule
/** A Play HTTP server backend the integration tests can be run against. */
sealed trait HttpBackend {
  /** Short backend name used to build the provider class name (e.g. "AkkaHttp"). */
  val codeName: String
  /** Fully-qualified class name of the Play `ServerProvider` for this backend. */
  final val provider: String = "play.core.server." + codeName + "ServerProvider"
}

/** The Akka HTTP server backend. */
case object AkkaHttp extends HttpBackend {
  val codeName = "AkkaHttp"
}

/** The Netty server backend. */
case object Netty extends HttpBackend {
  val codeName = "Netty"
}
trait ServiceSupport extends WordSpecLike with Matchers with Inside {
  /**
   * Starts a Lagom test server (clustering disabled) using the given HTTP backend
   * and runs `block` against the underlying Play [[Application]].
   *
   * The Scala `configureBuilder` and `block` are wrapped in Java functional types
   * because `ServiceTest` is the javadsl test kit.
   */
  def withServer(
    configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => Unit)(implicit httpBackend: HttpBackend): Unit = {
    val jConfigureBuilder = new JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder] {
      override def apply(b: GuiceApplicationBuilder): GuiceApplicationBuilder = {
        // Apply the caller's configuration, then force the selected server backend.
        configureBuilder(b)
          .overrides(EmptyAdditionalRoutersModule)
          .configure("play.server.provider", httpBackend.provider)
      }
    }
    val jBlock = new Procedure[TestServer] {
      override def apply(server: TestServer): Unit = {
        // Hand the Scala view of the Play application to the test body.
        block(server.app.asScala())
      }
    }
    val setup = ServiceTest.defaultSetup.configureBuilder(jConfigureBuilder).withCluster(false)
    ServiceTest.withServer(setup, jBlock)
  }

  /**
   * Like [[withServer]], but also resolves a client of type `T` from the
   * application's injector and passes it to the test body.
   */
  def withClient[T: ClassTag](
    configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => T => Unit)(implicit httpBackend: HttpBackend): Unit = {
    withServer(configureBuilder) { application =>
      val client = application.injector.instanceOf[T]
      block(application)(client)
    }
  }

  // Materializer taken from the running application, for stream-based assertions.
  implicit def materializer(implicit app: Application): Materializer = app.materializer

  /** Drains a source into a List, preserving element order; blocks up to 10 seconds. */
  def consume[T](source: Source[T, _])(implicit mat: Materializer): List[T] = {
    Await.result(source.runFold(List.empty[T])((list, t) => t :: list), 10.seconds).reverse
  }
}
| rstento/lagom | service/javadsl/integration-tests/src/test/scala/com/lightbend/lagom/it/ServiceSupport.scala | Scala | apache-2.0 | 2,598 |
package lossycounting
import frequencycount.Item
import frequencycount.lossycounting.LossyCountingModel
import unitspec.UnitSpec
import utils.Utils._
import testutils.TestUtils._
class LossyCountingModelSpec extends UnitSpec{

  // Support threshold s = 20%; allowed error e = 10% of s = 2%.
  val frequency = 0.2
  val error = 0.1 * frequency

  // Feeds four windows of 50 items each through the model and checks, after every
  // window, which items the lossy-counting output retains and with what counts.
  "Lossy Counting" should "count correct values per batch" in {
    val lossyCounting = new LossyCountingModel[String](frequency, error)
    println("All items with true frequency > 20% will be output")
    println("All items between 18%-20% will be output and false positives")
    println("No element will be print with frequency less than 18%")
    val window0 = List.concat(create(19, Item.Red), create(11, Item.Blue), create(10, Item.Yellow), create(10, Item.Brown), create(0, Item.Green))
    val step0 = lossyCounting.process(window0)
    val step0Output = step0.computeOutput()
    // red freq = 18/50 = 0.36
    // blue freq = 10/50 = 0.2,
    // yellow freq = 9/50 = 0.18
    // brown freq = 9/50 = 0.18
    assert(step0Output.length == 4)
    assertColourAndCount(step0Output(0), Item.Red.toString, 18)
    assertColourAndCount(step0Output(1), Item.Blue.toString, 10)
    assertColourAndCount(step0Output(2), Item.Yellow.toString, 9)
    assertColourAndCount(step0Output(3), Item.Brown.toString, 9)
    val window1 = List.concat(create(30, Item.Red), create(10, Item.Blue), create(10, Item.Yellow))
    val step1 = lossyCounting.process(window1)
    val step1Output = step1.computeOutput()
    //red freq = 47 / 100
    //blue freq = 19 / 100
    //yellow freq = 18 / 100
    //brown freq = 8/100
    // brown has dropped below the output threshold and is pruned
    assert(step1Output.length === 3)
    assertColourAndCount(step1Output(0), Item.Red.toString, 47)
    assertColourAndCount(step1Output(1), Item.Blue.toString, 19)
    assertColourAndCount(step1Output(2), Item.Yellow.toString, 18)
    val window2 = List.concat(create(30, Item.Red), create(10, Item.Blue), create(0, Item.Yellow), create(5, Item.Brown), create(5, Item.Green))
    val step2 = lossyCounting.process(window2)
    val step2Output = step2.computeOutput()
    //red freq = 76 / 150 = 50%
    // blue freq = 28 / 150 = 18.6%
    // yellow freq = 17/150 = 11.3%
    // brown freq = 12 / 150 = 8%
    // yellow now also falls out; only red and blue remain
    assert(step2Output.length === 2)
    assertColourAndCount(step2Output(0), Item.Red.toString, 76)
    assertColourAndCount(step2Output(1), Item.Blue.toString, 28)
    val window3 = List.concat(create(40, Item.Red), create(10, Item.Blue))
    val step3 = lossyCounting.process(window3)
    val step3Output = step3.computeOutput()
    //red freq = 115/200 = 57.5%
    //blue freq = 37/200 = 18.5%
    // yellow freq = 18 / 200 = 9.5%
    //brown freq = 11/200 = 5.5%
    assert(step3Output.length === 2)
    assertColourAndCount(step3Output(0), Item.Red.toString, 115)
    assertColourAndCount(step3Output(1), Item.Blue.toString, 37)
    val window4 = List.concat(create(40, Item.Red), create(10, Item.Blue))
    val step4 = lossyCounting.process(window4)
    val step4Output = step4.computeOutput()
    //red freq = 154 / 250 = 61.6%
    //blue freq = 46/250 = 18.4%
    //yellow freq = 17/250 = 6.8%
    //brown freq = 10 / 250 = 4%
    assert(step4Output.length === 2)
    assertColourAndCount(step4Output(0), Item.Red.toString, 154)
    assertColourAndCount(step4Output(1), Item.Blue.toString, 46)
  }
}
| mvogiatzis/freq-count | src/test/scala/lossycounting/LossyCountingModelSpec.scala | Scala | mit | 3,323 |
package spark
import scala.collection.mutable.Map
// Task result. Also contains updates to accumulator variables.
// TODO: Use of distributed cache to return result is a hack to get around
// what seems to be a bug with messages over 60KB in libprocess; fix it
// `value` is the task's computed result; `accumUpdates` carries per-accumulator
// updates (keyed by what appears to be the accumulator id — confirm against Accumulators).
private class TaskResult[T](val value: T, val accumUpdates: Map[Long, Any]) extends Serializable
| javelinjs/spark | core/src/main/scala/spark/TaskResult.scala | Scala | bsd-3-clause | 360 |
object ch8_14 {
  // Exercise 8.14 placeholder: ??? throws scala.NotImplementedError if referenced.
  ???
}
import ch8_14._
/*
from repl you can test typing:
:load src/main/scala/fpinscala/ch6/RNG.scala
:load src/main/scala/fpinscala/ch6/State.scala
:load src/main/scala/fpinscala/ch8/Gen.scala
:load src/main/scala/fpinscala/ch8/Exercise14.scala
*/
| rucka/fpinscala | src/main/scala/fpinscala/ch8/Exercise14.scala | Scala | gpl-2.0 | 274 |
package edu.gemini.pit.ui.util
import edu.gemini.shared.gui.textComponent.TextRenderer
import swing.ComboBox
import scala.language.reflectiveCalls
/**
 * A mixin for showing the value rather than the toString: renders each combo-box
 * entry using its `value()` method.
 *
 * NOTE(review): the bound `{def value():String}` is a structural type, so `a.value()`
 * is invoked reflectively at runtime (hence the `reflectiveCalls` import above).
 */
trait ValueRenderer[A <: {def value():String}] extends TextRenderer[A] { this:ComboBox[A] =>
  def text(a:A):String = a.value()
}
| arturog8m/ocs | bundle/edu.gemini.pit/src/main/scala/edu/gemini/pit/ui/util/ValueRenderer.scala | Scala | bsd-3-clause | 340 |
/*
* Copyright 2016-2020 Daniel Urban and contributors listed in AUTHORS
* Copyright 2020 Nokia
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.seals
package tests
import java.util.UUID
import cats.Eq
import cats.kernel.laws.discipline._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.prop.Configuration
import org.scalacheck.{ Arbitrary, Gen }
import org.scalacheck.util.Buildable
import org.typelevel.discipline.scalatest.FunSuiteDiscipline
import laws.{ AnyLaws, AtomicLaws }
/**
 * Base trait for law-checking test suites: wires ScalaTest, Discipline, and the
 * project's shared Eq/Arbitrary test instances together.
 */
trait BaseLawsSpec
  extends AnyFunSuite
  with FunSuiteDiscipline
  with Configuration
  with laws.TestEqInstances
  with laws.TestArbInstances {

  // UUIDs compare by standard equals; sufficient for law checking.
  implicit val eqForUuid: Eq[UUID] =
    Eq.fromUniversalEquals

  // Java enum values are singletons, so reference equality is a valid Eq.
  implicit def eqForJavaEnums[A <: java.lang.Enum[A]]: Eq[A] =
    referenceEq[A]

  /**
   * Registers the standard battery of law checks for an `Atomic[A]` instance:
   * generic Any laws, serializability of equality, reference equality, Eq laws,
   * and the Atomic round-trip law.
   *
   * @param name label used in the registered test names
   */
  def checkAtomicLaws[A](name: String)(implicit a: Arbitrary[A], e: Eq[A], at: Atomic[A]): Unit = {
    checkAll(s"Atomic[$name].AnyLaws.any", AnyLaws[Atomic[A]].any)
    checkAll(s"Atomic[$name].AnyLaws.equalitySerializability", AnyLaws[Atomic[A]].equalitySerializability)
    checkAll(s"Atomic[$name].AnyLaws.referenceEquality", AnyLaws[Atomic[A]].referenceEquality)
    checkAll(s"Atomic[$name].EqTests.eqv", EqTests[Atomic[A]].eqv)
    checkAll(s"Atomic[$name].AtomicLaws.roundtrip", AtomicLaws[A].roundtrip)
  }

  /**
   * Some generators fail to produce
   * instances quite frequently. When
   * generating big containers of these,
   * Scalacheck tends to give up after
   * a while. For these cases we provide
   * `Arbitrary` instances for small
   * containers. This is the max size of
   * these.
   *
   * @see `LimitedContainers`
   */
  protected val maxContainerSize = 3

  // Opt-in Arbitrary instances that cap container sizes at maxContainerSize.
  object LimitedContainers {
    implicit def arbCont[F[_], A](
      implicit
      A: Arbitrary[A],
      B: Buildable[A, F[A]],
      T: F[A] => Iterable[A]
    ): Arbitrary[F[A]] = Arbitrary {
      for {
        n <- Gen.choose(0, maxContainerSize)
        v <- Gen.containerOfN[F, A](n, A.arbitrary)
      } yield v
    }
  }
}
| durban/seals | tests/src/test/scala/dev/tauri/seals/tests/BaseLawsSpec.scala | Scala | apache-2.0 | 2,593 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import org.junit.Assert._
import kafka.api.ProducerResponseStatus
import kafka.common.{OffsetAndMetadata, TopicAndPartition}
import kafka.message.MessageSet
import kafka.server.{ReplicaManager, KafkaConfig}
import kafka.utils._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.{OffsetCommitRequest, JoinGroupRequest}
import org.easymock.{Capture, IAnswer, EasyMock}
import org.junit.{After, Before, Test}
import org.scalatest.junit.JUnitSuite
import java.util.concurrent.TimeUnit
import scala.collection._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}
/**
* Test GroupCoordinator responses
*/
class GroupCoordinatorResponseTest extends JUnitSuite {
  // Callback and callback-parameter aliases mirroring GroupCoordinator's async API.
  type JoinGroupCallback = JoinGroupResult => Unit
  type SyncGroupCallbackParams = (Array[Byte], Short)
  type SyncGroupCallback = (Array[Byte], Short) => Unit
  type HeartbeatCallbackParams = Short
  type HeartbeatCallback = Short => Unit
  type CommitOffsetCallbackParams = Map[TopicAndPartition, Short]
  type CommitOffsetCallback = Map[TopicAndPartition, Short] => Unit
  type LeaveGroupCallbackParams = Short
  type LeaveGroupCallback = Short => Unit

  // Fixed client identity and session-timeout bounds used across the tests.
  val ClientId = "consumer-test"
  val ClientHost = "localhost"
  val ConsumerMinSessionTimeout = 10
  val ConsumerMaxSessionTimeout = 1000
  val DefaultSessionTimeout = 500

  // Mutable fixture state, (re)created in setUp and torn down in tearDown.
  var groupCoordinator: GroupCoordinator = null
  var replicaManager: ReplicaManager = null
  var scheduler: KafkaScheduler = null
  var zkUtils: ZkUtils = null

  // Default group/member/protocol parameters shared by most tests.
  private val groupId = "groupId"
  private val protocolType = "consumer"
  private val memberId = "memberId"
  private val metadata = Array[Byte]()
  private val protocols = List(("range", metadata))
  private var groupPartitionId: Int = -1

  // we use this string value since its hashcode % #.partitions is different
  private val otherGroupId = "otherGroup"
  // Builds a GroupCoordinator backed by niceMock replica/zk layers and claims
  // partition 0 of the group metadata topic, so `groupId` is owned by this
  // coordinator while `otherGroupId` (hashing to partition 1) is not.
  @Before
  def setUp() {
    val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "")
    props.setProperty(KafkaConfig.GroupMinSessionTimeoutMsProp, ConsumerMinSessionTimeout.toString)
    props.setProperty(KafkaConfig.GroupMaxSessionTimeoutMsProp, ConsumerMaxSessionTimeout.toString)

    // make two partitions of the group topic to make sure some partitions are not owned by the coordinator
    val ret = mutable.Map[String, Map[Int, Seq[Int]]]()
    ret += (GroupCoordinator.GroupMetadataTopicName -> Map(0 -> Seq(1), 1 -> Seq(1)))

    replicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])

    zkUtils = EasyMock.createNiceMock(classOf[ZkUtils])
    EasyMock.expect(zkUtils.getPartitionAssignmentForTopics(Seq(GroupCoordinator.GroupMetadataTopicName))).andReturn(ret)
    EasyMock.replay(zkUtils)

    groupCoordinator = GroupCoordinator.create(KafkaConfig.fromProps(props), zkUtils, replicaManager)
    groupCoordinator.startup()

    // add the partition into the owned partition list
    groupPartitionId = groupCoordinator.partitionFor(groupId)
    groupCoordinator.groupManager.addPartitionOwnership(groupPartitionId)
  }
  // Resets the replica-manager mock and shuts the coordinator down after each test.
  @After
  def tearDown() {
    EasyMock.reset(replicaManager)
    groupCoordinator.shutdown()
  }
  // --- JoinGroup: coordinator routing, session-timeout validation, member-id and
  // --- group-id validation, and the basic success path.

  // Joining via a coordinator that does not own the group's partition must be rejected.
  @Test
  def testJoinGroupWrongCoordinator() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(otherGroupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NOT_COORDINATOR_FOR_GROUP.code, joinGroupErrorCode)
  }

  // Session timeout below the broker's configured minimum is invalid.
  @Test
  def testJoinGroupSessionTimeoutTooSmall() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, ConsumerMinSessionTimeout - 1, protocolType, protocols)
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.INVALID_SESSION_TIMEOUT.code, joinGroupErrorCode)
  }

  // Session timeout above the broker's configured maximum is invalid.
  @Test
  def testJoinGroupSessionTimeoutTooLarge() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, ConsumerMaxSessionTimeout + 1, protocolType, protocols)
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.INVALID_SESSION_TIMEOUT.code, joinGroupErrorCode)
  }

  // A non-UNKNOWN member id for a group that does not exist yet must be rejected.
  @Test
  def testJoinGroupUnknownConsumerNewGroup() {
    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.UNKNOWN_MEMBER_ID.code, joinGroupErrorCode)
  }

  // An empty group id is invalid.
  @Test
  def testInvalidGroupId() {
    val groupId = ""
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    assertEquals(Errors.INVALID_GROUP_ID.code, joinGroupResult.errorCode)
  }

  // Happy path: an UNKNOWN member id with valid parameters joins successfully.
  @Test
  def testValidJoinGroup() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType,
      protocols)
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NONE.code, joinGroupErrorCode)
  }
  // --- JoinGroup: protocol compatibility between members of an existing group.

  // A second member with a different protocol type ("connect" vs "consumer") must be rejected.
  @Test
  def testJoinGroupInconsistentProtocolType() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
    val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType,
      protocols)
    assertEquals(Errors.NONE.code, joinGroupResult.errorCode)

    EasyMock.reset(replicaManager)
    val otherJoinGroupResult = joinGroup(groupId, otherMemberId, DefaultSessionTimeout, "connect", protocols)
    assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL.code, otherJoinGroupResult.errorCode)
  }

  // A second member whose supported partition-assignment protocols do not overlap
  // ("roundrobin" vs "range") must be rejected.
  @Test
  def testJoinGroupInconsistentGroupProtocol() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, List(("range", metadata)))
    assertEquals(Errors.NONE.code, joinGroupResult.errorCode)

    EasyMock.reset(replicaManager)
    val otherJoinGroupResult = joinGroup(groupId, otherMemberId, DefaultSessionTimeout, protocolType,
      List(("roundrobin", metadata)))
    assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL.code, otherJoinGroupResult.errorCode)
  }

  // A member id the group has never assigned must be rejected even for an existing group.
  @Test
  def testJoinGroupUnknownConsumerExistingGroup() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
    val otherMemberId = "memberId"

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    assertEquals(Errors.NONE.code, joinGroupResult.errorCode)

    EasyMock.reset(replicaManager)
    val otherJoinGroupResult = joinGroup(groupId, otherMemberId, DefaultSessionTimeout, protocolType, protocols)
    assertEquals(Errors.UNKNOWN_MEMBER_ID.code, otherJoinGroupResult.errorCode)
  }
  // --- Heartbeat: routing, membership, generation checks, and the success path.

  // Heartbeats for a group this coordinator does not own are rejected.
  @Test
  def testHeartbeatWrongCoordinator() {
    val heartbeatResult = heartbeat(otherGroupId, memberId, -1)
    assertEquals(Errors.NOT_COORDINATOR_FOR_GROUP.code, heartbeatResult)
  }

  // Heartbeating a group that does not exist yields UNKNOWN_MEMBER_ID.
  @Test
  def testHeartbeatUnknownGroup() {

    val heartbeatResult = heartbeat(groupId, memberId, -1)
    assertEquals(Errors.UNKNOWN_MEMBER_ID.code, heartbeatResult)
  }

  // After join + sync by one member, a heartbeat from a different member id is rejected.
  @Test
  def testHeartbeatUnknownConsumerExistingGroup() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
    val otherMemberId = "memberId"

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val assignedMemberId = joinGroupResult.memberId
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NONE.code, joinGroupErrorCode)

    EasyMock.reset(replicaManager)
    val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
    val syncGroupErrorCode = syncGroupResult._2
    assertEquals(Errors.NONE.code, syncGroupErrorCode)

    EasyMock.reset(replicaManager)
    val heartbeatResult = heartbeat(groupId, otherMemberId, 1)
    assertEquals(Errors.UNKNOWN_MEMBER_ID.code, heartbeatResult)
  }

  // Heartbeating after join but before sync (group still rebalancing) reports
  // REBALANCE_IN_PROGRESS.
  @Test
  def testHeartbeatRebalanceInProgress() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val assignedMemberId = joinGroupResult.memberId
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NONE.code, joinGroupErrorCode)

    EasyMock.reset(replicaManager)
    val heartbeatResult = heartbeat(groupId, assignedMemberId, 2)
    assertEquals(Errors.REBALANCE_IN_PROGRESS.code, heartbeatResult)
  }

  // A heartbeat carrying a stale/incorrect generation id is rejected.
  @Test
  def testHeartbeatIllegalGeneration() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val assignedMemberId = joinGroupResult.memberId
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NONE.code, joinGroupErrorCode)

    EasyMock.reset(replicaManager)
    val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
    val syncGroupErrorCode = syncGroupResult._2
    assertEquals(Errors.NONE.code, syncGroupErrorCode)

    EasyMock.reset(replicaManager)
    val heartbeatResult = heartbeat(groupId, assignedMemberId, 2)
    assertEquals(Errors.ILLEGAL_GENERATION.code, heartbeatResult)
  }

  // Happy path: join, sync, then heartbeat with the correct generation succeeds.
  @Test
  def testValidHeartbeat() {
    val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID

    val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
    val assignedConsumerId = joinGroupResult.memberId
    val generationId = joinGroupResult.generationId
    val joinGroupErrorCode = joinGroupResult.errorCode
    assertEquals(Errors.NONE.code, joinGroupErrorCode)

    EasyMock.reset(replicaManager)
    val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
    val syncGroupErrorCode = syncGroupResult._2
    assertEquals(Errors.NONE.code, syncGroupErrorCode)

    EasyMock.reset(replicaManager)
    val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
    assertEquals(Errors.NONE.code, heartbeatResult)
  }
@Test
def testSyncGroupEmptyAssignment() {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupErrorCode = joinGroupResult.errorCode
assertEquals(Errors.NONE.code, joinGroupErrorCode)
EasyMock.reset(replicaManager)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map())
val syncGroupErrorCode = syncGroupResult._2
assertEquals(Errors.NONE.code, syncGroupErrorCode)
assertTrue(syncGroupResult._1.isEmpty)
EasyMock.reset(replicaManager)
val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.NONE.code, heartbeatResult)
}
@Test
def testSyncGroupNotCoordinator(): Unit = {
  // Syncing against a group owned by another coordinator must be rejected.
  val (_, error) = syncGroupFollower(otherGroupId, 1, memberId)
  assertEquals(Errors.NOT_COORDINATOR_FOR_GROUP.code, error)
}
@Test
def testSyncGroupFromUnknownGroup(): Unit = {
  // Syncing against a group this coordinator owns but that does not exist
  // yields UNKNOWN_MEMBER_ID.
  val (_, error) = syncGroupFollower(groupId, 1, memberId)
  assertEquals(Errors.UNKNOWN_MEMBER_ID.code, error)
}
@Test
def testSyncGroupFromUnknownMember(): Unit = {
  // Establish a stable group, then sync with a member id the group has never seen.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  val leaderId = joinResult.memberId
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  val (_, syncError) = syncGroupLeader(groupId, joinResult.generationId, leaderId, Map(leaderId -> Array.empty[Byte]))
  assertEquals(Errors.NONE.code, syncError)

  EasyMock.reset(replicaManager)
  val (_, unknownMemberError) = syncGroupFollower(groupId, joinResult.generationId, "blah")
  assertEquals(Errors.UNKNOWN_MEMBER_ID.code, unknownMemberError)
}
@Test
def testSyncGroupFromIllegalGeneration(): Unit = {
  // A sync carrying generation + 1 must be rejected with ILLEGAL_GENERATION.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  val leaderId = joinResult.memberId
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  // send the sync group with an invalid generation
  val (_, syncError) = syncGroupLeader(groupId, joinResult.generationId + 1, leaderId, Map(leaderId -> Array.empty[Byte]))
  assertEquals(Errors.ILLEGAL_GENERATION.code, syncError)
}
@Test
def testJoinGroupFromUnchangedFollowerDoesNotRebalance() {
  // to get a group of two members:
  // 1. join and sync with a single member (because we can't immediately join with two members)
  // 2. join and sync with the first member and a new member
  val firstJoinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  val firstMemberId = firstJoinResult.memberId
  val firstGenerationId = firstJoinResult.generationId
  // the sole member of a fresh group becomes its leader
  assertEquals(firstMemberId, firstJoinResult.leaderId)
  assertEquals(Errors.NONE.code, firstJoinResult.errorCode)
  EasyMock.reset(replicaManager)
  val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
  assertEquals(Errors.NONE.code, firstSyncResult._2)
  EasyMock.reset(replicaManager)
  // a second member joining triggers a rebalance; both members must re-join to complete it
  val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  EasyMock.reset(replicaManager)
  val joinFuture = sendJoinGroup(groupId, firstMemberId, DefaultSessionTimeout, protocolType, protocols)
  val joinResult = await(joinFuture, DefaultSessionTimeout+100)
  val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.NONE.code, joinResult.errorCode)
  assertEquals(Errors.NONE.code, otherJoinResult.errorCode)
  assertTrue(joinResult.generationId == otherJoinResult.generationId)
  assertEquals(firstMemberId, joinResult.leaderId)
  assertEquals(firstMemberId, otherJoinResult.leaderId)
  val nextGenerationId = joinResult.generationId
  // this shouldn't cause a rebalance since protocol information hasn't changed
  EasyMock.reset(replicaManager)
  val followerJoinResult = joinGroup(groupId, otherJoinResult.memberId, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, followerJoinResult.errorCode)
  // an unchanged generation id proves no new rebalance was started
  assertEquals(nextGenerationId, followerJoinResult.generationId)
}
@Test
def testJoinGroupFromUnchangedLeaderShouldRebalance(): Unit = {
  val firstJoin = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  val leaderId = firstJoin.memberId
  assertEquals(leaderId, firstJoin.leaderId)
  assertEquals(Errors.NONE.code, firstJoin.errorCode)

  EasyMock.reset(replicaManager)
  val (_, syncError) = syncGroupLeader(groupId, firstJoin.generationId, leaderId, Map(leaderId -> Array.empty[Byte]))
  assertEquals(Errors.NONE.code, syncError)

  // A re-join from the leader must force a rebalance even with unchanged
  // protocols, so the leader can push new assignments when its local
  // metadata changes.
  EasyMock.reset(replicaManager)
  val secondJoin = joinGroup(groupId, leaderId, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, secondJoin.errorCode)
  assertNotEquals(firstJoin.generationId, secondJoin.generationId)
}
@Test
def testLeaderFailureInSyncGroup() {
  // to get a group of two members:
  // 1. join and sync with a single member (because we can't immediately join with two members)
  // 2. join and sync with the first member and a new member
  val firstJoinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  val firstMemberId = firstJoinResult.memberId
  val firstGenerationId = firstJoinResult.generationId
  assertEquals(firstMemberId, firstJoinResult.leaderId)
  assertEquals(Errors.NONE.code, firstJoinResult.errorCode)
  EasyMock.reset(replicaManager)
  val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
  assertEquals(Errors.NONE.code, firstSyncResult._2)
  EasyMock.reset(replicaManager)
  val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  EasyMock.reset(replicaManager)
  val joinFuture = sendJoinGroup(groupId, firstMemberId, DefaultSessionTimeout, protocolType, protocols)
  val joinResult = await(joinFuture, DefaultSessionTimeout+100)
  val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.NONE.code, joinResult.errorCode)
  assertEquals(Errors.NONE.code, otherJoinResult.errorCode)
  assertTrue(joinResult.generationId == otherJoinResult.generationId)
  assertEquals(firstMemberId, joinResult.leaderId)
  assertEquals(firstMemberId, otherJoinResult.leaderId)
  val nextGenerationId = joinResult.generationId
  // with no leader SyncGroup, the follower's request should fail with an error
  // indicating that it should rejoin
  EasyMock.reset(replicaManager)
  val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId)
  val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.REBALANCE_IN_PROGRESS.code, followerSyncResult._2)
}
@Test
def testSyncGroupFollowerAfterLeader() {
  // to get a group of two members:
  // 1. join and sync with a single member (because we can't immediately join with two members)
  // 2. join and sync with the first member and a new member
  val firstJoinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  val firstMemberId = firstJoinResult.memberId
  val firstGenerationId = firstJoinResult.generationId
  assertEquals(firstMemberId, firstJoinResult.leaderId)
  assertEquals(Errors.NONE.code, firstJoinResult.errorCode)
  EasyMock.reset(replicaManager)
  val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
  assertEquals(Errors.NONE.code, firstSyncResult._2)
  EasyMock.reset(replicaManager)
  val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  EasyMock.reset(replicaManager)
  val joinFuture = sendJoinGroup(groupId, firstMemberId, DefaultSessionTimeout, protocolType, protocols)
  val joinResult = await(joinFuture, DefaultSessionTimeout+100)
  val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.NONE.code, joinResult.errorCode)
  assertEquals(Errors.NONE.code, otherJoinResult.errorCode)
  assertTrue(joinResult.generationId == otherJoinResult.generationId)
  assertEquals(firstMemberId, joinResult.leaderId)
  assertEquals(firstMemberId, otherJoinResult.leaderId)
  val nextGenerationId = joinResult.generationId
  val leaderId = firstMemberId
  val leaderAssignment = Array[Byte](0)
  val followerId = otherJoinResult.memberId
  val followerAssignment = Array[Byte](1)
  // the leader syncs first, carrying the assignments for both members
  EasyMock.reset(replicaManager)
  val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId,
    Map(leaderId -> leaderAssignment, followerId -> followerAssignment))
  assertEquals(Errors.NONE.code, leaderSyncResult._2)
  assertEquals(leaderAssignment, leaderSyncResult._1)
  // a follower syncing after the leader receives its own assignment directly
  EasyMock.reset(replicaManager)
  val followerSyncResult = syncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId)
  assertEquals(Errors.NONE.code, followerSyncResult._2)
  assertEquals(followerAssignment, followerSyncResult._1)
}
@Test
def testSyncGroupLeaderAfterFollower() {
  // to get a group of two members:
  // 1. join and sync with a single member (because we can't immediately join with two members)
  // 2. join and sync with the first member and a new member
  val joinGroupResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  val firstMemberId = joinGroupResult.memberId
  val firstGenerationId = joinGroupResult.generationId
  assertEquals(firstMemberId, joinGroupResult.leaderId)
  assertEquals(Errors.NONE.code, joinGroupResult.errorCode)
  EasyMock.reset(replicaManager)
  val syncGroupResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
  val syncGroupErrorCode = syncGroupResult._2
  assertEquals(Errors.NONE.code, syncGroupErrorCode)
  EasyMock.reset(replicaManager)
  val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  EasyMock.reset(replicaManager)
  val joinFuture = sendJoinGroup(groupId, firstMemberId, DefaultSessionTimeout, protocolType, protocols)
  val joinResult = await(joinFuture, DefaultSessionTimeout+100)
  val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.NONE.code, joinResult.errorCode)
  assertEquals(Errors.NONE.code, otherJoinResult.errorCode)
  assertTrue(joinResult.generationId == otherJoinResult.generationId)
  val nextGenerationId = joinResult.generationId
  val leaderId = joinResult.leaderId
  val leaderAssignment = Array[Byte](0)
  val followerId = otherJoinResult.memberId
  val followerAssignment = Array[Byte](1)
  assertEquals(firstMemberId, joinResult.leaderId)
  assertEquals(firstMemberId, otherJoinResult.leaderId)
  // the follower syncs first; its future can only complete once the leader
  // has provided the assignments
  EasyMock.reset(replicaManager)
  val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, followerId)
  EasyMock.reset(replicaManager)
  val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId,
    Map(leaderId -> leaderAssignment, followerId -> followerAssignment))
  assertEquals(Errors.NONE.code, leaderSyncResult._2)
  assertEquals(leaderAssignment, leaderSyncResult._1)
  // now that the leader synced, the parked follower sync can complete
  val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100)
  assertEquals(Errors.NONE.code, followerSyncResult._2)
  assertEquals(followerAssignment, followerSyncResult._1)
}
@Test
def testCommitOffsetFromUnknownGroup(): Unit = {
  // A commit with an explicit generation against an unknown group is rejected:
  // the group cannot possibly be at that generation.
  val tp = new TopicAndPartition("topic", 0)
  val commitResult = commitOffsets(groupId, memberId, 1, immutable.Map(tp -> OffsetAndMetadata(0)))
  assertEquals(Errors.ILLEGAL_GENERATION.code, commitResult(tp))
}
@Test
def testCommitOffsetWithDefaultGeneration(): Unit = {
  // With the default member id and generation (offset commit without group
  // management), the commit is accepted.
  val tp = new TopicAndPartition("topic", 0)
  val commitResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
    OffsetCommitRequest.DEFAULT_GENERATION_ID, immutable.Map(tp -> OffsetAndMetadata(0)))
  assertEquals(Errors.NONE.code, commitResult(tp))
}
@Test
def testCommitOffsetInAwaitingSync(): Unit = {
  // After the join but before the sync completes the group is still
  // rebalancing, so commits are rejected with REBALANCE_IN_PROGRESS.
  val tp = new TopicAndPartition("topic", 0)
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  val commitResult = commitOffsets(groupId, joinResult.memberId, joinResult.generationId,
    immutable.Map(tp -> OffsetAndMetadata(0)))
  assertEquals(Errors.REBALANCE_IN_PROGRESS.code, commitResult(tp))
}
@Test
def testHeartbeatDuringRebalanceCausesRebalanceInProgress() {
  // First start up a group
  // NOTE(review): an earlier comment claimed "a slightly larger timeout" is
  // used here, but the code passes DefaultSessionTimeout like the other tests.
  val joinGroupResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout,
    protocolType, protocols)
  val assignedConsumerId = joinGroupResult.memberId
  val initialGenerationId = joinGroupResult.generationId
  val joinGroupErrorCode = joinGroupResult.errorCode
  assertEquals(Errors.NONE.code, joinGroupErrorCode)
  // Then join with a new consumer to trigger a rebalance
  EasyMock.reset(replicaManager)
  sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  // We should be in the middle of a rebalance, so the heartbeat should return rebalance in progress
  EasyMock.reset(replicaManager)
  val heartbeatResult = heartbeat(groupId, assignedConsumerId, initialGenerationId)
  assertEquals(Errors.REBALANCE_IN_PROGRESS.code, heartbeatResult)
}
@Test
def testGenerationIdIncrementsOnRebalance(): Unit = {
  // Each rebalance bumps the generation: 1 after the first join, 2 after the second.
  val firstJoin = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(1, firstJoin.generationId)
  assertEquals(Errors.NONE.code, firstJoin.errorCode)

  EasyMock.reset(replicaManager)
  val secondJoin = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(2, secondJoin.generationId)
  assertEquals(Errors.NONE.code, secondJoin.errorCode)
}
@Test
def testLeaveGroupWrongCoordinator(): Unit = {
  // Leaving a group owned by a different coordinator must be rejected.
  assertEquals(Errors.NOT_COORDINATOR_FOR_GROUP.code,
    leaveGroup(otherGroupId, JoinGroupRequest.UNKNOWN_MEMBER_ID))
}
@Test
def testLeaveGroupUnknownGroup(): Unit = {
  // Leaving a group that was never created yields UNKNOWN_MEMBER_ID.
  assertEquals(Errors.UNKNOWN_MEMBER_ID.code, leaveGroup(groupId, memberId))
}
@Test
def testLeaveGroupUnknownConsumerExistingGroup(): Unit = {
  // The group exists, but the leaving member id was never part of it.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  assertEquals(Errors.UNKNOWN_MEMBER_ID.code, leaveGroup(groupId, "consumerId"))
}
@Test
def testValidLeaveGroup(): Unit = {
  // A member that successfully joined may leave cleanly.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  assertEquals(Errors.NONE.code, leaveGroup(groupId, joinResult.memberId))
}
@Test
def testListGroupsIncludesStableGroups(): Unit = {
  // A fully joined-and-synced (stable) group must appear in ListGroups.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  val leaderId = joinResult.memberId
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  val (_, syncError) = syncGroupLeader(groupId, joinResult.generationId, leaderId, Map(leaderId -> Array.empty[Byte]))
  assertEquals(Errors.NONE.code, syncError)

  val (error, groups) = groupCoordinator.handleListGroups()
  assertEquals(Errors.NONE, error)
  assertEquals(1, groups.size)
  assertEquals(GroupOverview("groupId", "consumer"), groups.head)
}
@Test
def testListGroupsIncludesRebalancingGroups(): Unit = {
  // A group that has joined but not yet synced (still rebalancing) is listed too.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  val (error, groups) = groupCoordinator.handleListGroups()
  assertEquals(Errors.NONE, error)
  assertEquals(1, groups.size)
  assertEquals(GroupOverview("groupId", "consumer"), groups.head)
}
@Test
def testDescribeGroupWrongCoordinator() {
  EasyMock.reset(replicaManager)
  // this coordinator does not own otherGroupId
  val (error, _) = groupCoordinator.handleDescribeGroup(otherGroupId)
  assertEquals(Errors.NOT_COORDINATOR_FOR_GROUP, error)
}
@Test
def testDescribeGroupInactiveGroup() {
  EasyMock.reset(replicaManager)
  // describing a group that was never created succeeds but returns the
  // DeadGroup placeholder summary
  val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
  assertEquals(Errors.NONE, error)
  assertEquals(GroupCoordinator.DeadGroup, summary)
}
@Test
def testDescribeGroupStable(): Unit = {
  // Once joined and synced, DescribeGroup reports the selected protocol
  // ("range") and the single member.
  val joinResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, DefaultSessionTimeout, protocolType, protocols)
  val leaderId = joinResult.memberId
  assertEquals(Errors.NONE.code, joinResult.errorCode)

  EasyMock.reset(replicaManager)
  val (_, syncError) = syncGroupLeader(groupId, joinResult.generationId, leaderId, Map(leaderId -> Array.empty[Byte]))
  assertEquals(Errors.NONE.code, syncError)

  EasyMock.reset(replicaManager)
  val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
  assertEquals(Errors.NONE, error)
  assertEquals(protocolType, summary.protocolType)
  assertEquals("range", summary.protocol)
  assertEquals(List(leaderId), summary.members.map(_.memberId))
}
@Test
def testDescribeGroupRebalancing() {
  val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
  val joinGroupResult = joinGroup(groupId, memberId, DefaultSessionTimeout, protocolType, protocols)
  val joinGroupErrorCode = joinGroupResult.errorCode
  assertEquals(Errors.NONE.code, joinGroupErrorCode)
  EasyMock.reset(replicaManager)
  // the group has joined but not synced, so it is still AwaitingSync:
  // no protocol is selected and member metadata/assignments are not visible yet
  val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
  assertEquals(Errors.NONE, error)
  assertEquals(protocolType, summary.protocolType)
  assertEquals(GroupCoordinator.NoProtocol, summary.protocol)
  assertEquals(AwaitingSync.toString, summary.state)
  assertTrue(summary.members.map(_.memberId).contains(joinGroupResult.memberId))
  assertTrue(summary.members.forall(_.metadata.isEmpty))
  assertTrue(summary.members.forall(_.assignment.isEmpty))
}
/** Bridges the coordinator's callback-based JoinGroup API to a Future the test can await. */
private def setupJoinGroupCallback: (Future[JoinGroupResult], JoinGroupCallback) = {
  val promise = Promise[JoinGroupResult]
  (promise.future, promise.success(_))
}
/** Completes a promise with the (assignment, errorCode) pair passed to the SyncGroup callback. */
private def setupSyncGroupCallback: (Future[SyncGroupCallbackParams], SyncGroupCallback) = {
  val promise = Promise[SyncGroupCallbackParams]
  val callback: SyncGroupCallback = (assignment, errorCode) => promise.success((assignment, errorCode))
  (promise.future, callback)
}
/** Completes a promise with the error code passed to the heartbeat callback. */
private def setupHeartbeatCallback: (Future[HeartbeatCallbackParams], HeartbeatCallback) = {
  val promise = Promise[HeartbeatCallbackParams]
  val callback: HeartbeatCallback = promise.success(_)
  (promise.future, callback)
}
/** Completes a promise with the per-partition error map passed to the commit callback. */
private def setupCommitOffsetsCallback: (Future[CommitOffsetCallbackParams], CommitOffsetCallback) = {
  val promise = Promise[CommitOffsetCallbackParams]
  val callback: CommitOffsetCallback = promise.success(_)
  (promise.future, callback)
}
/**
 * Fires a JoinGroup asynchronously; callers decide how long to wait on the
 * returned future (joins can park until the rebalance completes).
 */
private def sendJoinGroup(groupId: String,
                          memberId: String,
                          sessionTimeout: Int,
                          protocolType: String,
                          protocols: List[(String, Array[Byte])]): Future[JoinGroupResult] = {
  val (future, callback) = setupJoinGroupCallback
  EasyMock.replay(replicaManager)
  groupCoordinator.handleJoinGroup(groupId, memberId, "clientId", "clientHost", sessionTimeout,
    protocolType, protocols, callback)
  future
}
private def sendSyncGroupLeader(groupId: String,
                                generation: Int,
                                leaderId: String,
                                assignment: Map[String, Array[Byte]]): Future[SyncGroupCallbackParams] = {
  val (responseFuture, responseCallback) = setupSyncGroupCallback
  // The leader's SyncGroup persists group metadata through the replica manager.
  // Capture the append-completion callback handed to appendMessages and invoke
  // it immediately with a successful ProducerResponseStatus so the write
  // appears to have been committed.
  val capturedArgument: Capture[Map[TopicAndPartition, ProducerResponseStatus] => Unit] = EasyMock.newCapture()
  EasyMock.expect(replicaManager.appendMessages(EasyMock.anyLong(),
    EasyMock.anyShort(),
    EasyMock.anyBoolean(),
    EasyMock.anyObject().asInstanceOf[Map[TopicAndPartition, MessageSet]],
    EasyMock.capture(capturedArgument))).andAnswer(new IAnswer[Unit] {
      override def answer = capturedArgument.getValue.apply(
        Map(TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, groupPartitionId) ->
          new ProducerResponseStatus(Errors.NONE.code, 0L)
        )
      )})
  EasyMock.replay(replicaManager)
  groupCoordinator.handleSyncGroup(groupId, generation, leaderId, assignment, responseCallback)
  responseFuture
}
/** Fires a follower SyncGroup (followers carry an empty assignment map). */
private def sendSyncGroupFollower(groupId: String,
                                  generation: Int,
                                  memberId: String): Future[SyncGroupCallbackParams] = {
  val (future, callback) = setupSyncGroupCallback
  EasyMock.replay(replicaManager)
  groupCoordinator.handleSyncGroup(groupId, generation, memberId, Map.empty[String, Array[Byte]], callback)
  future
}
/**
 * Synchronous join: blocks for at most the session timeout plus a small
 * buffer for unexpected scheduling delays.
 */
private def joinGroup(groupId: String,
                      memberId: String,
                      sessionTimeout: Int,
                      protocolType: String,
                      protocols: List[(String, Array[Byte])]): JoinGroupResult =
  await(sendJoinGroup(groupId, memberId, sessionTimeout, protocolType, protocols), sessionTimeout + 100)
/** Synchronous follower sync, bounded by the default session timeout plus a buffer. */
private def syncGroupFollower(groupId: String,
                              generationId: Int,
                              memberId: String): SyncGroupCallbackParams =
  await(sendSyncGroupFollower(groupId, generationId, memberId), DefaultSessionTimeout + 100)
/** Synchronous leader sync, bounded by the default session timeout plus a buffer. */
private def syncGroupLeader(groupId: String,
                            generationId: Int,
                            memberId: String,
                            assignment: Map[String, Array[Byte]]): SyncGroupCallbackParams =
  await(sendSyncGroupLeader(groupId, generationId, memberId, assignment), DefaultSessionTimeout + 100)
/** Sends a heartbeat and waits briefly (40 ms, the original bound) for the error code. */
private def heartbeat(groupId: String,
                      consumerId: String,
                      generationId: Int): HeartbeatCallbackParams = {
  val (future, callback) = setupHeartbeatCallback
  EasyMock.replay(replicaManager)
  groupCoordinator.handleHeartbeat(groupId, consumerId, generationId, callback)
  await(future, 40)
}
/** Blocks on a future for at most `millis` milliseconds. */
private def await[T](future: Future[T], millis: Long): T =
  Await.result(future, Duration(millis, TimeUnit.MILLISECONDS))
private def commitOffsets(groupId: String,
                          consumerId: String,
                          generationId: Int,
                          offsets: immutable.Map[TopicAndPartition, OffsetAndMetadata]): CommitOffsetCallbackParams = {
  val (responseFuture, responseCallback) = setupCommitOffsetsCallback
  // Offset commits are persisted through the replica manager. Capture the
  // append-completion callback and invoke it immediately with a successful
  // status so the commit appears durably written.
  val capturedArgument: Capture[Map[TopicAndPartition, ProducerResponseStatus] => Unit] = EasyMock.newCapture()
  EasyMock.expect(replicaManager.appendMessages(EasyMock.anyLong(),
    EasyMock.anyShort(),
    EasyMock.anyBoolean(),
    EasyMock.anyObject().asInstanceOf[Map[TopicAndPartition, MessageSet]],
    EasyMock.capture(capturedArgument))).andAnswer(new IAnswer[Unit] {
      override def answer = capturedArgument.getValue.apply(
        Map(TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, groupPartitionId) ->
          new ProducerResponseStatus(Errors.NONE.code, 0L)
        )
      )})
  EasyMock.replay(replicaManager)
  groupCoordinator.handleCommitOffsets(groupId, consumerId, generationId, offsets, responseCallback)
  Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
}
/** Sends a LeaveGroup; reuses the heartbeat callback shape (a bare error code). */
private def leaveGroup(groupId: String, consumerId: String): LeaveGroupCallbackParams = {
  val (future, callback) = setupHeartbeatCallback
  EasyMock.expect(replicaManager.getPartition(GroupCoordinator.GroupMetadataTopicName, groupPartitionId)).andReturn(None)
  EasyMock.replay(replicaManager)
  groupCoordinator.handleLeaveGroup(groupId, consumerId, callback)
  await(future, 40)
}
}
| Mszak/kafka | core/src/test/scala/unit/kafka/coordinator/GroupCoordinatorResponseTest.scala | Scala | apache-2.0 | 39,563 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.time
import java.util.Locale
import org.joda.time._
/**
 * Wraps a Joda-Time `Partial.Property` with Scala-friendly apply-style setters.
 * Each `apply` delegates to `setCopy`, so a new `Partial` is returned and the
 * underlying property is never mutated.
 */
class RichPartialProperty(underlying: Partial.Property) {
  /** The `Partial` that this property belongs to. */
  def partial: Partial =
    underlying.getPartial
  /** A copy of the partial with this field set to the given numeric value. */
  def apply(value: Int): Partial = underlying.setCopy(value)
  /** A copy of the partial with this field parsed from `text`. */
  def apply(text: String): Partial = underlying.setCopy(text)
  /** A copy of the partial with this field parsed from `text` in the given locale. */
  def apply(text: String, locale: Locale): Partial =
    underlying.setCopy(text, locale)
}
| jorgeortiz85/scala-time | src/main/scala/org/scala_tools/time/RichPartialProperty.scala | Scala | apache-2.0 | 1,002 |
package com.taxis99.amazon.sqs
import akka.Done
import akka.stream.QueueOfferResult
import play.api.libs.json.{Json, Writes}
import scala.concurrent.{Future, Promise}
/**
 * Mixin that publishes messages of type `T` to the configured SQS queue.
 */
trait SqsProducer[T] extends SqsConfig {

  // Materialize the underlying producer once, on first use.
  private lazy val producer = sqs.producer(queueConfig)

  /**
   * Produces a new message to the queue. The message must be serializable to Json.
   * @param message The message to be sent
   * @return A future completed when the message was sent
   */
  def produce(message: T)(implicit tjs: Writes[T]): Future[Done] = {
    val ack = Promise[Done]
    producer flatMap { queue =>
      queue.offer(Json.toJson(message) -> ack) flatMap {
        case QueueOfferResult.Enqueued => ack.future
        case other => Future.failed(new Exception(s"Could not enqueue $other"))
      }
    }
  }
}
| 99Taxis/common-sqs | src/main/scala/com/taxis99/amazon/sqs/SqsProducer.scala | Scala | apache-2.0 | 822 |
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package util
import common._
/**
 * A decorator for a ThreadLocal variable that provides convenience methods to
 * view the value as a Box and to execute a function in a "scope" wherein the
 * variable temporarily holds a different value.
 */
class ThreadGlobal[T] {
  private[this] val tl = new ThreadLocal[T]

  /** The current thread's value of this variable. */
  def value: T = tl.get

  /** The current value lifted into a Box in a null-safe fashion. */
  def box: Box[T] = Box !! value

  /**
   * Sets the value for the current thread.
   * @param v the value to set.
   * @return this, for chaining.
   */
  def set(v: T): ThreadGlobal[T] = {
    tl.set(v)
    this
  }

  /**
   * Alias for `set(v: T)`.
   * @param v the value to set.
   */
  def apply(v: T) = set(v)

  /**
   * Binds this variable to `x` for the duration of `f`, then restores the
   * previous value — effectively a per-thread dynamic scope. The previous
   * value is restored even if `f` throws.
   *
   * @param x the value to temporarily set
   * @param f the function to execute
   */
  def doWith[R](x: T)(f: => R): R = {
    val saved = value
    try {
      tl.set(x)
      f
    } finally {
      tl.set(saved)
    }
  }
}
/**
 * A thread-local variable with Box-based access and scoped execution via `run`.
 */
trait DynoVar[T] {
  private[this] val tl = new ThreadLocal[T]

  /** The current thread's value, lifted null-safely into a Box. */
  def is: Box[T] = Box !! tl.get

  /** Alias for `is`. */
  def get = is

  /** Sets the value for the current thread; returns this for chaining. */
  def set(v: T): this.type = {
    tl.set(v)
    this
  }

  /**
   * Runs `f` with this variable bound to `x`, restoring the prior value
   * afterwards (even if `f` throws).
   */
  def run[S](x: T)(f: => S): S = {
    val saved = tl.get
    try {
      tl.set(x)
      f
    } finally {
      tl.set(saved)
    }
  }
}
| lzpfmh/framework-2 | core/util/src/main/scala/net/liftweb/util/ThreadGlobal.scala | Scala | apache-2.0 | 2,438 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.concurrent.{ExecutionException, TimeoutException}
import java.util.Properties
import kafka.integration.KafkaServerTestHarness
import kafka.log.LogConfig
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.errors._
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.record.{DefaultRecord, DefaultRecordBatch}
import org.junit.Assert._
import org.junit.{After, Before, Test}
class ProducerFailureHandlingTest extends KafkaServerTestHarness {
// Size limits: records larger than serverMessageMaxBytes are rejected by the
// broker; the replica-fetch limits are sized just above it so replication of
// valid records is never throttled by this test's configuration.
private val producerBufferSize = 30000
private val serverMessageMaxBytes = producerBufferSize/2
private val replicaFetchMaxPartitionBytes = serverMessageMaxBytes + 200
private val replicaFetchMaxResponseBytes = replicaFetchMaxPartitionBytes + 200

val numServers = 2

val overridingProps = new Properties()
overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
overridingProps.put(KafkaConfig.MessageMaxBytesProp, serverMessageMaxBytes.toString)
overridingProps.put(KafkaConfig.ReplicaFetchMaxBytesProp, replicaFetchMaxPartitionBytes.toString)
// BUG FIX: the original used KafkaConfig.ReplicaFetchResponseMaxBytesDoc (the
// human-readable documentation string) as the Properties key, so the intended
// response-size override was silently never applied. The key must be the
// *Prop* constant.
overridingProps.put(KafkaConfig.ReplicaFetchResponseMaxBytesProp, replicaFetchMaxResponseBytes.toString)
// Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic)
// so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long
overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)
// numServers broker configs, each carrying the size-limit overrides above.
def generateConfigs() =
  TestUtils.createBrokerConfigs(numServers, zkConnect, false).map(KafkaConfig.fromProps(_, overridingProps))
// Producers differing only in their acks setting; producer1..3 are created in
// setUp. producer4 is not assigned in setUp — it is only closed in tearDown
// when a test has set it (presumably per-test setups elsewhere in the class).
private var producer1: KafkaProducer[Array[Byte], Array[Byte]] = null
private var producer2: KafkaProducer[Array[Byte], Array[Byte]] = null
private var producer3: KafkaProducer[Array[Byte], Array[Byte]] = null
private var producer4: KafkaProducer[Array[Byte], Array[Byte]] = null

private val topic1 = "topic-1"
private val topic2 = "topic-2"
@Before
override def setUp() {
super.setUp()
producer1 = TestUtils.createNewProducer(brokerList, acks = 0, requestTimeoutMs = 30000L, maxBlockMs = 10000L,
bufferSize = producerBufferSize)
producer2 = TestUtils.createNewProducer(brokerList, acks = 1, requestTimeoutMs = 30000L, maxBlockMs = 10000L,
bufferSize = producerBufferSize)
producer3 = TestUtils.createNewProducer(brokerList, acks = -1, requestTimeoutMs = 30000L, maxBlockMs = 10000L,
bufferSize = producerBufferSize)
}
@After
override def tearDown() {
if (producer1 != null) producer1.close()
if (producer2 != null) producer2.close()
if (producer3 != null) producer3.close()
if (producer4 != null) producer4.close()
super.tearDown()
}
/**
* With ack == 0 the future metadata will have no exceptions with offset -1
*/
@Test
def testTooLargeRecordWithAckZero() {
// create topic
TestUtils.createTopic(zkUtils, topic1, 1, numServers, servers)
// send a too-large record
val record = new ProducerRecord(topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1))
assertEquals("Returned metadata should have offset -1", producer1.send(record).get.offset, -1L)
}
/**
* With ack == 1 the future metadata will throw ExecutionException caused by RecordTooLargeException
*/
@Test
def testTooLargeRecordWithAckOne() {
// create topic
TestUtils.createTopic(zkUtils, topic1, 1, numServers, servers)
// send a too-large record
val record = new ProducerRecord(topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1))
intercept[ExecutionException] {
producer2.send(record).get
}
}
private def checkTooLargeRecordForReplicationWithAckAll(maxFetchSize: Int) {
val maxMessageSize = maxFetchSize + 100
val topicConfig = new Properties
topicConfig.setProperty(LogConfig.MinInSyncReplicasProp, numServers.toString)
topicConfig.setProperty(LogConfig.MaxMessageBytesProp, maxMessageSize.toString)
// create topic
val topic10 = "topic10"
TestUtils.createTopic(zkUtils, topic10, servers.size, numServers, servers, topicConfig)
// send a record that is too large for replication, but within the broker max message limit
val value = new Array[Byte](maxMessageSize - DefaultRecordBatch.RECORD_BATCH_OVERHEAD - DefaultRecord.MAX_RECORD_OVERHEAD)
val record = new ProducerRecord[Array[Byte], Array[Byte]](topic10, null, value)
val recordMetadata = producer3.send(record).get
assertEquals(topic10, recordMetadata.topic)
}
/** This should succeed as the replica fetcher thread can handle oversized messages since KIP-74 */
@Test
def testPartitionTooLargeForReplicationWithAckAll() {
checkTooLargeRecordForReplicationWithAckAll(replicaFetchMaxPartitionBytes)
}
/** This should succeed as the replica fetcher thread can handle oversized messages since KIP-74 */
@Test
def testResponseTooLargeForReplicationWithAckAll() {
checkTooLargeRecordForReplicationWithAckAll(replicaFetchMaxResponseBytes)
}
/**
* With non-exist-topic the future metadata should return ExecutionException caused by TimeoutException
*/
@Test
def testNonExistentTopic() {
// send a record with non-exist topic
val record = new ProducerRecord(topic2, null, "key".getBytes, "value".getBytes)
intercept[ExecutionException] {
producer1.send(record).get
}
}
/**
* With incorrect broker-list the future metadata should return ExecutionException caused by TimeoutException
*
* TODO: other exceptions that can be thrown in ExecutionException:
* UnknownTopicOrPartitionException
* NotLeaderForPartitionException
* LeaderNotAvailableException
* CorruptRecordException
* TimeoutException
*/
@Test
def testWrongBrokerList() {
// create topic
TestUtils.createTopic(zkUtils, topic1, 1, numServers, servers)
// producer with incorrect broker list
producer4 = TestUtils.createNewProducer("localhost:8686,localhost:4242", acks = 1, maxBlockMs = 10000L, bufferSize = producerBufferSize)
// send a record with incorrect broker list
val record = new ProducerRecord(topic1, null, "key".getBytes, "value".getBytes)
intercept[ExecutionException] {
producer4.send(record).get
}
}
/**
* Send with invalid partition id should throw KafkaException when partition is higher than the upper bound of
* partitions.
*/
@Test
def testInvalidPartition() {
// create topic with a single partition
TestUtils.createTopic(zkUtils, topic1, 1, numServers, servers)
// create a record with incorrect partition id (higher than the number of partitions), send should fail
val higherRecord = new ProducerRecord(topic1, 1, "key".getBytes, "value".getBytes)
intercept[KafkaException] {
producer1.send(higherRecord)
}
}
/**
* The send call after producer closed should throw IllegalStateException
*/
@Test
def testSendAfterClosed() {
// create topic
TestUtils.createTopic(zkUtils, topic1, 1, numServers, servers)
val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, "value".getBytes)
// first send a message to make sure the metadata is refreshed
producer1.send(record).get
producer2.send(record).get
producer3.send(record).get
intercept[IllegalStateException] {
producer1.close()
producer1.send(record)
}
intercept[IllegalStateException] {
producer2.close()
producer2.send(record)
}
intercept[IllegalStateException] {
producer3.close()
producer3.send(record)
}
}
@Test
def testCannotSendToInternalTopic() {
TestUtils.createOffsetsTopic(zkUtils, servers)
val thrown = intercept[ExecutionException] {
producer2.send(new ProducerRecord(Topic.GROUP_METADATA_TOPIC_NAME, "test".getBytes, "test".getBytes)).get
}
assertTrue("Unexpected exception while sending to an invalid topic " + thrown.getCause, thrown.getCause.isInstanceOf[InvalidTopicException])
}
@Test
def testNotEnoughReplicas() {
val topicName = "minisrtest"
val topicProps = new Properties()
topicProps.put("min.insync.replicas",(numServers+1).toString)
TestUtils.createTopic(zkUtils, topicName, 1, numServers, servers, topicProps)
val record = new ProducerRecord(topicName, null, "key".getBytes, "value".getBytes)
try {
producer3.send(record).get
fail("Expected exception when producing to topic with fewer brokers than min.insync.replicas")
} catch {
case e: ExecutionException =>
if (!e.getCause.isInstanceOf[NotEnoughReplicasException]) {
fail("Expected NotEnoughReplicasException when producing to topic with fewer brokers than min.insync.replicas")
}
}
}
@Test
def testNotEnoughReplicasAfterBrokerShutdown() {
val topicName = "minisrtest2"
val topicProps = new Properties()
topicProps.put("min.insync.replicas", numServers.toString)
TestUtils.createTopic(zkUtils, topicName, 1, numServers, servers,topicProps)
val record = new ProducerRecord(topicName, null, "key".getBytes, "value".getBytes)
// this should work with all brokers up and running
producer3.send(record).get
// shut down one broker
servers.head.shutdown()
servers.head.awaitShutdown()
try {
producer3.send(record).get
fail("Expected exception when producing to topic with fewer brokers than min.insync.replicas")
} catch {
case e: ExecutionException =>
if (!e.getCause.isInstanceOf[NotEnoughReplicasException] &&
!e.getCause.isInstanceOf[NotEnoughReplicasAfterAppendException] &&
!e.getCause.isInstanceOf[TimeoutException]) {
fail("Expected NotEnoughReplicasException or NotEnoughReplicasAfterAppendException when producing to topic " +
"with fewer brokers than min.insync.replicas, but saw " + e.getCause)
}
}
// restart the server
servers.head.startup()
}
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala | Scala | apache-2.0 | 11,005 |
// lchannels - session programming in Scala
// Copyright (c) 2016, Alceste Scalas and Imperial College London
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/** @author Alceste Scalas <[email protected]> */
package lchannels.examples.game.demo
import lchannels._
import lchannels.examples.game.protocol.binary.{PlayA, PlayB, PlayC}
/** Runs the three-player game demo over synchronous local channels. */
object Local extends App {
  // Helper method to ease external invocation
  def run() = main(Array())

  Demo.start(
    LocalChannel.factory[PlayA],
    LocalChannel.factory[PlayB],
    LocalChannel.factory[PlayC]
  )
}
/** Runs the three-player game demo over queue-based (asynchronous) channels. */
object Queue extends App {
  // Helper method to ease external invocation
  def run() = main(Array())

  // QueueChannel needs an ExecutionContext for its asynchronous delivery
  import scala.concurrent.ExecutionContext.Implicits.global

  Demo.start(
    QueueChannel.factory[PlayA],
    QueueChannel.factory[PlayB],
    QueueChannel.factory[PlayC]
  )
}
object Demo {
  /**
   * Wires one game server to three clients (Alice, Bob, Carol), each pair of
   * endpoints created by the corresponding channel factory. The server and
   * client constructors are invoked for their side effects.
   */
  def start(afactory: () => (In[PlayA], Out[PlayA]),
            bfactory: () => (In[PlayB], Out[PlayB]),
            cfactory: () => (In[PlayC], Out[PlayC])) = {
    import scala.concurrent.duration._
    implicit val timeout = 10.seconds

    // One (client endpoint, server endpoint) pair per player
    val (clientA, serverA) = afactory()
    val (clientB, serverB) = bfactory()
    val (clientC, serverC) = cfactory()

    val server = new lchannels.examples.game.server.Server(serverA, serverB, serverC)
    val alice = new lchannels.examples.game.a.Client("Alice", clientA, 3.seconds)
    val bob = new lchannels.examples.game.b.Client("Bob", clientB, 1.second)
    val carol = new lchannels.examples.game.c.Client("Carol", clientC, 2.seconds)
  }
}
| scribble/scribble.github.io | src/main/jbake/assets/docs/lchannels/examples/src/main/scala/lchannels/examples/game/Demo.scala | Scala | apache-2.0 | 2,851 |
package com.sksamuel.elastic4s.search.aggs
/**
 * Integration test for the `missing` bucket aggregation: counts documents
 * that lack a value for a given field.
 */
class MissingAggregationHttpTest extends AbstractAggregationTest {
  "missing aggregation" - {
    "should return documents missing a value" in {
      // Aggregate over the fixture index, bucketing docs with no "actor" field
      // (fixture data is set up by AbstractAggregationTest).
      val resp = client.execute {
        search in "aggregations/breakingbad" aggregations {
          aggregation missing "agg1" field "actor"
        }
      }.await
      // Presumably the fixture indexes 10 documents, 7 of them without an
      // "actor" value — confirm against AbstractAggregationTest.
      resp.totalHits shouldBe 10
      val aggs = resp.aggregations.missingResult("agg1")
      aggs.getDocCount shouldBe 7
    }
  }
}
| FabienPennequin/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/aggs/MissingAggregationHttpTest.scala | Scala | apache-2.0 | 496 |
package jp.ne.opt.chronoscala
import java.time._
import jp.ne.opt.chronoscala.Tag.CS
import scala.language.implicitConversions
/**
 * Single mix-in/import point bundling every enrichment this library offers:
 * `Int` literals, duration/period wrappers, `java.time` wrappers and orderings.
 */
trait Implicits
  extends IntImplicits
  with DurationImplicits
  with TimeImplicits
  with OrderingImplicits
/**
 * Variant of [[Implicits]] whose `Int`/`Long` enrichments are gated behind the
 * `CS` tag (see [[NamespacedIntImplicits]]) — presumably to avoid clashes with
 * other numeric DSLs such as `scala.concurrent.duration`.
 */
trait NamespacedImplicits
  extends NamespacedIntImplicits
  with NamespacedLongImplicits
  with DurationImplicits
  with TimeImplicits
  with OrderingImplicits
/** Direct enrichment: every `Int` is implicitly convertible to [[RichInt]]. */
trait IntImplicits {
  implicit def richInt(n: Int): RichInt = new RichInt(n)
}
/**
 * Namespaced enrichment for `Int`: a plain `Int` only gains the generic
 * [[RichAny]] wrapper; the full [[RichInt]] API is available only once the
 * value is tagged as `CS[Int]`.
 */
trait NamespacedIntImplicits {
  implicit def richIntCs(n: Int): RichAny[Int] = new RichAny(n)
  implicit def richCsInt(n: CS[Int]): RichInt = new RichInt(n)
}
/**
 * Namespaced enrichment for `Long`, mirroring [[NamespacedIntImplicits]]:
 * plain `Long` gets [[RichAny]]; a `CS`-tagged `Long` gets [[RichLong]].
 */
trait NamespacedLongImplicits {
  implicit def richLongCs(n: Long): RichAny[Long] = new RichAny(n)
  implicit def richCsLong(n: CS[Long]): RichLong = new RichLong(n)
}
/** Enrichments for `java.time.Duration` and `java.time.Period`. */
trait DurationImplicits {
  implicit def richDuration(d: Duration): RichDuration = new RichDuration(d)
  implicit def richPeriod(p: Period): RichPeriod = new RichPeriod(p)
}
/** Enrichments for the core `java.time` date/time types (one wrapper each). */
trait TimeImplicits {
  implicit def richZonedDateTime(t: ZonedDateTime): RichZonedDateTime = new RichZonedDateTime(t)
  implicit def richOffsetDateTime(t: OffsetDateTime): RichOffsetDateTime = new RichOffsetDateTime(t)
  implicit def richLocalDateTime(t: LocalDateTime): RichLocalDateTime = new RichLocalDateTime(t)
  implicit def richLocalTime(t: LocalTime): RichLocalTime = new RichLocalTime(t)
  implicit def richLocalDate(t: LocalDate): RichLocalDate = new RichLocalDate(t)
  implicit def richInstant(i: Instant): RichInstant = new RichInstant(i)
}
/**
 * Chronological `Ordering` instances for the core `java.time` types:
 * earlier values sort first (comparison delegates to `isBefore`).
 */
trait OrderingImplicits {
  implicit val zonedDateTimeOrdering: Ordering[ZonedDateTime] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
  implicit val offsetDateTimeOrdering: Ordering[OffsetDateTime] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
  implicit val localDateTimeOrdering: Ordering[LocalDateTime] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
  implicit val localDateOrdering: Ordering[LocalDate] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
  implicit val localTimeOrdering: Ordering[LocalTime] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
  implicit val instantOrdering: Ordering[Instant] =
    Ordering.fromLessThan((earlier, later) => earlier.isBefore(later))
}
| opt-tech/chronoscala | shared/src/main/scala/jp/ne/opt/chronoscala/Implicits.scala | Scala | mit | 2,151 |
package slinky.core
import scala.scalajs.js
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
// Same as PropsWriterProvider except it always returns the typeclass instead
// of nulling it out in fullOpt mode. Extends js.Object so the materialized
// Writer can be passed across the Scala.js interop boundary.
trait ExternalPropsWriterProvider extends js.Object
object ExternalPropsWriterProvider {
  /**
   * Whitebox macro: materializes a `Writer[Props]` for the component class
   * enclosing the implicit search site and erases it to the provider type.
   */
  def impl(c: whitebox.Context): c.Expr[ExternalPropsWriterProvider] = {
    import c.universe._
    // The implicit is summoned inside a component definition; owner.owner is
    // the enclosing component class whose `Props` member we need.
    val compName = c.internal.enclosingOwner.owner.asClass
    // Typecheck a synthetic block so the path-dependent type `comp.Props` is
    // resolved; the quasiquote pattern extracts the resolved Writer type.
    val q"$_; val x: $typedReaderType = null" = c.typecheck(
      q"@_root_.scala.annotation.unchecked.uncheckedStable val comp: $compName = null; val x: _root_.slinky.readwrite.Writer[comp.Props] = null"
    ) // scalafix:ok
    // Summon the actual Writer instance and cast it to the opaque provider type.
    val tpcls = c.inferImplicitValue(typedReaderType.tpe.asInstanceOf[c.Type])
    c.Expr(q"$tpcls.asInstanceOf[_root_.slinky.core.ExternalPropsWriterProvider]")
  }
  implicit def get: ExternalPropsWriterProvider = macro impl
}
| shadaj/slinky | core/src/main/scala-2/slinky/core/ExternalPropsWriterProvider.scala | Scala | mit | 940 |
package stealthnet.scala.cryptography
import java.security.SecureRandom
import stealthnet.scala.Constants
import stealthnet.scala.util.HexDumper
/**
* ''Rijndael'' class companion object.
*
* Note: technically, it is ''Rijndael'' (a superset) because ''AES'' shall be
* limited to 128-bits block size.
*/
object RijndaelParameters {
  /**
   * Factory method.
   *
   * Builds ''Rijndael'' parameters from the defaults in [[Constants]]
   * (block/feedback/key sizes, cipher and padding modes) together with a
   * freshly generated random initialization vector and key.
   *
   * @todo are random bytes secure enough for initialization vector and key, or
   *   should we use a specific generator ?
   */
  def apply(): RijndaelParameters = {
    val random = new SecureRandom()
    // Draws sizeInBits/8 random bytes from the shared RNG
    def randomBytes(sizeInBits: Int): Array[Byte] = {
      val bytes = new Array[Byte](sizeInBits / 8)
      random.nextBytes(bytes)
      bytes
    }
    new RijndaelParameters(
      blockSize = Constants.RijndaelBlockSize,
      feedbackSize = Constants.RijndaelFeedbackSize,
      keySize = Constants.RijndaelKeySize,
      cipherMode = Constants.RijndaelCipherMode,
      paddingMode = Constants.RijndaelPaddingMode,
      iv = randomBytes(Constants.RijndaelBlockSize),
      key = randomBytes(Constants.RijndaelKeySize)
    )
  }
}
/**
 * ''Rijndael'' parameters.
 *
 * Immutable value holder; `toString` hex-dumps the IV and key, so avoid
 * logging it where key material must stay secret.
 *
 * @param blockSize block size in bits
 * @param feedbackSize feedback size in bits
 * @param keySize key size in bits
 * @param cipherMode cipher (chaining) mode
 * @param paddingMode padding mode
 * @param iv initialization vector (blockSize / 8 bytes)
 * @param key secret key (keySize / 8 bytes)
 */
class RijndaelParameters(
  val blockSize: Int,
  val feedbackSize: Int,
  val keySize: Int,
  val cipherMode: CipherMode.Value,
  val paddingMode: PaddingMode.Value,
  val iv: Array[Byte],
  val key: Array[Byte]
) {
  override def toString: String = getClass.getSimpleName + '(' +
    s"blockSize=$blockSize, feedbackSize=$feedbackSize, keySize=$keySize, " +
    s"cipherMode=$cipherMode, paddingMode=$paddingMode, iv=\\n${HexDumper.dump(iv)}\\n, " +
    s"key=\\n${HexDumper.dump(key)}\\n)"
}
| suiryc/StealthNet | core/src/main/scala/stealthnet/scala/cryptography/RijndaelParameters.scala | Scala | gpl-3.0 | 1,858 |
package com.sksamuel.elastic4s.requests.ingest
case class DeletePipelineResponse(acknowledged: Boolean)
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/ingest/DeletePipelineResponse.scala | Scala | apache-2.0 | 105 |
/**
* Copyright (c) 2012 Alexey Aksenov [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.digi.lib.ctrl.info
import scala.util.control.NonFatal

import android.os.Parcel
import android.os.Parcelable

import org.digimead.digi.lib.ctrl.declaration.DState
import org.digimead.digi.lib.log.Logging
/**
 * Parcelable snapshot of a component's state.
 *
 * NOTE: the secondary constructor reads fields from the Parcel in exactly the
 * order `writeToParcel` writes them — keep the two in sync when editing.
 */
case class ComponentState(val componentPackage: String,
  val executableState: List[ExecutableState],
  val state: DState.Value,
  val reason: Option[String],
  val execPath: String,
  val dataPath: String,
  val enabled: Boolean,
  val serviceState: DState.Value,
  val serviceBusy: Boolean) extends Parcelable {
  // Deserializing constructor: mirrors writeToParcel field-for-field.
  def this(in: Parcel) = this(componentPackage = in.readString,
    executableState =
      // readParcelableArray may return null (empty list was written as such)
      in.readParcelableArray(classOf[ExecutableState].getClassLoader) match {
        case null =>
          Nil
        case p =>
          p.map(_.asInstanceOf[ExecutableState]).toList
      },
    state = DState(in.readInt),
    // None is serialized as the empty string (see writeToParcel)
    reason = in.readString match {
      case empty if empty == "" => None
      case reason => Some(reason)
    },
    execPath = in.readString,
    dataPath = in.readString,
    enabled = (in.readByte == 1),
    serviceState = DState(in.readInt),
    serviceBusy = (in.readByte == 1))
  // Serializes all fields; must stay in the same order as this(in: Parcel).
  def writeToParcel(out: Parcel, flags: Int) {
    if (ComponentState.log.isTraceExtraEnabled)
      ComponentState.log.trace("writeToParcel ComponentState with flags " + flags)
    out.writeString(componentPackage)
    out.writeParcelableArray(executableState.toArray, 0)
    out.writeInt(state.id)
    out.writeString(reason.getOrElse(""))
    out.writeString(execPath)
    out.writeString(dataPath)
    out.writeByte(if (enabled) 1 else 0)
    out.writeInt(serviceState.id)
    out.writeByte(if (serviceBusy) 1 else 0)
  }
  // No file descriptors or special objects in the marshalled form
  def describeContents() = 0
}
object ComponentState {
  /**
   * Android Parcelable creator; logs and returns null when unmarshalling a
   * corrupt parcel fails (the framework tolerates a null result here).
   */
  final val CREATOR: Parcelable.Creator[ComponentState] = new Parcelable.Creator[ComponentState]() {
    def createFromParcel(in: Parcel): ComponentState = try {
      if (log.isTraceExtraEnabled)
        log.trace("createFromParcel new ComponentState")
      new ComponentState(in)
    } catch {
      // FIX: the previous bare `case e =>` caught every Throwable, silently
      // swallowing fatal errors (OutOfMemoryError, interrupts). Recover only
      // from non-fatal failures and let fatal ones propagate.
      case NonFatal(e) =>
        log.error(e.getMessage, e)
        null
    }
    def newArray(size: Int): Array[ComponentState] = new Array[ComponentState](size)
  }
}
| ezh/digi-lib-ctrl | src/main/scala/org/digimead/digi/lib/ctrl/info/ComponentState.scala | Scala | apache-2.0 | 2,781 |
package examples.bouncing
import java.awt.{Dimension, Graphics2D, Point}
import rescala._
import scala.swing.{MainFrame, Panel, SimpleSwingApplication, Swing}
/** Swing entry point: shows the bouncing-ball frame and drives the simulation. */
object SwitchVersion extends SimpleSwingApplication {
  lazy val application = new SwitchVersion
  def top = application.frame
  override def main(args: Array[String]): Unit = {
    super.main(args)
    // Simulation loop: fire one tick on the EDT roughly every 20 ms, forever.
    while (true) {
      Swing onEDTWait { application.tick(()) }
      Thread sleep 20
    }
  }
}
/**
 * Bouncing ball built from a reactive signal graph: position folds over
 * `tick` events; a bounce event toggles the corresponding speed signal.
 */
class SwitchVersion {
  val Size = 50
  val Max_X = 600
  val Max_Y = 600
  val initPosition = new Point(20, 10)
  val speed = new Point(10,8)
  // One event per animation frame; fired externally by the main loop.
  val tick = Evt[Unit]
  // Using switch
  // NOTE: the folds reference speedX/speedY before those vals are initialized;
  // this is safe because the closures only run when a tick fires, after
  // construction completes.
  val x: Signal[Int] = tick.fold(initPosition.x) {(pos, _) => pos + speedX.now}
  val y: Signal[Int] = tick.fold(initPosition.y) {(pos, _) => pos + speedY.now}
  // Fires when the ball crosses a horizontal/vertical boundary.
  val xBounce = x.changed && (x => x < 0 || x + Size > Max_X)
  val yBounce = y.changed && (y => y < 0 || y + Size > Max_Y)
  // Each bounce toggles the speed between +speed and -speed.
  val speedX = xBounce.toggle(Var(speed.x), Var(- speed.x))
  val speedY = yBounce.toggle(Var(speed.y), Var(- speed.y))
  tick += {_: Unit => frame.repaint()}
  // drawing code
  val frame = new MainFrame {
    contents = new Panel() {
      preferredSize = new Dimension(600, 600)
      override def paintComponent(g: Graphics2D): Unit = {
        g.fillOval(x.now, y.now, Size, Size)
      }
    }
  }
}
| volkc/REScala | Examples/examples/src/main/scala/examples/bouncing/SwitchVersion.scala | Scala | apache-2.0 | 1,344 |
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT
package doobie.free
import cats.~>
import cats.effect.kernel.{ CancelScope, Poll, Sync }
import cats.free.{ Free => FF } // alias because some algebras have an op called Free
import doobie.WeakAsync
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import java.io.InputStream
import java.io.Reader
import java.lang.Class
import java.lang.String
import java.math.BigDecimal
import java.net.URL
import java.sql.Blob
import java.sql.CallableStatement
import java.sql.Clob
import java.sql.Connection
import java.sql.Date
import java.sql.NClob
import java.sql.ParameterMetaData
import java.sql.Ref
import java.sql.ResultSet
import java.sql.ResultSetMetaData
import java.sql.RowId
import java.sql.SQLType
import java.sql.SQLWarning
import java.sql.SQLXML
import java.sql.Time
import java.sql.Timestamp
import java.sql.{ Array => SqlArray }
import java.util.Calendar
import java.util.Map
object callablestatement { module =>
// Algebra of operations for CallableStatement. Each accepts a visitor as an alternative to pattern-matching.
sealed trait CallableStatementOp[A] {
def visit[F[_]](v: CallableStatementOp.Visitor[F]): F[A]
}
// Free monad over CallableStatementOp.
type CallableStatementIO[A] = FF[CallableStatementOp, A]
// Module of instances and constructors of CallableStatementOp.
object CallableStatementOp {
// Given a CallableStatement we can embed a CallableStatementIO program in any algebra that understands embedding.
implicit val CallableStatementOpEmbeddable: Embeddable[CallableStatementOp, CallableStatement] =
new Embeddable[CallableStatementOp, CallableStatement] {
def embed[A](j: CallableStatement, fa: FF[CallableStatementOp, A]) = Embedded.CallableStatement(j, fa)
}
// Interface for a natural transformation CallableStatementOp ~> F encoded via the visitor pattern.
// This approach is much more efficient than pattern-matching for large algebras.
trait Visitor[F[_]] extends (CallableStatementOp ~> F) {
final def apply[A](fa: CallableStatementOp[A]): F[A] = fa.visit(this)
// Common
def raw[A](f: CallableStatement => A): F[A]
def embed[A](e: Embedded[A]): F[A]
def raiseError[A](e: Throwable): F[A]
def handleErrorWith[A](fa: CallableStatementIO[A])(f: Throwable => CallableStatementIO[A]): F[A]
def monotonic: F[FiniteDuration]
def realTime: F[FiniteDuration]
def delay[A](thunk: => A): F[A]
def suspend[A](hint: Sync.Type)(thunk: => A): F[A]
def forceR[A, B](fa: CallableStatementIO[A])(fb: CallableStatementIO[B]): F[B]
def uncancelable[A](body: Poll[CallableStatementIO] => CallableStatementIO[A]): F[A]
def poll[A](poll: Any, fa: CallableStatementIO[A]): F[A]
def canceled: F[Unit]
def onCancel[A](fa: CallableStatementIO[A], fin: CallableStatementIO[Unit]): F[A]
def fromFuture[A](fut: CallableStatementIO[Future[A]]): F[A]
// CallableStatement
def addBatch: F[Unit]
def addBatch(a: String): F[Unit]
def cancel: F[Unit]
def clearBatch: F[Unit]
def clearParameters: F[Unit]
def clearWarnings: F[Unit]
def close: F[Unit]
def closeOnCompletion: F[Unit]
def execute: F[Boolean]
def execute(a: String): F[Boolean]
def execute(a: String, b: Array[Int]): F[Boolean]
def execute(a: String, b: Array[String]): F[Boolean]
def execute(a: String, b: Int): F[Boolean]
def executeBatch: F[Array[Int]]
def executeLargeBatch: F[Array[Long]]
def executeLargeUpdate: F[Long]
def executeLargeUpdate(a: String): F[Long]
def executeLargeUpdate(a: String, b: Array[Int]): F[Long]
def executeLargeUpdate(a: String, b: Array[String]): F[Long]
def executeLargeUpdate(a: String, b: Int): F[Long]
def executeQuery: F[ResultSet]
def executeQuery(a: String): F[ResultSet]
def executeUpdate: F[Int]
def executeUpdate(a: String): F[Int]
def executeUpdate(a: String, b: Array[Int]): F[Int]
def executeUpdate(a: String, b: Array[String]): F[Int]
def executeUpdate(a: String, b: Int): F[Int]
def getArray(a: Int): F[SqlArray]
def getArray(a: String): F[SqlArray]
def getBigDecimal(a: Int): F[BigDecimal]
def getBigDecimal(a: String): F[BigDecimal]
def getBlob(a: Int): F[Blob]
def getBlob(a: String): F[Blob]
def getBoolean(a: Int): F[Boolean]
def getBoolean(a: String): F[Boolean]
def getByte(a: Int): F[Byte]
def getByte(a: String): F[Byte]
def getBytes(a: Int): F[Array[Byte]]
def getBytes(a: String): F[Array[Byte]]
def getCharacterStream(a: Int): F[Reader]
def getCharacterStream(a: String): F[Reader]
def getClob(a: Int): F[Clob]
def getClob(a: String): F[Clob]
def getConnection: F[Connection]
def getDate(a: Int): F[Date]
def getDate(a: Int, b: Calendar): F[Date]
def getDate(a: String): F[Date]
def getDate(a: String, b: Calendar): F[Date]
def getDouble(a: Int): F[Double]
def getDouble(a: String): F[Double]
def getFetchDirection: F[Int]
def getFetchSize: F[Int]
def getFloat(a: Int): F[Float]
def getFloat(a: String): F[Float]
def getGeneratedKeys: F[ResultSet]
def getInt(a: Int): F[Int]
def getInt(a: String): F[Int]
def getLargeMaxRows: F[Long]
def getLargeUpdateCount: F[Long]
def getLong(a: Int): F[Long]
def getLong(a: String): F[Long]
def getMaxFieldSize: F[Int]
def getMaxRows: F[Int]
def getMetaData: F[ResultSetMetaData]
def getMoreResults: F[Boolean]
def getMoreResults(a: Int): F[Boolean]
def getNCharacterStream(a: Int): F[Reader]
def getNCharacterStream(a: String): F[Reader]
def getNClob(a: Int): F[NClob]
def getNClob(a: String): F[NClob]
def getNString(a: Int): F[String]
def getNString(a: String): F[String]
def getObject(a: Int): F[AnyRef]
def getObject[T](a: Int, b: Class[T]): F[T]
def getObject(a: Int, b: Map[String, Class[_]]): F[AnyRef]
def getObject(a: String): F[AnyRef]
def getObject[T](a: String, b: Class[T]): F[T]
def getObject(a: String, b: Map[String, Class[_]]): F[AnyRef]
def getParameterMetaData: F[ParameterMetaData]
def getQueryTimeout: F[Int]
def getRef(a: Int): F[Ref]
def getRef(a: String): F[Ref]
def getResultSet: F[ResultSet]
def getResultSetConcurrency: F[Int]
def getResultSetHoldability: F[Int]
def getResultSetType: F[Int]
def getRowId(a: Int): F[RowId]
def getRowId(a: String): F[RowId]
def getSQLXML(a: Int): F[SQLXML]
def getSQLXML(a: String): F[SQLXML]
def getShort(a: Int): F[Short]
def getShort(a: String): F[Short]
def getString(a: Int): F[String]
def getString(a: String): F[String]
def getTime(a: Int): F[Time]
def getTime(a: Int, b: Calendar): F[Time]
def getTime(a: String): F[Time]
def getTime(a: String, b: Calendar): F[Time]
def getTimestamp(a: Int): F[Timestamp]
def getTimestamp(a: Int, b: Calendar): F[Timestamp]
def getTimestamp(a: String): F[Timestamp]
def getTimestamp(a: String, b: Calendar): F[Timestamp]
def getURL(a: Int): F[URL]
def getURL(a: String): F[URL]
def getUpdateCount: F[Int]
def getWarnings: F[SQLWarning]
def isCloseOnCompletion: F[Boolean]
def isClosed: F[Boolean]
def isPoolable: F[Boolean]
def isWrapperFor(a: Class[_]): F[Boolean]
def registerOutParameter(a: Int, b: Int): F[Unit]
def registerOutParameter(a: Int, b: Int, c: Int): F[Unit]
def registerOutParameter(a: Int, b: Int, c: String): F[Unit]
def registerOutParameter(a: Int, b: SQLType): F[Unit]
def registerOutParameter(a: Int, b: SQLType, c: Int): F[Unit]
def registerOutParameter(a: Int, b: SQLType, c: String): F[Unit]
def registerOutParameter(a: String, b: Int): F[Unit]
def registerOutParameter(a: String, b: Int, c: Int): F[Unit]
def registerOutParameter(a: String, b: Int, c: String): F[Unit]
def registerOutParameter(a: String, b: SQLType): F[Unit]
def registerOutParameter(a: String, b: SQLType, c: Int): F[Unit]
def registerOutParameter(a: String, b: SQLType, c: String): F[Unit]
def setArray(a: Int, b: SqlArray): F[Unit]
def setAsciiStream(a: Int, b: InputStream): F[Unit]
def setAsciiStream(a: Int, b: InputStream, c: Int): F[Unit]
def setAsciiStream(a: Int, b: InputStream, c: Long): F[Unit]
def setAsciiStream(a: String, b: InputStream): F[Unit]
def setAsciiStream(a: String, b: InputStream, c: Int): F[Unit]
def setAsciiStream(a: String, b: InputStream, c: Long): F[Unit]
def setBigDecimal(a: Int, b: BigDecimal): F[Unit]
def setBigDecimal(a: String, b: BigDecimal): F[Unit]
def setBinaryStream(a: Int, b: InputStream): F[Unit]
def setBinaryStream(a: Int, b: InputStream, c: Int): F[Unit]
def setBinaryStream(a: Int, b: InputStream, c: Long): F[Unit]
def setBinaryStream(a: String, b: InputStream): F[Unit]
def setBinaryStream(a: String, b: InputStream, c: Int): F[Unit]
def setBinaryStream(a: String, b: InputStream, c: Long): F[Unit]
def setBlob(a: Int, b: Blob): F[Unit]
def setBlob(a: Int, b: InputStream): F[Unit]
def setBlob(a: Int, b: InputStream, c: Long): F[Unit]
def setBlob(a: String, b: Blob): F[Unit]
def setBlob(a: String, b: InputStream): F[Unit]
def setBlob(a: String, b: InputStream, c: Long): F[Unit]
def setBoolean(a: Int, b: Boolean): F[Unit]
def setBoolean(a: String, b: Boolean): F[Unit]
def setByte(a: Int, b: Byte): F[Unit]
def setByte(a: String, b: Byte): F[Unit]
def setBytes(a: Int, b: Array[Byte]): F[Unit]
def setBytes(a: String, b: Array[Byte]): F[Unit]
def setCharacterStream(a: Int, b: Reader): F[Unit]
def setCharacterStream(a: Int, b: Reader, c: Int): F[Unit]
def setCharacterStream(a: Int, b: Reader, c: Long): F[Unit]
def setCharacterStream(a: String, b: Reader): F[Unit]
def setCharacterStream(a: String, b: Reader, c: Int): F[Unit]
def setCharacterStream(a: String, b: Reader, c: Long): F[Unit]
def setClob(a: Int, b: Clob): F[Unit]
def setClob(a: Int, b: Reader): F[Unit]
def setClob(a: Int, b: Reader, c: Long): F[Unit]
def setClob(a: String, b: Clob): F[Unit]
def setClob(a: String, b: Reader): F[Unit]
def setClob(a: String, b: Reader, c: Long): F[Unit]
def setCursorName(a: String): F[Unit]
def setDate(a: Int, b: Date): F[Unit]
def setDate(a: Int, b: Date, c: Calendar): F[Unit]
def setDate(a: String, b: Date): F[Unit]
def setDate(a: String, b: Date, c: Calendar): F[Unit]
def setDouble(a: Int, b: Double): F[Unit]
def setDouble(a: String, b: Double): F[Unit]
def setEscapeProcessing(a: Boolean): F[Unit]
def setFetchDirection(a: Int): F[Unit]
def setFetchSize(a: Int): F[Unit]
def setFloat(a: Int, b: Float): F[Unit]
def setFloat(a: String, b: Float): F[Unit]
def setInt(a: Int, b: Int): F[Unit]
def setInt(a: String, b: Int): F[Unit]
def setLargeMaxRows(a: Long): F[Unit]
def setLong(a: Int, b: Long): F[Unit]
def setLong(a: String, b: Long): F[Unit]
def setMaxFieldSize(a: Int): F[Unit]
def setMaxRows(a: Int): F[Unit]
def setNCharacterStream(a: Int, b: Reader): F[Unit]
def setNCharacterStream(a: Int, b: Reader, c: Long): F[Unit]
def setNCharacterStream(a: String, b: Reader): F[Unit]
def setNCharacterStream(a: String, b: Reader, c: Long): F[Unit]
def setNClob(a: Int, b: NClob): F[Unit]
def setNClob(a: Int, b: Reader): F[Unit]
def setNClob(a: Int, b: Reader, c: Long): F[Unit]
def setNClob(a: String, b: NClob): F[Unit]
def setNClob(a: String, b: Reader): F[Unit]
def setNClob(a: String, b: Reader, c: Long): F[Unit]
def setNString(a: Int, b: String): F[Unit]
def setNString(a: String, b: String): F[Unit]
def setNull(a: Int, b: Int): F[Unit]
def setNull(a: Int, b: Int, c: String): F[Unit]
def setNull(a: String, b: Int): F[Unit]
def setNull(a: String, b: Int, c: String): F[Unit]
def setObject(a: Int, b: AnyRef): F[Unit]
def setObject(a: Int, b: AnyRef, c: Int): F[Unit]
def setObject(a: Int, b: AnyRef, c: Int, d: Int): F[Unit]
def setObject(a: Int, b: AnyRef, c: SQLType): F[Unit]
def setObject(a: Int, b: AnyRef, c: SQLType, d: Int): F[Unit]
def setObject(a: String, b: AnyRef): F[Unit]
def setObject(a: String, b: AnyRef, c: Int): F[Unit]
def setObject(a: String, b: AnyRef, c: Int, d: Int): F[Unit]
def setObject(a: String, b: AnyRef, c: SQLType): F[Unit]
def setObject(a: String, b: AnyRef, c: SQLType, d: Int): F[Unit]
def setPoolable(a: Boolean): F[Unit]
def setQueryTimeout(a: Int): F[Unit]
def setRef(a: Int, b: Ref): F[Unit]
def setRowId(a: Int, b: RowId): F[Unit]
def setRowId(a: String, b: RowId): F[Unit]
def setSQLXML(a: Int, b: SQLXML): F[Unit]
def setSQLXML(a: String, b: SQLXML): F[Unit]
def setShort(a: Int, b: Short): F[Unit]
def setShort(a: String, b: Short): F[Unit]
def setString(a: Int, b: String): F[Unit]
def setString(a: String, b: String): F[Unit]
def setTime(a: Int, b: Time): F[Unit]
def setTime(a: Int, b: Time, c: Calendar): F[Unit]
def setTime(a: String, b: Time): F[Unit]
def setTime(a: String, b: Time, c: Calendar): F[Unit]
def setTimestamp(a: Int, b: Timestamp): F[Unit]
def setTimestamp(a: Int, b: Timestamp, c: Calendar): F[Unit]
def setTimestamp(a: String, b: Timestamp): F[Unit]
def setTimestamp(a: String, b: Timestamp, c: Calendar): F[Unit]
def setURL(a: Int, b: URL): F[Unit]
def setURL(a: String, b: URL): F[Unit]
def unwrap[T](a: Class[T]): F[T]
def wasNull: F[Boolean]
}
// Common operations for all algebras.
// Each constructor reifies one primitive of the free monad; `visit` dispatches it
// to the corresponding method of the interpreter (`Visitor[F]`).
// Lift an arbitrary function CallableStatement => A into the algebra. This is the
// escape hatch for JDBC calls that have no dedicated constructor below.
final case class Raw[A](f: CallableStatement => A) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.raw(f)
}
// Embed a program written in another (nested) algebra.
final case class Embed[A](e: Embedded[A]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.embed(e)
}
// Fail with the given Throwable when interpreted.
final case class RaiseError[A](e: Throwable) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.raiseError(e)
}
// Recover from errors raised by `fa` using the handler `f`.
final case class HandleErrorWith[A](fa: CallableStatementIO[A], f: Throwable => CallableStatementIO[A]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.handleErrorWith(fa)(f)
}
// Cats-effect temporal/cancelation constructors. These mirror the Clock / Sync /
// MonadCancel primitives so a CallableStatementIO can be interpreted into any
// effect providing those capabilities.
// Monotonic clock reading (for measuring elapsed time).
case object Monotonic extends CallableStatementOp[FiniteDuration] {
def visit[F[_]](v: Visitor[F]) = v.monotonic
}
// Wall-clock reading.
case object Realtime extends CallableStatementOp[FiniteDuration] {
def visit[F[_]](v: Visitor[F]) = v.realTime
}
// `final` added to the case classes below for consistency with every other case
// class in this algebra (case classes should not be extended).
// Defer evaluation of `thunk` with the given suspension hint.
final case class Suspend[A](hint: Sync.Type, thunk: () => A) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.suspend(hint)(thunk())
}
// Run `fa` (ignoring its outcome unless it errors fatally), then run `fb`.
final case class ForceR[A, B](fa: CallableStatementIO[A], fb: CallableStatementIO[B]) extends CallableStatementOp[B] {
def visit[F[_]](v: Visitor[F]) = v.forceR(fa)(fb)
}
// Mask cancelation inside `body`; the supplied Poll re-enables it selectively.
final case class Uncancelable[A](body: Poll[CallableStatementIO] => CallableStatementIO[A]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.uncancelable(body)
}
// NOTE(review): `poll` is typed `Any` here (the interpreter downcasts it); confirm
// against the code generator before attempting to tighten the type.
final case class Poll1[A](poll: Any, fa: CallableStatementIO[A]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.poll(poll, fa)
}
// Request self-cancelation.
case object Canceled extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.canceled
}
// Attach finalizer `fin`, run if `fa` is canceled.
final case class OnCancel[A](fa: CallableStatementIO[A], fin: CallableStatementIO[Unit]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.onCancel(fa, fin)
}
// Lift a program producing a Future into the algebra.
final case class FromFuture[A](fut: CallableStatementIO[Future[A]]) extends CallableStatementOp[A] {
def visit[F[_]](v: Visitor[F]) = v.fromFuture(fut)
}
// CallableStatement-specific operations.
// One constructor per JDBC method; numeric suffixes (`Execute1`, `Execute2`, ...)
// disambiguate the Java overloads, which case classes cannot share a name for.
// Batch management (java.sql.Statement.addBatch / clearBatch / executeBatch).
case object AddBatch extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.addBatch
}
final case class AddBatch1(a: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.addBatch(a)
}
case object Cancel extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.cancel
}
case object ClearBatch extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.clearBatch
}
case object ClearParameters extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.clearParameters
}
case object ClearWarnings extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.clearWarnings
}
case object Close extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.close
}
case object CloseOnCompletion extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.closeOnCompletion
}
// Execution: the overloads taking (String, Array[Int]) / (String, Array[String]) /
// (String, Int) correspond to JDBC's generated-keys variants.
case object Execute extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.execute
}
final case class Execute1(a: String) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.execute(a)
}
final case class Execute2(a: String, b: Array[Int]) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.execute(a, b)
}
final case class Execute3(a: String, b: Array[String]) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.execute(a, b)
}
final case class Execute4(a: String, b: Int) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.execute(a, b)
}
case object ExecuteBatch extends CallableStatementOp[Array[Int]] {
def visit[F[_]](v: Visitor[F]) = v.executeBatch
}
case object ExecuteLargeBatch extends CallableStatementOp[Array[Long]] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeBatch
}
case object ExecuteLargeUpdate extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate
}
final case class ExecuteLargeUpdate1(a: String) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a)
}
final case class ExecuteLargeUpdate2(a: String, b: Array[Int]) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b)
}
final case class ExecuteLargeUpdate3(a: String, b: Array[String]) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b)
}
final case class ExecuteLargeUpdate4(a: String, b: Int) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b)
}
case object ExecuteQuery extends CallableStatementOp[ResultSet] {
def visit[F[_]](v: Visitor[F]) = v.executeQuery
}
final case class ExecuteQuery1(a: String) extends CallableStatementOp[ResultSet] {
def visit[F[_]](v: Visitor[F]) = v.executeQuery(a)
}
case object ExecuteUpdate extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.executeUpdate
}
final case class ExecuteUpdate1(a: String) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a)
}
final case class ExecuteUpdate2(a: String, b: Array[Int]) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b)
}
final case class ExecuteUpdate3(a: String, b: Array[String]) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b)
}
final case class ExecuteUpdate4(a: String, b: Int) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b)
}
// Out-parameter getters and statement accessors. Pattern: the `Int` parameter is
// a 1-based parameter index, the `String` parameter is a parameter name; numeric
// suffixes disambiguate the JDBC overloads.
final case class GetArray(a: Int) extends CallableStatementOp[SqlArray] {
def visit[F[_]](v: Visitor[F]) = v.getArray(a)
}
final case class GetArray1(a: String) extends CallableStatementOp[SqlArray] {
def visit[F[_]](v: Visitor[F]) = v.getArray(a)
}
final case class GetBigDecimal(a: Int) extends CallableStatementOp[BigDecimal] {
def visit[F[_]](v: Visitor[F]) = v.getBigDecimal(a)
}
final case class GetBigDecimal1(a: String) extends CallableStatementOp[BigDecimal] {
def visit[F[_]](v: Visitor[F]) = v.getBigDecimal(a)
}
final case class GetBlob(a: Int) extends CallableStatementOp[Blob] {
def visit[F[_]](v: Visitor[F]) = v.getBlob(a)
}
final case class GetBlob1(a: String) extends CallableStatementOp[Blob] {
def visit[F[_]](v: Visitor[F]) = v.getBlob(a)
}
final case class GetBoolean(a: Int) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.getBoolean(a)
}
final case class GetBoolean1(a: String) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.getBoolean(a)
}
final case class GetByte(a: Int) extends CallableStatementOp[Byte] {
def visit[F[_]](v: Visitor[F]) = v.getByte(a)
}
final case class GetByte1(a: String) extends CallableStatementOp[Byte] {
def visit[F[_]](v: Visitor[F]) = v.getByte(a)
}
final case class GetBytes(a: Int) extends CallableStatementOp[Array[Byte]] {
def visit[F[_]](v: Visitor[F]) = v.getBytes(a)
}
final case class GetBytes1(a: String) extends CallableStatementOp[Array[Byte]] {
def visit[F[_]](v: Visitor[F]) = v.getBytes(a)
}
final case class GetCharacterStream(a: Int) extends CallableStatementOp[Reader] {
def visit[F[_]](v: Visitor[F]) = v.getCharacterStream(a)
}
final case class GetCharacterStream1(a: String) extends CallableStatementOp[Reader] {
def visit[F[_]](v: Visitor[F]) = v.getCharacterStream(a)
}
final case class GetClob(a: Int) extends CallableStatementOp[Clob] {
def visit[F[_]](v: Visitor[F]) = v.getClob(a)
}
final case class GetClob1(a: String) extends CallableStatementOp[Clob] {
def visit[F[_]](v: Visitor[F]) = v.getClob(a)
}
case object GetConnection extends CallableStatementOp[Connection] {
def visit[F[_]](v: Visitor[F]) = v.getConnection
}
// Date/time getters optionally take a Calendar for time-zone interpretation.
final case class GetDate(a: Int) extends CallableStatementOp[Date] {
def visit[F[_]](v: Visitor[F]) = v.getDate(a)
}
final case class GetDate1(a: Int, b: Calendar) extends CallableStatementOp[Date] {
def visit[F[_]](v: Visitor[F]) = v.getDate(a, b)
}
final case class GetDate2(a: String) extends CallableStatementOp[Date] {
def visit[F[_]](v: Visitor[F]) = v.getDate(a)
}
final case class GetDate3(a: String, b: Calendar) extends CallableStatementOp[Date] {
def visit[F[_]](v: Visitor[F]) = v.getDate(a, b)
}
final case class GetDouble(a: Int) extends CallableStatementOp[Double] {
def visit[F[_]](v: Visitor[F]) = v.getDouble(a)
}
final case class GetDouble1(a: String) extends CallableStatementOp[Double] {
def visit[F[_]](v: Visitor[F]) = v.getDouble(a)
}
case object GetFetchDirection extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getFetchDirection
}
case object GetFetchSize extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getFetchSize
}
final case class GetFloat(a: Int) extends CallableStatementOp[Float] {
def visit[F[_]](v: Visitor[F]) = v.getFloat(a)
}
final case class GetFloat1(a: String) extends CallableStatementOp[Float] {
def visit[F[_]](v: Visitor[F]) = v.getFloat(a)
}
case object GetGeneratedKeys extends CallableStatementOp[ResultSet] {
def visit[F[_]](v: Visitor[F]) = v.getGeneratedKeys
}
final case class GetInt(a: Int) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getInt(a)
}
final case class GetInt1(a: String) extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getInt(a)
}
case object GetLargeMaxRows extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.getLargeMaxRows
}
case object GetLargeUpdateCount extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.getLargeUpdateCount
}
final case class GetLong(a: Int) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.getLong(a)
}
final case class GetLong1(a: String) extends CallableStatementOp[Long] {
def visit[F[_]](v: Visitor[F]) = v.getLong(a)
}
case object GetMaxFieldSize extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getMaxFieldSize
}
case object GetMaxRows extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getMaxRows
}
case object GetMetaData extends CallableStatementOp[ResultSetMetaData] {
def visit[F[_]](v: Visitor[F]) = v.getMetaData
}
case object GetMoreResults extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.getMoreResults
}
final case class GetMoreResults1(a: Int) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.getMoreResults(a)
}
final case class GetNCharacterStream(a: Int) extends CallableStatementOp[Reader] {
def visit[F[_]](v: Visitor[F]) = v.getNCharacterStream(a)
}
final case class GetNCharacterStream1(a: String) extends CallableStatementOp[Reader] {
def visit[F[_]](v: Visitor[F]) = v.getNCharacterStream(a)
}
final case class GetNClob(a: Int) extends CallableStatementOp[NClob] {
def visit[F[_]](v: Visitor[F]) = v.getNClob(a)
}
final case class GetNClob1(a: String) extends CallableStatementOp[NClob] {
def visit[F[_]](v: Visitor[F]) = v.getNClob(a)
}
final case class GetNString(a: Int) extends CallableStatementOp[String] {
def visit[F[_]](v: Visitor[F]) = v.getNString(a)
}
final case class GetNString1(a: String) extends CallableStatementOp[String] {
def visit[F[_]](v: Visitor[F]) = v.getNString(a)
}
// getObject variants: with a target Class[T] for driver-side conversion, or a
// type-map for SQL user-defined types.
final case class GetObject(a: Int) extends CallableStatementOp[AnyRef] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a)
}
final case class GetObject1[T](a: Int, b: Class[T]) extends CallableStatementOp[T] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a, b)
}
final case class GetObject2(a: Int, b: Map[String, Class[_]]) extends CallableStatementOp[AnyRef] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a, b)
}
final case class GetObject3(a: String) extends CallableStatementOp[AnyRef] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a)
}
final case class GetObject4[T](a: String, b: Class[T]) extends CallableStatementOp[T] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a, b)
}
final case class GetObject5(a: String, b: Map[String, Class[_]]) extends CallableStatementOp[AnyRef] {
def visit[F[_]](v: Visitor[F]) = v.getObject(a, b)
}
case object GetParameterMetaData extends CallableStatementOp[ParameterMetaData] {
def visit[F[_]](v: Visitor[F]) = v.getParameterMetaData
}
case object GetQueryTimeout extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getQueryTimeout
}
final case class GetRef(a: Int) extends CallableStatementOp[Ref] {
def visit[F[_]](v: Visitor[F]) = v.getRef(a)
}
final case class GetRef1(a: String) extends CallableStatementOp[Ref] {
def visit[F[_]](v: Visitor[F]) = v.getRef(a)
}
case object GetResultSet extends CallableStatementOp[ResultSet] {
def visit[F[_]](v: Visitor[F]) = v.getResultSet
}
case object GetResultSetConcurrency extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getResultSetConcurrency
}
case object GetResultSetHoldability extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getResultSetHoldability
}
case object GetResultSetType extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getResultSetType
}
final case class GetRowId(a: Int) extends CallableStatementOp[RowId] {
def visit[F[_]](v: Visitor[F]) = v.getRowId(a)
}
final case class GetRowId1(a: String) extends CallableStatementOp[RowId] {
def visit[F[_]](v: Visitor[F]) = v.getRowId(a)
}
final case class GetSQLXML(a: Int) extends CallableStatementOp[SQLXML] {
def visit[F[_]](v: Visitor[F]) = v.getSQLXML(a)
}
final case class GetSQLXML1(a: String) extends CallableStatementOp[SQLXML] {
def visit[F[_]](v: Visitor[F]) = v.getSQLXML(a)
}
final case class GetShort(a: Int) extends CallableStatementOp[Short] {
def visit[F[_]](v: Visitor[F]) = v.getShort(a)
}
final case class GetShort1(a: String) extends CallableStatementOp[Short] {
def visit[F[_]](v: Visitor[F]) = v.getShort(a)
}
final case class GetString(a: Int) extends CallableStatementOp[String] {
def visit[F[_]](v: Visitor[F]) = v.getString(a)
}
final case class GetString1(a: String) extends CallableStatementOp[String] {
def visit[F[_]](v: Visitor[F]) = v.getString(a)
}
final case class GetTime(a: Int) extends CallableStatementOp[Time] {
def visit[F[_]](v: Visitor[F]) = v.getTime(a)
}
final case class GetTime1(a: Int, b: Calendar) extends CallableStatementOp[Time] {
def visit[F[_]](v: Visitor[F]) = v.getTime(a, b)
}
final case class GetTime2(a: String) extends CallableStatementOp[Time] {
def visit[F[_]](v: Visitor[F]) = v.getTime(a)
}
final case class GetTime3(a: String, b: Calendar) extends CallableStatementOp[Time] {
def visit[F[_]](v: Visitor[F]) = v.getTime(a, b)
}
final case class GetTimestamp(a: Int) extends CallableStatementOp[Timestamp] {
def visit[F[_]](v: Visitor[F]) = v.getTimestamp(a)
}
final case class GetTimestamp1(a: Int, b: Calendar) extends CallableStatementOp[Timestamp] {
def visit[F[_]](v: Visitor[F]) = v.getTimestamp(a, b)
}
final case class GetTimestamp2(a: String) extends CallableStatementOp[Timestamp] {
def visit[F[_]](v: Visitor[F]) = v.getTimestamp(a)
}
final case class GetTimestamp3(a: String, b: Calendar) extends CallableStatementOp[Timestamp] {
def visit[F[_]](v: Visitor[F]) = v.getTimestamp(a, b)
}
final case class GetURL(a: Int) extends CallableStatementOp[URL] {
def visit[F[_]](v: Visitor[F]) = v.getURL(a)
}
final case class GetURL1(a: String) extends CallableStatementOp[URL] {
def visit[F[_]](v: Visitor[F]) = v.getURL(a)
}
case object GetUpdateCount extends CallableStatementOp[Int] {
def visit[F[_]](v: Visitor[F]) = v.getUpdateCount
}
case object GetWarnings extends CallableStatementOp[SQLWarning] {
def visit[F[_]](v: Visitor[F]) = v.getWarnings
}
case object IsCloseOnCompletion extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.isCloseOnCompletion
}
case object IsClosed extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.isClosed
}
case object IsPoolable extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.isPoolable
}
final case class IsWrapperFor(a: Class[_]) extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.isWrapperFor(a)
}
// registerOutParameter overloads. First parameter is index (Int) or name (String);
// the SQL type is either a java.sql.Types Int code or a java.sql.SQLType; the
// trailing Int is scale, the trailing String a user-defined type name.
final case class RegisterOutParameter(a: Int, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b)
}
final case class RegisterOutParameter1(a: Int, b: Int, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter2(a: Int, b: Int, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter3(a: Int, b: SQLType) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b)
}
final case class RegisterOutParameter4(a: Int, b: SQLType, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter5(a: Int, b: SQLType, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter6(a: String, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b)
}
final case class RegisterOutParameter7(a: String, b: Int, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter8(a: String, b: Int, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter9(a: String, b: SQLType) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b)
}
final case class RegisterOutParameter10(a: String, b: SQLType, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
final case class RegisterOutParameter11(a: String, b: SQLType, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.registerOutParameter(a, b, c)
}
// Parameter setters: one constructor per JDBC setter overload. As above, an Int
// first parameter is a 1-based index, a String is a parameter name; stream
// variants optionally carry an Int/Long length.
final case class SetArray(a: Int, b: SqlArray) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setArray(a, b)
}
final case class SetAsciiStream(a: Int, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b)
}
final case class SetAsciiStream1(a: Int, b: InputStream, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c)
}
final case class SetAsciiStream2(a: Int, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c)
}
final case class SetAsciiStream3(a: String, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b)
}
final case class SetAsciiStream4(a: String, b: InputStream, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c)
}
final case class SetAsciiStream5(a: String, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c)
}
final case class SetBigDecimal(a: Int, b: BigDecimal) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBigDecimal(a, b)
}
final case class SetBigDecimal1(a: String, b: BigDecimal) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBigDecimal(a, b)
}
final case class SetBinaryStream(a: Int, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b)
}
final case class SetBinaryStream1(a: Int, b: InputStream, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c)
}
final case class SetBinaryStream2(a: Int, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c)
}
final case class SetBinaryStream3(a: String, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b)
}
final case class SetBinaryStream4(a: String, b: InputStream, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c)
}
final case class SetBinaryStream5(a: String, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c)
}
final case class SetBlob(a: Int, b: Blob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b)
}
final case class SetBlob1(a: Int, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b)
}
final case class SetBlob2(a: Int, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b, c)
}
final case class SetBlob3(a: String, b: Blob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b)
}
final case class SetBlob4(a: String, b: InputStream) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b)
}
final case class SetBlob5(a: String, b: InputStream, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b, c)
}
final case class SetBoolean(a: Int, b: Boolean) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBoolean(a, b)
}
final case class SetBoolean1(a: String, b: Boolean) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBoolean(a, b)
}
final case class SetByte(a: Int, b: Byte) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setByte(a, b)
}
final case class SetByte1(a: String, b: Byte) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setByte(a, b)
}
final case class SetBytes(a: Int, b: Array[Byte]) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBytes(a, b)
}
final case class SetBytes1(a: String, b: Array[Byte]) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setBytes(a, b)
}
final case class SetCharacterStream(a: Int, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b)
}
final case class SetCharacterStream1(a: Int, b: Reader, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c)
}
final case class SetCharacterStream2(a: Int, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c)
}
final case class SetCharacterStream3(a: String, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b)
}
final case class SetCharacterStream4(a: String, b: Reader, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c)
}
final case class SetCharacterStream5(a: String, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c)
}
final case class SetClob(a: Int, b: Clob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b)
}
final case class SetClob1(a: Int, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b)
}
final case class SetClob2(a: Int, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b, c)
}
final case class SetClob3(a: String, b: Clob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b)
}
final case class SetClob4(a: String, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b)
}
final case class SetClob5(a: String, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setClob(a, b, c)
}
final case class SetCursorName(a: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setCursorName(a)
}
final case class SetDate(a: Int, b: Date) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDate(a, b)
}
final case class SetDate1(a: Int, b: Date, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDate(a, b, c)
}
final case class SetDate2(a: String, b: Date) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDate(a, b)
}
final case class SetDate3(a: String, b: Date, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDate(a, b, c)
}
final case class SetDouble(a: Int, b: Double) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDouble(a, b)
}
final case class SetDouble1(a: String, b: Double) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setDouble(a, b)
}
final case class SetEscapeProcessing(a: Boolean) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setEscapeProcessing(a)
}
final case class SetFetchDirection(a: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setFetchDirection(a)
}
final case class SetFetchSize(a: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setFetchSize(a)
}
final case class SetFloat(a: Int, b: Float) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setFloat(a, b)
}
final case class SetFloat1(a: String, b: Float) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setFloat(a, b)
}
final case class SetInt(a: Int, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setInt(a, b)
}
final case class SetInt1(a: String, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setInt(a, b)
}
final case class SetLargeMaxRows(a: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setLargeMaxRows(a)
}
final case class SetLong(a: Int, b: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setLong(a, b)
}
final case class SetLong1(a: String, b: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setLong(a, b)
}
final case class SetMaxFieldSize(a: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setMaxFieldSize(a)
}
final case class SetMaxRows(a: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setMaxRows(a)
}
final case class SetNCharacterStream(a: Int, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b)
}
final case class SetNCharacterStream1(a: Int, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b, c)
}
final case class SetNCharacterStream2(a: String, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b)
}
final case class SetNCharacterStream3(a: String, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b, c)
}
final case class SetNClob(a: Int, b: NClob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b)
}
final case class SetNClob1(a: Int, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b)
}
final case class SetNClob2(a: Int, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b, c)
}
final case class SetNClob3(a: String, b: NClob) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b)
}
final case class SetNClob4(a: String, b: Reader) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b)
}
final case class SetNClob5(a: String, b: Reader, c: Long) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b, c)
}
final case class SetNString(a: Int, b: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNString(a, b)
}
final case class SetNString1(a: String, b: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNString(a, b)
}
final case class SetNull(a: Int, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNull(a, b)
}
final case class SetNull1(a: Int, b: Int, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNull(a, b, c)
}
final case class SetNull2(a: String, b: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNull(a, b)
}
final case class SetNull3(a: String, b: Int, c: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setNull(a, b, c)
}
final case class SetObject(a: Int, b: AnyRef) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b)
}
final case class SetObject1(a: Int, b: AnyRef, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c)
}
final case class SetObject2(a: Int, b: AnyRef, c: Int, d: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d)
}
final case class SetObject3(a: Int, b: AnyRef, c: SQLType) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c)
}
final case class SetObject4(a: Int, b: AnyRef, c: SQLType, d: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d)
}
final case class SetObject5(a: String, b: AnyRef) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b)
}
final case class SetObject6(a: String, b: AnyRef, c: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c)
}
final case class SetObject7(a: String, b: AnyRef, c: Int, d: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d)
}
final case class SetObject8(a: String, b: AnyRef, c: SQLType) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c)
}
final case class SetObject9(a: String, b: AnyRef, c: SQLType, d: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d)
}
final case class SetPoolable(a: Boolean) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setPoolable(a)
}
final case class SetQueryTimeout(a: Int) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setQueryTimeout(a)
}
final case class SetRef(a: Int, b: Ref) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setRef(a, b)
}
final case class SetRowId(a: Int, b: RowId) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setRowId(a, b)
}
final case class SetRowId1(a: String, b: RowId) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setRowId(a, b)
}
final case class SetSQLXML(a: Int, b: SQLXML) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setSQLXML(a, b)
}
final case class SetSQLXML1(a: String, b: SQLXML) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setSQLXML(a, b)
}
final case class SetShort(a: Int, b: Short) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setShort(a, b)
}
final case class SetShort1(a: String, b: Short) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setShort(a, b)
}
final case class SetString(a: Int, b: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setString(a, b)
}
final case class SetString1(a: String, b: String) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setString(a, b)
}
final case class SetTime(a: Int, b: Time) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTime(a, b)
}
final case class SetTime1(a: Int, b: Time, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTime(a, b, c)
}
final case class SetTime2(a: String, b: Time) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTime(a, b)
}
final case class SetTime3(a: String, b: Time, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTime(a, b, c)
}
final case class SetTimestamp(a: Int, b: Timestamp) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b)
}
final case class SetTimestamp1(a: Int, b: Timestamp, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b, c)
}
final case class SetTimestamp2(a: String, b: Timestamp) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b)
}
final case class SetTimestamp3(a: String, b: Timestamp, c: Calendar) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b, c)
}
final case class SetURL(a: Int, b: URL) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setURL(a, b)
}
final case class SetURL1(a: String, b: URL) extends CallableStatementOp[Unit] {
def visit[F[_]](v: Visitor[F]) = v.setURL(a, b)
}
final case class Unwrap[T](a: Class[T]) extends CallableStatementOp[T] {
def visit[F[_]](v: Visitor[F]) = v.unwrap(a)
}
case object WasNull extends CallableStatementOp[Boolean] {
def visit[F[_]](v: Visitor[F]) = v.wasNull
}
}
import CallableStatementOp._
// Smart constructors for operations common to all algebras.
// Each constructor lifts an Op value into the free monad CallableStatementIO.
val unit: CallableStatementIO[Unit] = FF.pure[CallableStatementOp, Unit](())
def pure[A](a: A): CallableStatementIO[A] = FF.pure[CallableStatementOp, A](a)
// Escape hatch: run an arbitrary function against the underlying CallableStatement.
def raw[A](f: CallableStatement => A): CallableStatementIO[A] = FF.liftF(Raw(f))
// Embed a program from another doobie algebra, witnessed by Embeddable.
def embed[F[_], J, A](j: J, fa: FF[F, A])(implicit ev: Embeddable[F, J]): FF[CallableStatementOp, A] = FF.liftF(Embed(ev.embed(j, fa)))
// Error channel: raise and recover, deferred to the interpreter.
def raiseError[A](err: Throwable): CallableStatementIO[A] = FF.liftF[CallableStatementOp, A](RaiseError(err))
def handleErrorWith[A](fa: CallableStatementIO[A])(f: Throwable => CallableStatementIO[A]): CallableStatementIO[A] = FF.liftF[CallableStatementOp, A](HandleErrorWith(fa, f))
// Clock and suspension primitives (cats-effect Sync-style).
val monotonic = FF.liftF[CallableStatementOp, FiniteDuration](Monotonic)
val realtime = FF.liftF[CallableStatementOp, FiniteDuration](Realtime)
def delay[A](thunk: => A) = FF.liftF[CallableStatementOp, A](Suspend(Sync.Type.Delay, () => thunk))
def suspend[A](hint: Sync.Type)(thunk: => A) = FF.liftF[CallableStatementOp, A](Suspend(hint, () => thunk))
def forceR[A, B](fa: CallableStatementIO[A])(fb: CallableStatementIO[B]) = FF.liftF[CallableStatementOp, B](ForceR(fa, fb))
// Cancellation support: uncancelable regions, poll capture, finalizers.
def uncancelable[A](body: Poll[CallableStatementIO] => CallableStatementIO[A]) = FF.liftF[CallableStatementOp, A](Uncancelable(body))
def capturePoll[M[_]](mpoll: Poll[M]) = new Poll[CallableStatementIO] {
def apply[A](fa: CallableStatementIO[A]) = FF.liftF[CallableStatementOp, A](Poll1(mpoll, fa))
}
val canceled = FF.liftF[CallableStatementOp, Unit](Canceled)
def onCancel[A](fa: CallableStatementIO[A], fin: CallableStatementIO[Unit]) = FF.liftF[CallableStatementOp, A](OnCancel(fa, fin))
def fromFuture[A](fut: CallableStatementIO[Future[A]]) = FF.liftF[CallableStatementOp, A](FromFuture(fut))
// Smart constructors for CallableStatement-specific operations.
// Batch management and statement execution.
val addBatch: CallableStatementIO[Unit] = FF.liftF(AddBatch)
def addBatch(a: String): CallableStatementIO[Unit] = FF.liftF(AddBatch1(a))
val cancel: CallableStatementIO[Unit] = FF.liftF(Cancel)
val clearBatch: CallableStatementIO[Unit] = FF.liftF(ClearBatch)
val clearParameters: CallableStatementIO[Unit] = FF.liftF(ClearParameters)
val clearWarnings: CallableStatementIO[Unit] = FF.liftF(ClearWarnings)
val close: CallableStatementIO[Unit] = FF.liftF(Close)
val closeOnCompletion: CallableStatementIO[Unit] = FF.liftF(CloseOnCompletion)
val execute: CallableStatementIO[Boolean] = FF.liftF(Execute)
def execute(a: String): CallableStatementIO[Boolean] = FF.liftF(Execute1(a))
def execute(a: String, b: Array[Int]): CallableStatementIO[Boolean] = FF.liftF(Execute2(a, b))
def execute(a: String, b: Array[String]): CallableStatementIO[Boolean] = FF.liftF(Execute3(a, b))
def execute(a: String, b: Int): CallableStatementIO[Boolean] = FF.liftF(Execute4(a, b))
val executeBatch: CallableStatementIO[Array[Int]] = FF.liftF(ExecuteBatch)
val executeLargeBatch: CallableStatementIO[Array[Long]] = FF.liftF(ExecuteLargeBatch)
val executeLargeUpdate: CallableStatementIO[Long] = FF.liftF(ExecuteLargeUpdate)
def executeLargeUpdate(a: String): CallableStatementIO[Long] = FF.liftF(ExecuteLargeUpdate1(a))
def executeLargeUpdate(a: String, b: Array[Int]): CallableStatementIO[Long] = FF.liftF(ExecuteLargeUpdate2(a, b))
def executeLargeUpdate(a: String, b: Array[String]): CallableStatementIO[Long] = FF.liftF(ExecuteLargeUpdate3(a, b))
def executeLargeUpdate(a: String, b: Int): CallableStatementIO[Long] = FF.liftF(ExecuteLargeUpdate4(a, b))
val executeQuery: CallableStatementIO[ResultSet] = FF.liftF(ExecuteQuery)
def executeQuery(a: String): CallableStatementIO[ResultSet] = FF.liftF(ExecuteQuery1(a))
val executeUpdate: CallableStatementIO[Int] = FF.liftF(ExecuteUpdate)
def executeUpdate(a: String): CallableStatementIO[Int] = FF.liftF(ExecuteUpdate1(a))
def executeUpdate(a: String, b: Array[Int]): CallableStatementIO[Int] = FF.liftF(ExecuteUpdate2(a, b))
def executeUpdate(a: String, b: Array[String]): CallableStatementIO[Int] = FF.liftF(ExecuteUpdate3(a, b))
def executeUpdate(a: String, b: Int): CallableStatementIO[Int] = FF.liftF(ExecuteUpdate4(a, b))
// OUT-parameter / column getters, addressed by index (Int) or name (String).
def getArray(a: Int): CallableStatementIO[SqlArray] = FF.liftF(GetArray(a))
def getArray(a: String): CallableStatementIO[SqlArray] = FF.liftF(GetArray1(a))
def getBigDecimal(a: Int): CallableStatementIO[BigDecimal] = FF.liftF(GetBigDecimal(a))
def getBigDecimal(a: String): CallableStatementIO[BigDecimal] = FF.liftF(GetBigDecimal1(a))
def getBlob(a: Int): CallableStatementIO[Blob] = FF.liftF(GetBlob(a))
def getBlob(a: String): CallableStatementIO[Blob] = FF.liftF(GetBlob1(a))
def getBoolean(a: Int): CallableStatementIO[Boolean] = FF.liftF(GetBoolean(a))
def getBoolean(a: String): CallableStatementIO[Boolean] = FF.liftF(GetBoolean1(a))
def getByte(a: Int): CallableStatementIO[Byte] = FF.liftF(GetByte(a))
def getByte(a: String): CallableStatementIO[Byte] = FF.liftF(GetByte1(a))
def getBytes(a: Int): CallableStatementIO[Array[Byte]] = FF.liftF(GetBytes(a))
def getBytes(a: String): CallableStatementIO[Array[Byte]] = FF.liftF(GetBytes1(a))
def getCharacterStream(a: Int): CallableStatementIO[Reader] = FF.liftF(GetCharacterStream(a))
def getCharacterStream(a: String): CallableStatementIO[Reader] = FF.liftF(GetCharacterStream1(a))
def getClob(a: Int): CallableStatementIO[Clob] = FF.liftF(GetClob(a))
def getClob(a: String): CallableStatementIO[Clob] = FF.liftF(GetClob1(a))
val getConnection: CallableStatementIO[Connection] = FF.liftF(GetConnection)
def getDate(a: Int): CallableStatementIO[Date] = FF.liftF(GetDate(a))
def getDate(a: Int, b: Calendar): CallableStatementIO[Date] = FF.liftF(GetDate1(a, b))
def getDate(a: String): CallableStatementIO[Date] = FF.liftF(GetDate2(a))
def getDate(a: String, b: Calendar): CallableStatementIO[Date] = FF.liftF(GetDate3(a, b))
def getDouble(a: Int): CallableStatementIO[Double] = FF.liftF(GetDouble(a))
def getDouble(a: String): CallableStatementIO[Double] = FF.liftF(GetDouble1(a))
val getFetchDirection: CallableStatementIO[Int] = FF.liftF(GetFetchDirection)
val getFetchSize: CallableStatementIO[Int] = FF.liftF(GetFetchSize)
def getFloat(a: Int): CallableStatementIO[Float] = FF.liftF(GetFloat(a))
def getFloat(a: String): CallableStatementIO[Float] = FF.liftF(GetFloat1(a))
val getGeneratedKeys: CallableStatementIO[ResultSet] = FF.liftF(GetGeneratedKeys)
def getInt(a: Int): CallableStatementIO[Int] = FF.liftF(GetInt(a))
def getInt(a: String): CallableStatementIO[Int] = FF.liftF(GetInt1(a))
val getLargeMaxRows: CallableStatementIO[Long] = FF.liftF(GetLargeMaxRows)
val getLargeUpdateCount: CallableStatementIO[Long] = FF.liftF(GetLargeUpdateCount)
def getLong(a: Int): CallableStatementIO[Long] = FF.liftF(GetLong(a))
def getLong(a: String): CallableStatementIO[Long] = FF.liftF(GetLong1(a))
val getMaxFieldSize: CallableStatementIO[Int] = FF.liftF(GetMaxFieldSize)
val getMaxRows: CallableStatementIO[Int] = FF.liftF(GetMaxRows)
val getMetaData: CallableStatementIO[ResultSetMetaData] = FF.liftF(GetMetaData)
val getMoreResults: CallableStatementIO[Boolean] = FF.liftF(GetMoreResults)
def getMoreResults(a: Int): CallableStatementIO[Boolean] = FF.liftF(GetMoreResults1(a))
def getNCharacterStream(a: Int): CallableStatementIO[Reader] = FF.liftF(GetNCharacterStream(a))
def getNCharacterStream(a: String): CallableStatementIO[Reader] = FF.liftF(GetNCharacterStream1(a))
def getNClob(a: Int): CallableStatementIO[NClob] = FF.liftF(GetNClob(a))
def getNClob(a: String): CallableStatementIO[NClob] = FF.liftF(GetNClob1(a))
def getNString(a: Int): CallableStatementIO[String] = FF.liftF(GetNString(a))
def getNString(a: String): CallableStatementIO[String] = FF.liftF(GetNString1(a))
def getObject(a: Int): CallableStatementIO[AnyRef] = FF.liftF(GetObject(a))
def getObject[T](a: Int, b: Class[T]): CallableStatementIO[T] = FF.liftF(GetObject1(a, b))
def getObject(a: Int, b: Map[String, Class[_]]): CallableStatementIO[AnyRef] = FF.liftF(GetObject2(a, b))
def getObject(a: String): CallableStatementIO[AnyRef] = FF.liftF(GetObject3(a))
def getObject[T](a: String, b: Class[T]): CallableStatementIO[T] = FF.liftF(GetObject4(a, b))
def getObject(a: String, b: Map[String, Class[_]]): CallableStatementIO[AnyRef] = FF.liftF(GetObject5(a, b))
val getParameterMetaData: CallableStatementIO[ParameterMetaData] = FF.liftF(GetParameterMetaData)
val getQueryTimeout: CallableStatementIO[Int] = FF.liftF(GetQueryTimeout)
def getRef(a: Int): CallableStatementIO[Ref] = FF.liftF(GetRef(a))
def getRef(a: String): CallableStatementIO[Ref] = FF.liftF(GetRef1(a))
val getResultSet: CallableStatementIO[ResultSet] = FF.liftF(GetResultSet)
val getResultSetConcurrency: CallableStatementIO[Int] = FF.liftF(GetResultSetConcurrency)
val getResultSetHoldability: CallableStatementIO[Int] = FF.liftF(GetResultSetHoldability)
val getResultSetType: CallableStatementIO[Int] = FF.liftF(GetResultSetType)
def getRowId(a: Int): CallableStatementIO[RowId] = FF.liftF(GetRowId(a))
def getRowId(a: String): CallableStatementIO[RowId] = FF.liftF(GetRowId1(a))
def getSQLXML(a: Int): CallableStatementIO[SQLXML] = FF.liftF(GetSQLXML(a))
def getSQLXML(a: String): CallableStatementIO[SQLXML] = FF.liftF(GetSQLXML1(a))
def getShort(a: Int): CallableStatementIO[Short] = FF.liftF(GetShort(a))
def getShort(a: String): CallableStatementIO[Short] = FF.liftF(GetShort1(a))
def getString(a: Int): CallableStatementIO[String] = FF.liftF(GetString(a))
def getString(a: String): CallableStatementIO[String] = FF.liftF(GetString1(a))
def getTime(a: Int): CallableStatementIO[Time] = FF.liftF(GetTime(a))
def getTime(a: Int, b: Calendar): CallableStatementIO[Time] = FF.liftF(GetTime1(a, b))
def getTime(a: String): CallableStatementIO[Time] = FF.liftF(GetTime2(a))
def getTime(a: String, b: Calendar): CallableStatementIO[Time] = FF.liftF(GetTime3(a, b))
def getTimestamp(a: Int): CallableStatementIO[Timestamp] = FF.liftF(GetTimestamp(a))
def getTimestamp(a: Int, b: Calendar): CallableStatementIO[Timestamp] = FF.liftF(GetTimestamp1(a, b))
def getTimestamp(a: String): CallableStatementIO[Timestamp] = FF.liftF(GetTimestamp2(a))
def getTimestamp(a: String, b: Calendar): CallableStatementIO[Timestamp] = FF.liftF(GetTimestamp3(a, b))
def getURL(a: Int): CallableStatementIO[URL] = FF.liftF(GetURL(a))
def getURL(a: String): CallableStatementIO[URL] = FF.liftF(GetURL1(a))
val getUpdateCount: CallableStatementIO[Int] = FF.liftF(GetUpdateCount)
val getWarnings: CallableStatementIO[SQLWarning] = FF.liftF(GetWarnings)
val isCloseOnCompletion: CallableStatementIO[Boolean] = FF.liftF(IsCloseOnCompletion)
val isClosed: CallableStatementIO[Boolean] = FF.liftF(IsClosed)
val isPoolable: CallableStatementIO[Boolean] = FF.liftF(IsPoolable)
def isWrapperFor(a: Class[_]): CallableStatementIO[Boolean] = FF.liftF(IsWrapperFor(a))
// OUT-parameter registration; overloads cover index/name, sql type as
// Int or SQLType, plus optional scale or type-name arguments.
def registerOutParameter(a: Int, b: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter(a, b))
def registerOutParameter(a: Int, b: Int, c: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter1(a, b, c))
def registerOutParameter(a: Int, b: Int, c: String): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter2(a, b, c))
def registerOutParameter(a: Int, b: SQLType): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter3(a, b))
def registerOutParameter(a: Int, b: SQLType, c: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter4(a, b, c))
def registerOutParameter(a: Int, b: SQLType, c: String): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter5(a, b, c))
def registerOutParameter(a: String, b: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter6(a, b))
def registerOutParameter(a: String, b: Int, c: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter7(a, b, c))
def registerOutParameter(a: String, b: Int, c: String): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter8(a, b, c))
def registerOutParameter(a: String, b: SQLType): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter9(a, b))
def registerOutParameter(a: String, b: SQLType, c: Int): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter10(a, b, c))
def registerOutParameter(a: String, b: SQLType, c: String): CallableStatementIO[Unit] = FF.liftF(RegisterOutParameter11(a, b, c))
// IN-parameter setters, index- and name-addressed variants.
def setArray(a: Int, b: SqlArray): CallableStatementIO[Unit] = FF.liftF(SetArray(a, b))
def setAsciiStream(a: Int, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream(a, b))
def setAsciiStream(a: Int, b: InputStream, c: Int): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream1(a, b, c))
def setAsciiStream(a: Int, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream2(a, b, c))
def setAsciiStream(a: String, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream3(a, b))
def setAsciiStream(a: String, b: InputStream, c: Int): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream4(a, b, c))
def setAsciiStream(a: String, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetAsciiStream5(a, b, c))
def setBigDecimal(a: Int, b: BigDecimal): CallableStatementIO[Unit] = FF.liftF(SetBigDecimal(a, b))
def setBigDecimal(a: String, b: BigDecimal): CallableStatementIO[Unit] = FF.liftF(SetBigDecimal1(a, b))
def setBinaryStream(a: Int, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream(a, b))
def setBinaryStream(a: Int, b: InputStream, c: Int): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream1(a, b, c))
def setBinaryStream(a: Int, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream2(a, b, c))
def setBinaryStream(a: String, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream3(a, b))
def setBinaryStream(a: String, b: InputStream, c: Int): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream4(a, b, c))
def setBinaryStream(a: String, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetBinaryStream5(a, b, c))
def setBlob(a: Int, b: Blob): CallableStatementIO[Unit] = FF.liftF(SetBlob(a, b))
def setBlob(a: Int, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetBlob1(a, b))
def setBlob(a: Int, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetBlob2(a, b, c))
def setBlob(a: String, b: Blob): CallableStatementIO[Unit] = FF.liftF(SetBlob3(a, b))
def setBlob(a: String, b: InputStream): CallableStatementIO[Unit] = FF.liftF(SetBlob4(a, b))
def setBlob(a: String, b: InputStream, c: Long): CallableStatementIO[Unit] = FF.liftF(SetBlob5(a, b, c))
def setBoolean(a: Int, b: Boolean): CallableStatementIO[Unit] = FF.liftF(SetBoolean(a, b))
def setBoolean(a: String, b: Boolean): CallableStatementIO[Unit] = FF.liftF(SetBoolean1(a, b))
def setByte(a: Int, b: Byte): CallableStatementIO[Unit] = FF.liftF(SetByte(a, b))
def setByte(a: String, b: Byte): CallableStatementIO[Unit] = FF.liftF(SetByte1(a, b))
def setBytes(a: Int, b: Array[Byte]): CallableStatementIO[Unit] = FF.liftF(SetBytes(a, b))
def setBytes(a: String, b: Array[Byte]): CallableStatementIO[Unit] = FF.liftF(SetBytes1(a, b))
def setCharacterStream(a: Int, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream(a, b))
def setCharacterStream(a: Int, b: Reader, c: Int): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream1(a, b, c))
def setCharacterStream(a: Int, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream2(a, b, c))
def setCharacterStream(a: String, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream3(a, b))
def setCharacterStream(a: String, b: Reader, c: Int): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream4(a, b, c))
def setCharacterStream(a: String, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetCharacterStream5(a, b, c))
def setClob(a: Int, b: Clob): CallableStatementIO[Unit] = FF.liftF(SetClob(a, b))
def setClob(a: Int, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetClob1(a, b))
def setClob(a: Int, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetClob2(a, b, c))
def setClob(a: String, b: Clob): CallableStatementIO[Unit] = FF.liftF(SetClob3(a, b))
def setClob(a: String, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetClob4(a, b))
def setClob(a: String, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetClob5(a, b, c))
def setCursorName(a: String): CallableStatementIO[Unit] = FF.liftF(SetCursorName(a))
def setDate(a: Int, b: Date): CallableStatementIO[Unit] = FF.liftF(SetDate(a, b))
def setDate(a: Int, b: Date, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetDate1(a, b, c))
def setDate(a: String, b: Date): CallableStatementIO[Unit] = FF.liftF(SetDate2(a, b))
def setDate(a: String, b: Date, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetDate3(a, b, c))
def setDouble(a: Int, b: Double): CallableStatementIO[Unit] = FF.liftF(SetDouble(a, b))
def setDouble(a: String, b: Double): CallableStatementIO[Unit] = FF.liftF(SetDouble1(a, b))
def setEscapeProcessing(a: Boolean): CallableStatementIO[Unit] = FF.liftF(SetEscapeProcessing(a))
def setFetchDirection(a: Int): CallableStatementIO[Unit] = FF.liftF(SetFetchDirection(a))
def setFetchSize(a: Int): CallableStatementIO[Unit] = FF.liftF(SetFetchSize(a))
def setFloat(a: Int, b: Float): CallableStatementIO[Unit] = FF.liftF(SetFloat(a, b))
def setFloat(a: String, b: Float): CallableStatementIO[Unit] = FF.liftF(SetFloat1(a, b))
def setInt(a: Int, b: Int): CallableStatementIO[Unit] = FF.liftF(SetInt(a, b))
def setInt(a: String, b: Int): CallableStatementIO[Unit] = FF.liftF(SetInt1(a, b))
def setLargeMaxRows(a: Long): CallableStatementIO[Unit] = FF.liftF(SetLargeMaxRows(a))
def setLong(a: Int, b: Long): CallableStatementIO[Unit] = FF.liftF(SetLong(a, b))
def setLong(a: String, b: Long): CallableStatementIO[Unit] = FF.liftF(SetLong1(a, b))
def setMaxFieldSize(a: Int): CallableStatementIO[Unit] = FF.liftF(SetMaxFieldSize(a))
def setMaxRows(a: Int): CallableStatementIO[Unit] = FF.liftF(SetMaxRows(a))
def setNCharacterStream(a: Int, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetNCharacterStream(a, b))
def setNCharacterStream(a: Int, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetNCharacterStream1(a, b, c))
def setNCharacterStream(a: String, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetNCharacterStream2(a, b))
def setNCharacterStream(a: String, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetNCharacterStream3(a, b, c))
def setNClob(a: Int, b: NClob): CallableStatementIO[Unit] = FF.liftF(SetNClob(a, b))
def setNClob(a: Int, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetNClob1(a, b))
def setNClob(a: Int, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetNClob2(a, b, c))
def setNClob(a: String, b: NClob): CallableStatementIO[Unit] = FF.liftF(SetNClob3(a, b))
def setNClob(a: String, b: Reader): CallableStatementIO[Unit] = FF.liftF(SetNClob4(a, b))
def setNClob(a: String, b: Reader, c: Long): CallableStatementIO[Unit] = FF.liftF(SetNClob5(a, b, c))
def setNString(a: Int, b: String): CallableStatementIO[Unit] = FF.liftF(SetNString(a, b))
def setNString(a: String, b: String): CallableStatementIO[Unit] = FF.liftF(SetNString1(a, b))
def setNull(a: Int, b: Int): CallableStatementIO[Unit] = FF.liftF(SetNull(a, b))
def setNull(a: Int, b: Int, c: String): CallableStatementIO[Unit] = FF.liftF(SetNull1(a, b, c))
def setNull(a: String, b: Int): CallableStatementIO[Unit] = FF.liftF(SetNull2(a, b))
def setNull(a: String, b: Int, c: String): CallableStatementIO[Unit] = FF.liftF(SetNull3(a, b, c))
def setObject(a: Int, b: AnyRef): CallableStatementIO[Unit] = FF.liftF(SetObject(a, b))
def setObject(a: Int, b: AnyRef, c: Int): CallableStatementIO[Unit] = FF.liftF(SetObject1(a, b, c))
def setObject(a: Int, b: AnyRef, c: Int, d: Int): CallableStatementIO[Unit] = FF.liftF(SetObject2(a, b, c, d))
def setObject(a: Int, b: AnyRef, c: SQLType): CallableStatementIO[Unit] = FF.liftF(SetObject3(a, b, c))
def setObject(a: Int, b: AnyRef, c: SQLType, d: Int): CallableStatementIO[Unit] = FF.liftF(SetObject4(a, b, c, d))
def setObject(a: String, b: AnyRef): CallableStatementIO[Unit] = FF.liftF(SetObject5(a, b))
def setObject(a: String, b: AnyRef, c: Int): CallableStatementIO[Unit] = FF.liftF(SetObject6(a, b, c))
def setObject(a: String, b: AnyRef, c: Int, d: Int): CallableStatementIO[Unit] = FF.liftF(SetObject7(a, b, c, d))
def setObject(a: String, b: AnyRef, c: SQLType): CallableStatementIO[Unit] = FF.liftF(SetObject8(a, b, c))
def setObject(a: String, b: AnyRef, c: SQLType, d: Int): CallableStatementIO[Unit] = FF.liftF(SetObject9(a, b, c, d))
def setPoolable(a: Boolean): CallableStatementIO[Unit] = FF.liftF(SetPoolable(a))
def setQueryTimeout(a: Int): CallableStatementIO[Unit] = FF.liftF(SetQueryTimeout(a))
def setRef(a: Int, b: Ref): CallableStatementIO[Unit] = FF.liftF(SetRef(a, b))
def setRowId(a: Int, b: RowId): CallableStatementIO[Unit] = FF.liftF(SetRowId(a, b))
def setRowId(a: String, b: RowId): CallableStatementIO[Unit] = FF.liftF(SetRowId1(a, b))
def setSQLXML(a: Int, b: SQLXML): CallableStatementIO[Unit] = FF.liftF(SetSQLXML(a, b))
def setSQLXML(a: String, b: SQLXML): CallableStatementIO[Unit] = FF.liftF(SetSQLXML1(a, b))
def setShort(a: Int, b: Short): CallableStatementIO[Unit] = FF.liftF(SetShort(a, b))
def setShort(a: String, b: Short): CallableStatementIO[Unit] = FF.liftF(SetShort1(a, b))
def setString(a: Int, b: String): CallableStatementIO[Unit] = FF.liftF(SetString(a, b))
def setString(a: String, b: String): CallableStatementIO[Unit] = FF.liftF(SetString1(a, b))
def setTime(a: Int, b: Time): CallableStatementIO[Unit] = FF.liftF(SetTime(a, b))
def setTime(a: Int, b: Time, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetTime1(a, b, c))
def setTime(a: String, b: Time): CallableStatementIO[Unit] = FF.liftF(SetTime2(a, b))
def setTime(a: String, b: Time, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetTime3(a, b, c))
def setTimestamp(a: Int, b: Timestamp): CallableStatementIO[Unit] = FF.liftF(SetTimestamp(a, b))
def setTimestamp(a: Int, b: Timestamp, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetTimestamp1(a, b, c))
def setTimestamp(a: String, b: Timestamp): CallableStatementIO[Unit] = FF.liftF(SetTimestamp2(a, b))
def setTimestamp(a: String, b: Timestamp, c: Calendar): CallableStatementIO[Unit] = FF.liftF(SetTimestamp3(a, b, c))
def setURL(a: Int, b: URL): CallableStatementIO[Unit] = FF.liftF(SetURL(a, b))
def setURL(a: String, b: URL): CallableStatementIO[Unit] = FF.liftF(SetURL1(a, b))
// Wrapper access and OUT-parameter null check.
def unwrap[T](a: Class[T]): CallableStatementIO[T] = FF.liftF(Unwrap(a))
val wasNull: CallableStatementIO[Boolean] = FF.liftF(WasNull)
// Typeclass instances for CallableStatementIO
// WeakAsync instance for CallableStatementIO. Pure monadic operations
// delegate to the cats-free Monad for Free; effectful operations delegate
// to the smart constructors above, so they remain deferred Op values
// until an interpreter runs the program.
implicit val WeakAsyncCallableStatementIO: WeakAsync[CallableStatementIO] =
new WeakAsync[CallableStatementIO] {
val monad = FF.catsFreeMonadForFree[CallableStatementOp]
override val applicative = monad
override val rootCancelScope = CancelScope.Cancelable
override def pure[A](x: A): CallableStatementIO[A] = monad.pure(x)
override def flatMap[A, B](fa: CallableStatementIO[A])(f: A => CallableStatementIO[B]): CallableStatementIO[B] = monad.flatMap(fa)(f)
override def tailRecM[A, B](a: A)(f: A => CallableStatementIO[Either[A, B]]): CallableStatementIO[B] = monad.tailRecM(a)(f)
override def raiseError[A](e: Throwable): CallableStatementIO[A] = module.raiseError(e)
override def handleErrorWith[A](fa: CallableStatementIO[A])(f: Throwable => CallableStatementIO[A]): CallableStatementIO[A] = module.handleErrorWith(fa)(f)
override def monotonic: CallableStatementIO[FiniteDuration] = module.monotonic
override def realTime: CallableStatementIO[FiniteDuration] = module.realtime
override def suspend[A](hint: Sync.Type)(thunk: => A): CallableStatementIO[A] = module.suspend(hint)(thunk)
override def forceR[A, B](fa: CallableStatementIO[A])(fb: CallableStatementIO[B]): CallableStatementIO[B] = module.forceR(fa)(fb)
override def uncancelable[A](body: Poll[CallableStatementIO] => CallableStatementIO[A]): CallableStatementIO[A] = module.uncancelable(body)
override def canceled: CallableStatementIO[Unit] = module.canceled
override def onCancel[A](fa: CallableStatementIO[A], fin: CallableStatementIO[Unit]): CallableStatementIO[A] = module.onCancel(fa, fin)
override def fromFuture[A](fut: CallableStatementIO[Future[A]]): CallableStatementIO[A] = module.fromFuture(fut)
}
}
// Source: tpolecat/doobie — modules/free/src/main/scala/doobie/free/callablestatement.scala (Scala, MIT, 75,774 bytes)
package foo.test
/** Minimal object exposing a single integer constant. */
object Foo {
  val foo = 5
}
// Source: xeno-by/old-scalameta-sbt — sbt/src/sbt-test/tests/fork-uncaught/src/main/scala/Foo.scala (Scala, BSD-3-Clause, 44 bytes)
package scalydomain.core
import scala.collection._
import scala.util.Random
import scala.util.control.Breaks._
import java.io.File
import java.security.MessageDigest
import java.util.concurrent.atomic._
import org.msgpack.annotation.Message
import org.msgpack.ScalaMessagePack
import org.iq80.leveldb.{Options}
import org.fusesource.leveldbjni.JniDBFactory.{factory}
class ModelDbWriter(val path: String) {
val ngrams: concurrent.Map[String, NgramEntry] = new concurrent.TrieMap()
val file = new File(path)
/** Records one observation of `nextSymbol` immediately following `ngram`,
  * incrementing the per-symbol counter for that ngram's entry.
  * Safe for concurrent callers: uses a compare-and-set retry loop on the
  * entry's `nextSymbols` concurrent map. */
def addNgramNextSymbol(ngram: String, nextSymbol: String) {
val entry = getOrAddNgram(ngram)
//Perform a thread-safe increment of the symbol count.
//Note how we retry if the underlying value changed between read and replace.
var nextSymbolCount = 0l
do {
//Get the current count, adding a new entry with a zero count if this symbol hasn't
//been seen before. Remember kids, concurrency is hard.
nextSymbolCount = entry.nextSymbols.get(nextSymbol) match {
case Some(x) => x
case None => {
entry.nextSymbols.putIfAbsent(nextSymbol, 0l) match {
case Some(y) => y //another thread inserted first; adopt its value
case None => 0l //our zero entry won the race
}
}
}
} while (!entry.nextSymbols.replace(nextSymbol, nextSymbolCount, nextSymbolCount + 1))
}
/** Returns the entry for `ngram`, atomically inserting a fresh empty entry
  * when none exists. If two threads race on the insert, both receive the
  * single entry that won the `putIfAbsent`. */
def getOrAddNgram(ngram: String) = {
  ngrams.get(ngram).getOrElse {
    val fresh = new NgramEntry()
    // putIfAbsent yields the previously-mapped entry if another thread
    // beat us to the insert; otherwise our fresh entry is now mapped.
    ngrams.putIfAbsent(ngram, fresh).getOrElse(fresh)
  }
}
/** Bumps the atomic occurrence counter for `ngram`, creating its entry on demand. */
def incrementNgramOccurrenceCount(ngram: String) {
  val entry = getOrAddNgram(ngram)
  entry.occurrenceCount.getAndIncrement()
}
/** Persists the accumulated model to a LevelDB database at `file`.
  * Writes a ModelInfo record (ngram size and total occurrence count),
  * then one record per ngram with derived statistics, and finally
  * compacts the database. The DB handle is closed even on failure.
  * NOTE(review): iteration over `ngrams` is not synchronized with the
  * concurrent writers above — assumes training has finished; confirm callers. */
def saveToDisk(ngramSize: Int) {
val options = new Options()
options.createIfMissing(true)
options.cacheSize(ModelDb.CacheSize)
options.blockSize(ModelDb.BlockSize)
val db = factory.open(file, options)
try {
val info = new ModelInfo()
info.n = ngramSize
info.totalOccurrenceCount = ngrams.map(_._2.occurrenceCount.get).sum
db.put(ModelDb.ModelInfoKey, ScalaMessagePack.write(info))
ngrams.foreach { pair =>
val (key, entry) = pair
//Compute the total count of all next symbols prior to writing to disk
entry.allNextSymbolsSum = entry.nextSymbols.map(_._2).sum
//Compute the probability of this ngram appearing in the training corpus
entry.p = entry.occurrenceCount.get.toDouble / info.totalOccurrenceCount.toDouble
//Now write to the database
db.put(key.getBytes("UTF-8"), writeEntry(entry))
}
db.compactRange(null, null)
//println(db.getProperty("leveldb.stats"))
} finally {
db.close
}
}
/** Serializes a single entry to its MessagePack byte representation. */
def writeEntry(entry: NgramEntry) = ScalaMessagePack.write(entry)
} // Source: anelson/scalydomain — core/src/main/scala/ModelDbWriter.scala (Scala, Apache-2.0, 3,024 bytes)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io.{File, NotSerializableException}
import java.lang.Thread.UncaughtExceptionHandler
import java.lang.management.ManagementFactory
import java.net.{URI, URL}
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent._
import javax.annotation.concurrent.GuardedBy
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.rpc.RpcTimeout
import org.apache.spark.scheduler.{DirectTaskResult, IndirectTaskResult, Task, TaskDescription}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{StorageLevel, TaskResultBlockId}
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/**
* Spark executor, backed by a threadpool to run tasks.
*
* This can be used with Mesos, YARN, and the standalone scheduler.
* An internal RPC interface is used for communication with the driver,
* except in the case of Mesos fine-grained mode.
*/
private[spark] class Executor(
executorId: String,
executorHostname: String,
env: SparkEnv,
userClassPath: Seq[URL] = Nil,
isLocal: Boolean = false,
uncaughtExceptionHandler: UncaughtExceptionHandler = SparkUncaughtExceptionHandler)
extends Logging {
logInfo(s"Starting executor ID $executorId on host $executorHostname")
// Application dependencies (added through SparkContext) that we've fetched so far on this node.
// Each map holds the master's timestamp for the version of that file or JAR we got.
private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()
private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))
private val conf = env.conf
// No ip or host:port - just hostname
Utils.checkHost(executorHostname, "Expected executed slave to be a hostname")
// must not have port specified.
assert (0 == Utils.parseHostPort(executorHostname)._2)
// Make sure the local hostname we report matches the cluster scheduler's name for this host
Utils.setCustomHostname(executorHostname)
if (!isLocal) {
// Setup an uncaught exception handler for non-local mode.
// Make any thread terminations due to uncaught exceptions kill the entire
// executor process to avoid surprising stalls.
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
}
// Start worker thread pool
private val threadPool = {
val threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Executor task launch worker-%d")
.setThreadFactory(new ThreadFactory {
override def newThread(r: Runnable): Thread =
// Use UninterruptibleThread to run tasks so that we can allow running codes without being
// interrupted by `Thread.interrupt()`. Some issues, such as KAFKA-1894, HADOOP-10622,
// will hang forever if some methods are interrupted.
new UninterruptibleThread(r, "unused") // thread name will be set by ThreadFactoryBuilder
})
.build()
Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor]
}
private val executorSource = new ExecutorSource(threadPool, executorId)
// Pool used for threads that supervise task killing / cancellation
private val taskReaperPool = ThreadUtils.newDaemonCachedThreadPool("Task reaper")
// For tasks which are in the process of being killed, this map holds the most recently created
// TaskReaper. All accesses to this map should be synchronized on the map itself (this isn't
// a ConcurrentHashMap because we use the synchronization for purposes other than simply guarding
// the integrity of the map's internal state). The purpose of this map is to prevent the creation
// of a separate TaskReaper for every killTask() of a given task. Instead, this map allows us to
// track whether an existing TaskReaper fulfills the role of a TaskReaper that we would otherwise
// create. The map key is a task id.
private val taskReaperForTask: HashMap[Long, TaskReaper] = HashMap[Long, TaskReaper]()
if (!isLocal) {
env.metricsSystem.registerSource(executorSource)
env.blockManager.initialize(conf.getAppId)
}
// Whether to load classes in user jars before those in Spark jars
private val userClassPathFirst = conf.getBoolean("spark.executor.userClassPathFirst", false)
// Whether to monitor killed / interrupted tasks
private val taskReaperEnabled = conf.getBoolean("spark.task.reaper.enabled", false)
// Create our ClassLoader
// do this after SparkEnv creation so can access the SecurityManager
private val urlClassLoader = createClassLoader()
private val replClassLoader = addReplClassLoaderIfNeeded(urlClassLoader)
// Set the classloader for serializer
env.serializer.setDefaultClassLoader(replClassLoader)
// Max size of direct result. If task result is bigger than this, we use the block manager
// to send the result back.
private val maxDirectResultSize = Math.min(
conf.getSizeAsBytes("spark.task.maxDirectResultSize", 1L << 20),
RpcUtils.maxMessageSizeBytes(conf))
// Limit of bytes for total size of results (default is 1GB)
private val maxResultSize = Utils.getMaxResultSize(conf)
// Maintains the list of running tasks.
private val runningTasks = new ConcurrentHashMap[Long, TaskRunner]
// Executor for the heartbeat task.
private val heartbeater = ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-heartbeater")
// must be initialized before running startDriverHeartbeat()
private val heartbeatReceiverRef =
RpcUtils.makeDriverRef(HeartbeatReceiver.ENDPOINT_NAME, conf, env.rpcEnv)
/**
* When an executor is unable to send heartbeats to the driver more than `HEARTBEAT_MAX_FAILURES`
* times, it should kill itself. The default value is 60. It means we will retry to send
* heartbeats about 10 minutes because the heartbeat interval is 10s.
*/
private val HEARTBEAT_MAX_FAILURES = conf.getInt("spark.executor.heartbeat.maxFailures", 60)
/**
* Count the failure times of heartbeat. It should only be accessed in the heartbeat thread. Each
* successful heartbeat will reset it to 0.
*/
private var heartbeatFailures = 0
startDriverHeartbeater()
/** Number of tasks currently registered as running on this executor. */
private[executor] def numRunningTasks: Int = runningTasks.size()
/**
 * Starts executing a task on this executor's thread pool.
 *
 * The runner is registered in `runningTasks` before being submitted, so it can be
 * found by kill/heartbeat paths as soon as it exists.
 */
def launchTask(context: ExecutorBackend, taskDescription: TaskDescription): Unit = {
  val runner = new TaskRunner(context, taskDescription)
  runningTasks.put(taskDescription.taskId, runner)
  threadPool.execute(runner)
}
/**
 * Requests that the task with the given id be killed.
 *
 * When `spark.task.reaper.enabled` is set, the kill is delegated to a [[TaskReaper]] that
 * issues the kill and then supervises the task until it actually exits; otherwise the kill
 * flag is signalled directly on the [[TaskRunner]]. A no-op if the task is not running.
 */
def killTask(taskId: Long, interruptThread: Boolean, reason: String): Unit = {
  val taskRunner = runningTasks.get(taskId)
  if (taskRunner != null) {
    if (taskReaperEnabled) {
      val maybeNewTaskReaper: Option[TaskReaper] = taskReaperForTask.synchronized {
        // Create a reaper only if none exists for this task yet, or if the existing one was
        // created with interruptThread = false and this request escalates to interruption.
        val shouldCreateReaper = taskReaperForTask.get(taskId) match {
          case None => true
          case Some(existingReaper) => interruptThread && !existingReaper.interruptThread
        }
        if (shouldCreateReaper) {
          val taskReaper = new TaskReaper(
            taskRunner, interruptThread = interruptThread, reason = reason)
          taskReaperForTask(taskId) = taskReaper
          Some(taskReaper)
        } else {
          None
        }
      }
      // Execute the TaskReaper from outside of the synchronized block.
      maybeNewTaskReaper.foreach(taskReaperPool.execute)
    } else {
      // Reaper disabled: signal the kill directly on the running task.
      taskRunner.kill(interruptThread = interruptThread, reason = reason)
    }
  }
}
/**
* Function to kill the running tasks in an executor.
* This can be called by executor back-ends to kill the
* tasks instead of taking the JVM down.
* @param interruptThread whether to interrupt the task thread
*/
/**
 * Kills every task currently registered as running, by delegating each one to `killTask`.
 *
 * @param interruptThread whether to interrupt the task thread
 * @param reason human-readable reason propagated to each kill request
 */
def killAllTasks(interruptThread: Boolean, reason: String): Unit = {
  for (taskId <- runningTasks.keys().asScala) {
    killTask(taskId, interruptThread = interruptThread, reason = reason)
  }
}
/**
 * Shuts the executor down: flushes metrics, stops the heartbeat thread (waiting a bounded
 * amount of time for it to drain), shuts down the task thread pool, and — unless running in
 * local mode — stops the SparkEnv.
 */
def stop(): Unit = {
  // Flush any pending metrics before tearing things down.
  env.metricsSystem.report()
  heartbeater.shutdown()
  // Give an in-flight heartbeat a bounded window to complete.
  heartbeater.awaitTermination(10, TimeUnit.SECONDS)
  threadPool.shutdown()
  // In local mode the SparkEnv is presumably shared with the driver, so it is not stopped
  // here — only remote executors own their env.
  if (!isLocal) {
    env.stop()
  }
}
/** Returns the total amount of time this JVM process has spent in garbage collection. */
private def computeTotalGcTime(): Long = {
  // Accumulate the collection time reported by every garbage collector bean.
  ManagementFactory.getGarbageCollectorMXBeans.asScala
    .foldLeft(0L)((total, bean) => total + bean.getCollectionTime)
}
class TaskRunner(
execBackend: ExecutorBackend,
private val taskDescription: TaskDescription)
extends Runnable {
val taskId = taskDescription.taskId
val threadName = s"Executor task launch worker for task $taskId"
private val taskName = taskDescription.name
/** If specified, this task has been killed and this option contains the reason. */
@volatile private var reasonIfKilled: Option[String] = None
@volatile private var threadId: Long = -1
def getThreadId: Long = threadId
/** Whether this task has been finished. */
@GuardedBy("TaskRunner.this")
private var finished = false
def isFinished: Boolean = synchronized { finished }
/** How much the JVM process has spent in GC when the task starts to run. */
@volatile var startGCTime: Long = _
/**
* The task to run. This will be set in run() by deserializing the task binary coming
* from the driver. Once it is set, it will never be changed.
*/
@volatile var task: Task[Any] = _
/**
 * Asks this task to stop, recording the reason.
 *
 * The reason is recorded *before* checking `task`, so that run() can observe it even when
 * the kill arrives before the task has been deserialized. The kill is only forwarded to the
 * task if it has not already finished; `finished` is guarded by TaskRunner.this, hence the
 * synchronized block.
 */
def kill(interruptThread: Boolean, reason: String): Unit = {
  logInfo(s"Executor is trying to kill $taskName (TID $taskId), reason: $reason")
  reasonIfKilled = Some(reason)
  if (task != null) {
    synchronized {
      if (!finished) {
        task.kill(interruptThread, reason)
      }
    }
  }
}
/**
* Set the finished flag to true and clear the current thread's interrupt status
*/
private def setTaskFinishedAndClearInterruptStatus(): Unit = synchronized {
  // `finished` is guarded by TaskRunner.this; this whole method body holds that lock.
  this.finished = true
  // SPARK-14234 - Reset the interrupted status of the thread to avoid the
  // ClosedByInterruptException during execBackend.statusUpdate which causes
  // Executor to crash
  Thread.interrupted()
  // Notify any waiting TaskReapers. Generally there will only be one reaper per task but there
  // is a rare corner-case where one task can have two reapers in case cancel(interrupt=False)
  // is followed by cancel(interrupt=True). Thus we use notifyAll() to avoid a lost wakeup:
  notifyAll()
}
/**
 * Deserializes and runs the task, reports its result (directly or via the block manager
 * depending on size), and translates every failure mode — fetch failure, kill, commit
 * denial, arbitrary exception — into the appropriate status update to the driver.
 */
override def run(): Unit = {
  threadId = Thread.currentThread.getId
  Thread.currentThread.setName(threadName)
  val threadMXBean = ManagementFactory.getThreadMXBean
  val taskMemoryManager = new TaskMemoryManager(env.memoryManager, taskId)
  val deserializeStartTime = System.currentTimeMillis()
  // CPU-time measurements are best-effort: zero when the JVM can't report them.
  val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
    threadMXBean.getCurrentThreadCpuTime
  } else 0L
  Thread.currentThread.setContextClassLoader(replClassLoader)
  val ser = env.closureSerializer.newInstance()
  logInfo(s"Running $taskName (TID $taskId)")
  // Tell the driver the task is running before doing any heavy work.
  execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
  var taskStart: Long = 0
  var taskStartCpu: Long = 0
  startGCTime = computeTotalGcTime()
  try {
    // Must be set before updateDependencies() is called, in case fetching dependencies
    // requires access to properties contained within (e.g. for access control).
    Executor.taskDeserializationProps.set(taskDescription.properties)
    updateDependencies(taskDescription.addedFiles, taskDescription.addedJars)
    task = ser.deserialize[Task[Any]](
      taskDescription.serializedTask, Thread.currentThread.getContextClassLoader)
    task.localProperties = taskDescription.properties
    task.setTaskMemoryManager(taskMemoryManager)
    // If this task has been killed before we deserialized it, let's quit now. Otherwise,
    // continue executing the task.
    val killReason = reasonIfKilled
    if (killReason.isDefined) {
      // Throw an exception rather than returning, because returning within a try{} block
      // causes a NonLocalReturnControl exception to be thrown. The NonLocalReturnControl
      // exception will be caught by the catch block, leading to an incorrect ExceptionFailure
      // for the task.
      throw new TaskKilledException(killReason.get)
    }
    logDebug("Task " + taskId + "'s epoch is " + task.epoch)
    env.mapOutputTracker.updateEpoch(task.epoch)
    // Run the actual task and measure its runtime.
    taskStart = System.currentTimeMillis()
    taskStartCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    var threwException = true
    val value = try {
      val res = task.run(
        taskAttemptId = taskId,
        attemptNumber = taskDescription.attemptNumber,
        metricsSystem = env.metricsSystem)
      threwException = false
      res
    } finally {
      // Always release block locks and task memory; leaks are only reported (or escalated
      // to failures, when so configured) if the task otherwise succeeded.
      val releasedLocks = env.blockManager.releaseAllLocksForTask(taskId)
      val freedMemory = taskMemoryManager.cleanUpAllAllocatedMemory()
      if (freedMemory > 0 && !threwException) {
        val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, TID = $taskId"
        if (conf.getBoolean("spark.unsafe.exceptionOnMemoryLeak", false)) {
          throw new SparkException(errMsg)
        } else {
          logWarning(errMsg)
        }
      }
      if (releasedLocks.nonEmpty && !threwException) {
        val errMsg =
          s"${releasedLocks.size} block locks were not released by TID = $taskId:\\n" +
            releasedLocks.mkString("[", ", ", "]")
        if (conf.getBoolean("spark.storage.exceptionOnPinLeak", false)) {
          throw new SparkException(errMsg)
        } else {
          logInfo(errMsg)
        }
      }
    }
    task.context.fetchFailed.foreach { fetchFailure =>
      // uh-oh. it appears the user code has caught the fetch-failure without throwing any
      // other exceptions. Its *possible* this is what the user meant to do (though highly
      // unlikely). So we will log an error and keep going.
      logError(s"TID ${taskId} completed successfully though internally it encountered " +
        s"unrecoverable fetch failures! Most likely this means user code is incorrectly " +
        s"swallowing Spark's internal ${classOf[FetchFailedException]}", fetchFailure)
    }
    val taskFinish = System.currentTimeMillis()
    val taskFinishCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    // If the task has been killed, let's fail it.
    task.context.killTaskIfInterrupted()
    val resultSer = env.serializer.newInstance()
    val beforeSerialization = System.currentTimeMillis()
    val valueBytes = resultSer.serialize(value)
    val afterSerialization = System.currentTimeMillis()
    // Deserialization happens in two parts: first, we deserialize a Task object, which
    // includes the Partition. Second, Task.run() deserializes the RDD and function to be run.
    task.metrics.setExecutorDeserializeTime(
      (taskStart - deserializeStartTime) + task.executorDeserializeTime)
    task.metrics.setExecutorDeserializeCpuTime(
      (taskStartCpu - deserializeStartCpuTime) + task.executorDeserializeCpuTime)
    // We need to subtract Task.run()'s deserialization time to avoid double-counting
    task.metrics.setExecutorRunTime((taskFinish - taskStart) - task.executorDeserializeTime)
    task.metrics.setExecutorCpuTime(
      (taskFinishCpu - taskStartCpu) - task.executorDeserializeCpuTime)
    task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
    task.metrics.setResultSerializationTime(afterSerialization - beforeSerialization)
    // Note: accumulator updates must be collected after TaskMetrics is updated
    val accumUpdates = task.collectAccumulatorUpdates()
    // TODO: do not serialize value twice
    val directResult = new DirectTaskResult(valueBytes, accumUpdates)
    val serializedDirectResult = ser.serialize(directResult)
    val resultSize = serializedDirectResult.limit
    // directSend = sending directly back to the driver
    val serializedResult: ByteBuffer = {
      if (maxResultSize > 0 && resultSize > maxResultSize) {
        // Over the global result-size cap: drop the payload and only report its size.
        logWarning(s"Finished $taskName (TID $taskId). Result is larger than maxResultSize " +
          s"(${Utils.bytesToString(resultSize)} > ${Utils.bytesToString(maxResultSize)}), " +
          s"dropping it.")
        ser.serialize(new IndirectTaskResult[Any](TaskResultBlockId(taskId), resultSize))
      } else if (resultSize > maxDirectResultSize) {
        // Too big for an RPC message but under the cap: ship via the block manager.
        val blockId = TaskResultBlockId(taskId)
        env.blockManager.putBytes(
          blockId,
          new ChunkedByteBuffer(serializedDirectResult.duplicate()),
          StorageLevel.MEMORY_AND_DISK_SER)
        logInfo(
          s"Finished $taskName (TID $taskId). $resultSize bytes result sent via BlockManager)")
        ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
      } else {
        // Small enough to be sent inline with the status update.
        logInfo(s"Finished $taskName (TID $taskId). $resultSize bytes result sent to driver")
        serializedDirectResult
      }
    }
    setTaskFinishedAndClearInterruptStatus()
    execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
  } catch {
    case t: Throwable if hasFetchFailure && !Utils.isFatalError(t) =>
      val reason = task.context.fetchFailed.get.toTaskFailedReason
      if (!t.isInstanceOf[FetchFailedException]) {
        // there was a fetch failure in the task, but some user code wrapped that exception
        // and threw something else. Regardless, we treat it as a fetch failure.
        val fetchFailedCls = classOf[FetchFailedException].getName
        logWarning(s"TID ${taskId} encountered a ${fetchFailedCls} and " +
          s"failed, but the ${fetchFailedCls} was hidden by another " +
          s"exception. Spark is handling this like a fetch failure and ignoring the " +
          s"other exception: $t")
      }
      setTaskFinishedAndClearInterruptStatus()
      execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
    case t: TaskKilledException =>
      logInfo(s"Executor killed $taskName (TID $taskId), reason: ${t.reason}")
      setTaskFinishedAndClearInterruptStatus()
      execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(TaskKilled(t.reason)))
    case _: InterruptedException | NonFatal(_) if
        task != null && task.reasonIfKilled.isDefined =>
      val killReason = task.reasonIfKilled.getOrElse("unknown reason")
      logInfo(s"Executor interrupted and killed $taskName (TID $taskId), reason: $killReason")
      setTaskFinishedAndClearInterruptStatus()
      execBackend.statusUpdate(
        taskId, TaskState.KILLED, ser.serialize(TaskKilled(killReason)))
    case CausedBy(cDE: CommitDeniedException) =>
      val reason = cDE.toTaskFailedReason
      setTaskFinishedAndClearInterruptStatus()
      execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
    case t: Throwable =>
      // Attempt to exit cleanly by informing the driver of our failure.
      // If anything goes wrong (or this was a fatal exception), we will delegate to
      // the default uncaught exception handler, which will terminate the Executor.
      logError(s"Exception in $taskName (TID $taskId)", t)
      // SPARK-20904: Do not report failure to driver if it happened during shut down. Because
      // libraries may set up shutdown hooks that race with running tasks during shutdown,
      // spurious failures may occur and can result in improper accounting in the driver (e.g.
      // the task failure would not be ignored if the shutdown happened because of preemption,
      // instead of an app issue).
      if (!ShutdownHookManager.inShutdown()) {
        // Collect latest accumulator values to report back to the driver
        val accums: Seq[AccumulatorV2[_, _]] =
          if (task != null) {
            task.metrics.setExecutorRunTime(System.currentTimeMillis() - taskStart)
            task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
            task.collectAccumulatorUpdates(taskFailed = true)
          } else {
            Seq.empty
          }
        val accUpdates = accums.map(acc => acc.toInfo(Some(acc.value), None))
        val serializedTaskEndReason = {
          try {
            ser.serialize(new ExceptionFailure(t, accUpdates).withAccums(accums))
          } catch {
            case _: NotSerializableException =>
              // t is not serializable so just send the stacktrace
              ser.serialize(new ExceptionFailure(t, accUpdates, false).withAccums(accums))
          }
        }
        setTaskFinishedAndClearInterruptStatus()
        execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskEndReason)
      } else {
        logInfo("Not reporting error to driver during JVM shutdown.")
      }
      // Don't forcibly exit unless the exception was inherently fatal, to avoid
      // stopping other tasks unnecessarily.
      if (Utils.isFatalError(t)) {
        uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t)
      }
  } finally {
    // Deregister in every case so the heartbeat/kill paths stop seeing this task.
    runningTasks.remove(taskId)
  }
}
/** True when a deserialized task exists, has a context, and that context recorded a fetch failure. */
private def hasFetchFailure: Boolean = {
  Option(task).flatMap(t => Option(t.context)).exists(_.fetchFailed.isDefined)
}
}
/**
* Supervises the killing / cancellation of a task by sending the interrupted flag, optionally
* sending a Thread.interrupt(), and monitoring the task until it finishes.
*
* Spark's current task cancellation / task killing mechanism is "best effort" because some tasks
* may not be interruptable or may not respond to their "killed" flags being set. If a significant
* fraction of a cluster's task slots are occupied by tasks that have been marked as killed but
* remain running then this can lead to a situation where new jobs and tasks are starved of
* resources that are being used by these zombie tasks.
*
* The TaskReaper was introduced in SPARK-18761 as a mechanism to monitor and clean up zombie
* tasks. For backwards-compatibility / backportability this component is disabled by default
* and must be explicitly enabled by setting `spark.task.reaper.enabled=true`.
*
* A TaskReaper is created for a particular task when that task is killed / cancelled. Typically
* a task will have only one TaskReaper, but it's possible for a task to have up to two reapers
* in case kill is called twice with different values for the `interrupt` parameter.
*
* Once created, a TaskReaper will run until its supervised task has finished running. If the
* TaskReaper has not been configured to kill the JVM after a timeout (i.e. if
* `spark.task.reaper.killTimeout < 0`) then this implies that the TaskReaper may run indefinitely
* if the supervised task never exits.
*/
private class TaskReaper(
taskRunner: TaskRunner,
val interruptThread: Boolean,
val reason: String)
extends Runnable {
private[this] val taskId: Long = taskRunner.taskId
private[this] val killPollingIntervalMs: Long =
conf.getTimeAsMs("spark.task.reaper.pollingInterval", "10s")
private[this] val killTimeoutMs: Long = conf.getTimeAsMs("spark.task.reaper.killTimeout", "-1")
private[this] val takeThreadDump: Boolean =
conf.getBoolean("spark.task.reaper.threadDump", true)
/**
 * Issues the kill once, then monitors the task until it finishes, periodically logging
 * (and optionally thread-dumping) if it refuses to die. If a positive kill timeout is
 * configured and exceeded, either logs an error (local mode) or throws, which escalates
 * to the uncaught-exception handler and kills the executor JVM.
 */
override def run(): Unit = {
  val startTimeMs = System.currentTimeMillis()
  def elapsedTimeMs = System.currentTimeMillis() - startTimeMs
  // A non-positive killTimeoutMs disables the timeout entirely.
  def timeoutExceeded(): Boolean = killTimeoutMs > 0 && elapsedTimeMs > killTimeoutMs
  try {
    // Only attempt to kill the task once. If interruptThread = false then a second kill
    // attempt would be a no-op and if interruptThread = true then it may not be safe or
    // effective to interrupt multiple times:
    taskRunner.kill(interruptThread = interruptThread, reason = reason)
    // Monitor the killed task until it exits. The synchronization logic here is complicated
    // because we don't want to synchronize on the taskRunner while possibly taking a thread
    // dump, but we also need to be careful to avoid races between checking whether the task
    // has finished and wait()ing for it to finish.
    var finished: Boolean = false
    while (!finished && !timeoutExceeded()) {
      taskRunner.synchronized {
        // We need to synchronize on the TaskRunner while checking whether the task has
        // finished in order to avoid a race where the task is marked as finished right after
        // we check and before we call wait().
        if (taskRunner.isFinished) {
          finished = true
        } else {
          // Woken early by setTaskFinishedAndClearInterruptStatus()'s notifyAll(), or
          // times out after the polling interval.
          taskRunner.wait(killPollingIntervalMs)
        }
      }
      if (taskRunner.isFinished) {
        finished = true
      } else {
        logWarning(s"Killed task $taskId is still running after $elapsedTimeMs ms")
        if (takeThreadDump) {
          try {
            Utils.getThreadDumpForThread(taskRunner.getThreadId).foreach { thread =>
              // Only log the dump if the thread is still running this task (thread names
              // are reused by the pool).
              if (thread.threadName == taskRunner.threadName) {
                logWarning(s"Thread dump from task $taskId:\\n${thread.stackTrace}")
              }
            }
          } catch {
            case NonFatal(e) =>
              logWarning("Exception thrown while obtaining thread dump: ", e)
          }
        }
      }
    }
    if (!taskRunner.isFinished && timeoutExceeded()) {
      if (isLocal) {
        logError(s"Killed task $taskId could not be stopped within $killTimeoutMs ms; " +
          "not killing JVM because we are running in local mode.")
      } else {
        // In non-local-mode, the exception thrown here will bubble up to the uncaught exception
        // handler and cause the executor JVM to exit.
        throw new SparkException(
          s"Killing executor JVM because killed task $taskId could not be stopped within " +
            s"$killTimeoutMs ms.")
      }
    }
  } finally {
    // Clean up entries in the taskReaperForTask map.
    taskReaperForTask.synchronized {
      taskReaperForTask.get(taskId).foreach { taskReaperInMap =>
        if (taskReaperInMap eq this) {
          taskReaperForTask.remove(taskId)
        } else {
          // This must have been a TaskReaper where interruptThread == false where a subsequent
          // killTask() call for the same task had interruptThread == true and overwrote the
          // map entry.
        }
      }
    }
  }
}
}
/**
* Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
* created by the interpreter to the search path
*/
/**
 * Builds the task classloader from the user class path plus any jars fetched so far,
 * choosing child-first or parent-first delegation based on `spark.executor.userClassPathFirst`.
 */
private def createClassLoader(): MutableURLClassLoader = {
  // Bootstrap the list of jars with the user class path.
  val now = System.currentTimeMillis()
  userClassPath.foreach { url =>
    // Register each user-classpath jar by its file name, stamped with the current time.
    currentJars(url.getPath().split("/").last) = now
  }
  val currentLoader = Utils.getContextOrSparkClassLoader
  // For each of the jars in the jarSet, add them to the class loader.
  // We assume each of the files has already been fetched.
  val urls = userClassPath.toArray ++ currentJars.keySet.map { uri =>
    new File(uri.split("/").last).toURI.toURL
  }
  if (userClassPathFirst) {
    // User jars take precedence over identically-named classes on the parent loader.
    new ChildFirstURLClassLoader(urls, currentLoader)
  } else {
    new MutableURLClassLoader(urls, currentLoader)
  }
}
/**
* If the REPL is in use, add another ClassLoader that will read
* new classes defined by the REPL as the user types code
*/
/**
 * Wraps `parent` in an ExecutorClassLoader (loaded reflectively, since the REPL module may
 * not be on the classpath) when `spark.repl.class.uri` is configured; otherwise returns
 * `parent` unchanged. Exits the JVM if the REPL loader class cannot be found.
 */
private def addReplClassLoaderIfNeeded(parent: ClassLoader): ClassLoader = {
  val classUri = conf.get("spark.repl.class.uri", null)
  if (classUri != null) {
    logInfo("Using REPL class URI: " + classUri)
    try {
      // Box the flag so it matches the reflective constructor's ClassLoader/Boolean signature.
      val _userClassPathFirst: java.lang.Boolean = userClassPathFirst
      val klass = Utils.classForName("org.apache.spark.repl.ExecutorClassLoader")
        .asInstanceOf[Class[_ <: ClassLoader]]
      val constructor = klass.getConstructor(classOf[SparkConf], classOf[SparkEnv],
        classOf[String], classOf[ClassLoader], classOf[Boolean])
      constructor.newInstance(conf, env, classUri, parent, _userClassPathFirst)
    } catch {
      case _: ClassNotFoundException =>
        logError("Could not find org.apache.spark.repl.ExecutorClassLoader on classpath!")
        System.exit(1)
        // Unreachable after System.exit, but required to satisfy the expression type.
        null
    }
  } else {
    parent
  }
}
/**
* Download any missing dependencies if we receive a new set of files and JARs from the
* SparkContext. Also adds any new JARs we fetched to the class loader.
*/
/**
 * Download any missing dependencies if we receive a new set of files and JARs from the
 * SparkContext. Also adds any new JARs we fetched to the class loader.
 *
 * Both maps go from dependency name to the driver's timestamp for it; a dependency is
 * re-fetched only when the incoming timestamp is newer than the one recorded locally.
 * The whole body is synchronized so concurrent tasks don't fetch the same file twice.
 */
private def updateDependencies(newFiles: Map[String, Long], newJars: Map[String, Long]) {
  // Lazy so the Hadoop configuration is only built if something actually needs fetching.
  lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
  synchronized {
    // Fetch missing dependencies
    for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
      logInfo("Fetching " + name + " with timestamp " + timestamp)
      // Fetch file with useCache mode, close cache for local mode.
      Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
        env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
      currentFiles(name) = timestamp
    }
    for ((name, timestamp) <- newJars) {
      val localName = new URI(name).getPath.split("/").last
      // Jars may be recorded under either their full name or their local file name.
      val currentTimeStamp = currentJars.get(name)
        .orElse(currentJars.get(localName))
        .getOrElse(-1L)
      if (currentTimeStamp < timestamp) {
        logInfo("Fetching " + name + " with timestamp " + timestamp)
        // Fetch file with useCache mode, close cache for local mode.
        Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
          env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
        currentJars(name) = timestamp
        // Add it to our class loader
        val url = new File(SparkFiles.getRootDirectory(), localName).toURI.toURL
        if (!urlClassLoader.getURLs().contains(url)) {
          logInfo("Adding " + url + " to class loader")
          urlClassLoader.addURL(url)
        }
      }
    }
  }
}
/** Reports heartbeat and metrics for active tasks to the driver. */
/** Reports heartbeat and metrics for active tasks to the driver. */
private def reportHeartBeat(): Unit = {
  // list of (task id, accumUpdates) to send back to the driver
  val accumUpdates = new ArrayBuffer[(Long, Seq[AccumulatorV2[_, _]])]()
  val curGCTime = computeTotalGcTime()
  for (taskRunner <- runningTasks.values().asScala) {
    // `task` is null until run() finishes deserializing it; skip those runners.
    if (taskRunner.task != null) {
      taskRunner.task.metrics.mergeShuffleReadMetrics()
      taskRunner.task.metrics.setJvmGCTime(curGCTime - taskRunner.startGCTime)
      accumUpdates += ((taskRunner.taskId, taskRunner.task.metrics.accumulators()))
    }
  }
  val message = Heartbeat(executorId, accumUpdates.toArray, env.blockManager.blockManagerId)
  try {
    val response = heartbeatReceiverRef.askSync[HeartbeatResponse](
      message, RpcTimeout(conf, "spark.executor.heartbeatInterval", "10s"))
    if (response.reregisterBlockManager) {
      logInfo("Told to re-register on heartbeat")
      env.blockManager.reregister()
    }
    // Any successful heartbeat resets the consecutive-failure counter.
    heartbeatFailures = 0
  } catch {
    case NonFatal(e) =>
      logWarning("Issue communicating with driver in heartbeater", e)
      heartbeatFailures += 1
      // Too many consecutive failures: assume the driver is unreachable and exit.
      if (heartbeatFailures >= HEARTBEAT_MAX_FAILURES) {
        logError(s"Exit as unable to send heartbeats to driver " +
          s"more than $HEARTBEAT_MAX_FAILURES times")
        System.exit(ExecutorExitCode.HEARTBEAT_FAILURE)
      }
  }
}
/**
* Schedules a task to report heartbeat and partial metrics for active tasks to driver.
*/
private def startDriverHeartbeater(): Unit = {
  val intervalMs = conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s")
  // Wait a random interval so the heartbeats don't end up in sync across executors.
  // Use the idiomatic `.toInt` numeric conversion instead of `.asInstanceOf[Int]`,
  // which abused a cast for a primitive conversion; the truncation behavior is identical.
  val initialDelay = intervalMs + (math.random * intervalMs).toInt
  val heartbeatTask = new Runnable() {
    // Wrap in logUncaughtExceptions: an exception escaping a scheduled task would
    // otherwise silently cancel all future heartbeats.
    override def run(): Unit = Utils.logUncaughtExceptions(reportHeartBeat())
  }
  heartbeater.scheduleAtFixedRate(heartbeatTask, initialDelay, intervalMs, TimeUnit.MILLISECONDS)
}
}
private[spark] object Executor {
  // This is reserved for internal use by components that need to read task properties before a
  // task is fully deserialized. When possible, the TaskContext.getLocalProperty call should be
  // used instead.
  // Set per task thread in TaskRunner.run() before updateDependencies()/deserialization.
  val taskDeserializationProps: ThreadLocal[Properties] = new ThreadLocal[Properties]
}
| jlopezmalla/spark | core/src/main/scala/org/apache/spark/executor/Executor.scala | Scala | apache-2.0 | 34,964 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.engine.spark.job
import java.nio.{BufferOverflowException, ByteBuffer}
import org.apache.kylin.measure.{MeasureAggregator, MeasureIngester, MeasureTypeFactory}
import org.apache.kylin.metadata.datatype.{DataTypeSerializer, DataType => KyDataType}
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
sealed abstract class MeasureUDAF extends UserDefinedAggregateFunction {
override def inputSchema: StructType = initInputSchema
override def bufferSchema: StructType = initBufferSchema
override def deterministic: Boolean = true
override def dataType: DataType = initDataType
protected var measureAggregator: MeasureAggregator[Any] = initMeasureAgg
protected var serializer: DataTypeSerializer[Any] = initSer
protected var byteBuffer: ByteBuffer = _
protected var measureIngester: MeasureIngester[Any] = initMeasureIngester
protected var encoder: MeasureEncoder[Any, Any] = _
protected var isFirst: Boolean = _
protected var dataTpName: String = _
def initInputSchema: StructType
def initBufferSchema: StructType
def initDataType: DataType
def initMeasureAgg: MeasureAggregator[Any]
def initSer: DataTypeSerializer[Any]
def initOutPutDataType: DataType
def initMeasureIngester: MeasureIngester[Any]
/**
 * Initializes the aggregation buffer for a new group and lazily sets up per-instance state.
 *
 * The scratch ByteBuffer and the raw-value encoder are allocated only on the first call and
 * reused for every subsequent group processed by this UDAF instance.
 */
override def initialize(buffer: MutableAggregationBuffer): Unit = {
  if (byteBuffer == null) {
    // Sized to the serializer's declared maximum; update()/merge() grow it on overflow.
    byteBuffer = ByteBuffer.allocate(serializer.maxLength())
    val dataType = KyDataType.getType(dataTpName)
    dataTpName match {
      case tp if tp.startsWith("hllc") =>
        encoder = new HLLCCountEnc(dataType).asInstanceOf[MeasureEncoder[Any, Any]]
      case tp if tp.startsWith("percentile") =>
        encoder = new PercentileCountEnc(dataType).asInstanceOf[MeasureEncoder[Any, Any]]
      // NOTE(review): any other measure type leaves `encoder` null, which would NPE in
      // update() when isFirst is true — presumably only hllc/percentile measures take the
      // isFirst path. Confirm against callers.
    }
  }
  // A null slot marks "no value aggregated yet" for this group.
  buffer.update(0, null)
}
/**
 * Folds one input row into the group's aggregation buffer.
 *
 * On the first pass (`isFirst`) the raw input value is encoded into a measure object;
 * on later passes the input column already carries the serialized binary form and is
 * deserialized instead. The resulting state is stored back as serialized bytes. If the
 * scratch buffer overflows, it is doubled and the update is retried.
 */
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
  byteBuffer.clear()
  try {
    val inputValue: Any =
      if (isFirst) encoder.encoder(input(0))
      else serializer.deserialize(ByteBuffer.wrap(input(0).asInstanceOf[Array[Byte]]))
    if (buffer.isNullAt(0)) {
      // First value for this group: serialize it directly.
      serializer.serialize(inputValue, byteBuffer)
    } else {
      // Combine the stored partial aggregate with the incoming value.
      measureAggregator.reset()
      measureAggregator.aggregate(
        serializer.deserialize(ByteBuffer.wrap(buffer(0).asInstanceOf[Array[Byte]])))
      measureAggregator.aggregate(inputValue)
      serializer.serialize(measureAggregator.getState, byteBuffer)
    }
    buffer.update(0, byteBuffer.array().slice(0, byteBuffer.position()))
  } catch {
    case _: BufferOverflowException =>
      // Serialized state outgrew the scratch buffer: double it and retry this update.
      byteBuffer = ByteBuffer.allocate(byteBuffer.array().length * 2)
      update(buffer, input)
  }
}
/**
 * Merges a serialized partial aggregate from another partition into this buffer.
 *
 * Mirrors update(): a null incoming slot means the other side never saw a value and is
 * ignored; otherwise both partial states are deserialized, aggregated, and the combined
 * state is serialized back. On scratch-buffer overflow the buffer is doubled and retried.
 */
override def merge(buffer: MutableAggregationBuffer, input: Row): Unit = {
  try {
    if (!input.isNullAt(0)) {
      byteBuffer.clear()
      var inputValue = serializer.deserialize(ByteBuffer.wrap(input.apply(0).asInstanceOf[Array[Byte]]))
      if (buffer.isNullAt(0)) {
        // Our side is still empty: adopt the incoming partial state as-is.
        serializer.serialize(inputValue, byteBuffer)
        buffer.update(0, byteBuffer.array().slice(0, byteBuffer.position()))
      } else {
        // Both sides have data: aggregate the two partial states and store the result.
        measureAggregator.reset()
        val bufferValue = serializer.deserialize(ByteBuffer.wrap(buffer.apply(0).asInstanceOf[Array[Byte]]))
        measureAggregator.aggregate(bufferValue)
        measureAggregator.aggregate(inputValue)
        serializer.serialize(measureAggregator.getState, byteBuffer)
        buffer.update(0, byteBuffer.array().slice(0, byteBuffer.position()))
      }
    }
  } catch {
    case th: BufferOverflowException =>
      // Serialized state outgrew the scratch buffer: double it and retry this merge.
      byteBuffer = ByteBuffer.allocate(byteBuffer.array().length * 2)
      merge(buffer, input)
    case th: Throwable => throw th
  }
}
// Final result: the raw serialized state bytes held in buffer slot 0.
override def evaluate(buffer: Row): Any = {
  buffer.apply(0)
}
}
/**
 * Concrete measure UDAF.
 *
 * @param expression measure function name; "$SUM0" is mapped to "COUNT"
 * @param dataTp     the measure's data type
 * @param isfi       whether this UDAF reads raw (first-shot) input instead of serialized bytes
 */
class FirstUDAF(expression: String, dataTp: KyDataType, isfi: Boolean) extends MeasureUDAF {
  isFirst = isfi
  dataTpName = dataTp.toString

  // Raw input is a string column on the first round, serialized bytes afterwards.
  override def initInputSchema: StructType =
    if (isFirst) StructType(Seq(StructField("input", StringType)))
    else StructType(Seq(StructField("input", BinaryType)))

  // Intermediate aggregation state is kept as serialized bytes.
  override def initBufferSchema: StructType =
    StructType(Seq(StructField("init", BinaryType)))

  override def initDataType: DataType = BinaryType

  override def initMeasureAgg: MeasureAggregator[Any] = {
    val name = if (expression == "$SUM0") "COUNT" else expression
    MeasureAggregator.create(name, dataTp).asInstanceOf[MeasureAggregator[Any]]
  }

  override def initSer: DataTypeSerializer[Any] =
    DataTypeSerializer.create(dataTp).asInstanceOf[DataTypeSerializer[Any]]

  override def initOutPutDataType: DataType = BinaryType

  override def initMeasureIngester: MeasureIngester[Any] =
    MeasureTypeFactory.create(expression, dataTp).newIngester().asInstanceOf[MeasureIngester[Any]]
}
| apache/kylin | kylin-spark-project/kylin-spark-engine/src/main/scala/org/apache/kylin/engine/spark/job/MeasureUDAF.scala | Scala | apache-2.0 | 6,113 |
// Copyright (c) 2016 PSForever.net to present
package net.psforever.packet.game
import net.psforever.packet.{GamePacketOpcode, Marshallable, PlanetSideGamePacket}
import scodec.Codec
import scodec.codecs._
/**
* Force a player model to change its exo-suit.
* Set all GUI elements and functional elements to be associated with that type of exo-suit.
* Inventory and holster contents are discarded.<br>
* <br>
* Due to the way armor is handled internally, a player of one faction may not spawn in the exo-suit of another faction.
* That style of exo-suit is never available through this packet.
* As MAX units do not get their weapon by default, all the MAX values produce the same faction-appropriate mechanized exo-suit body visually.
* (The MAX weapons are supplied in subsequent packets.)
* `
* 0, 0 - Agile<br>
* 1, 0 - Reinforced<br>
* 2, 0 - MAX<br>
* 2, 1 - AI MAX<br>
* 2, 2 - AV MAX<br>
* 2, 3 - AA MAX<br>
* 3, 0 - Infiltration<br>
* 4, 0 - Standard
* `
* @param player_guid the player
* @param armor the type of exo-suit
* @param subtype the exo-suit subtype, if any
*/
// `armor` and `subtype` are serialized as 3-bit fields (see the companion codec),
// so only values 0-7 are representable.
final case class ArmorChangedMessage(player_guid : PlanetSideGUID,
                                     armor : Int,
                                     subtype : Int)
  extends PlanetSideGamePacket {
  type Packet = ArmorChangedMessage
  def opcode = GamePacketOpcode.ArmorChangedMessage
  def encode = ArmorChangedMessage.encode(this)
}
object ArmorChangedMessage extends Marshallable[ArmorChangedMessage] {
  // Wire format: the player's GUID, then two unsigned 3-bit fields (values 0-7).
  implicit val codec : Codec[ArmorChangedMessage] = (
    ("player_guid" | PlanetSideGUID.codec) ::
      ("armor" | uintL(3)) ::
      ("subtype" | uintL(3))
    ).as[ArmorChangedMessage]
}
| Fate-JH/PSF-Server | common/src/main/scala/net/psforever/packet/game/ArmorChangedMessage.scala | Scala | gpl-3.0 | 1,728 |
package cakesolutions
import akka.actor.{Actor, ActorLogging, ReceiveTimeout}
import akka.contrib.pattern.ShardRegion.Passivate
import akka.event.LoggingReceive
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
/**
 * Mixin that passivates an idle sharded actor.
 *
 * A receive timeout is read (in seconds) from the `application.passivate`
 * configuration key. On timeout the shard parent is asked to Passivate this
 * entity with a 'stop message, and receiving 'stop terminates the actor.
 */
trait AutoPassivation extends ActorLogging {
  this: Actor with Configuration =>

  context.setReceiveTimeout(config.getDuration("application.passivate", TimeUnit.SECONDS).seconds)

  // Fallback behaviour implementing the passivation protocol.
  private val handlePassivation: Receive = LoggingReceive {
    case ReceiveTimeout => context.parent ! Passivate(stopMessage = 'stop)
    case 'stop          => context.stop(self)
  }

  /** Wraps `receive` so passivation messages are handled after user messages. */
  protected def withPassivation(receive: Receive): Receive = receive orElse handlePassivation
}
| carlpulley/coreos-example-application | lib/persistence/src/main/scala/cakesolutions/AutoPassivation.scala | Scala | apache-2.0 | 707 |
package com.github.j5ik2o.forseti.domain.exception
import scalaz.Maybe
/**
 * The authorization server encountered an unexpected condition that prevented
 * it from fulfilling the request.
 * (This error code is needed because a 500 Internal Server Error HTTP status
 * code cannot be returned to the client via an HTTP redirect.)
 *
 * Applies to the authorization-code flow.
 * NOTE(review): the original (Japanese) comment was truncated here
 * ("authorization code/") — confirm the intended scope.
 *
 * @param description optional human-readable description of the error
 * @param cause       optional underlying cause
 */
class ServerException(
  description: Maybe[String] = Maybe.empty,
  cause: Maybe[Throwable] = Maybe.empty
) extends OAuthException(500, description, cause) {
  // OAuth error code reported to clients for this condition.
  override val errorType = "server_error"
}
| j5ik2o/forseti | domain/src/main/scala/com/github/j5ik2o/forseti/domain/exception/ServerException.scala | Scala | mit | 651 |
package scrabble
import util.Random
import scala.util.{ Try, Success, Failure }
/**
 * A bag of scrabble tiles.
 *
 * @param letters the tiles currently in the bag, in draw order
 * @param size    the number of tiles remaining
 * @param tileSet lookup from letter character to its canonical tile
 */
case class LetterBag(letters: List[Tile], size: Int, tileSet: Map[Char, Tile]) {

  override def toString = letters.toString

  /** All remaining letters concatenated into a single string. */
  lazy val lettersAsString: String = letters.map(_.letter).mkString

  /**
   * Draws up to `num` tiles from the front of the bag.
   * Returns the drawn tiles (possibly fewer than `num` if the bag runs out)
   * together with the remaining bag.
   */
  def remove(num: Int): (List[Tile], LetterBag) = {
    val (drawn, rest) = letters.splitAt(num)
    (drawn, copy(letters = rest, size = math.max(size - num, 0)))
  }

  /**
   * Swaps `exchanged` tiles for the same number of tiles from the bag, shuffling
   * the resulting bag. Fails if the bag holds fewer tiles than are exchanged.
   */
  def exchange(exchanged: List[Tile]): Try[(List[Tile], LetterBag)] =
    if (exchanged.size > size) {
      Failure(BagNotFullEnoughToExchange())
    } else {
      val (handedOut, drained) = remove(exchanged.size)
      // Overall size is unchanged: as many tiles come back in as went out.
      Success((handedOut, copy(letters = exchanged ::: drained.letters).shuffle))
    }

  /** The canonical tile for `letter`, if it exists in this tile set. */
  def letterFor(letter: Char): Option[Tile] = tileSet.get(letter)

  /** A copy of this bag with its tiles in random order. */
  def shuffle: LetterBag = copy(letters = Random.shuffle(letters))
}
object LetterBag {
// The standard English tile distribution, built from (letter, point value, count)
// triples and then expanded so each triple yields `count` tiles.
private val englishLetters: List[Tile] = {
  // (Letter, Value, Distribution)
  val blankPoints = List(('_', 0, 2))
  val onePoints = List('E', 'A', 'I', 'O', 'N', 'R', 'T', 'L', 'S', 'U') zip List(12, 9, 9, 8, 6, 6, 6, 4, 4, 4) map { case (x, y) => (x, 1, y) }
  val twoPoints = List(('D', 2, 4), ('G', 2, 3))
  val threePoints = List('B', 'C', 'M', 'P').map(ch => (ch, 3, 2))
  val fourPoints = List('F', 'H', 'V', 'W', 'Y').map(ch => (ch, 4, 2))
  val fivePoints = List('K').map(ch => (ch, 5, 1))
  val eightPoints = List('J', 'X').map(ch => (ch, 8, 1))
  val tenPoints = List('Q', 'Z').map(ch => (ch, 10, 1))
  val all: List[(Char, Int, Int)] = blankPoints ::: onePoints ::: twoPoints ::: threePoints ::: fourPoints ::: fivePoints ::: eightPoints ::: tenPoints
  // Yield a list of all the letters in the bag, using the distribution to yield the right number of letters
  all.foldLeft(List.empty[Tile]) {
    case (list, (chr: Char, vl: Int, dst: Int)) =>
      // The blank ('_') gets its own tile type; all others are scored letters.
      List.fill(dst)(if (chr == '_') BlankLetter(chr) else Letter(chr, vl)) ::: list
  }
}
/** Lookup from letter character to its canonical tile. */
// Fixed: postfix `toMap` (needs scala.language.postfixOps and is deprecated) -> explicit `.toMap`.
private val tileSet: Map[Char, Tile] = englishLetters.map { tile => tile.letter -> tile }.toMap

/** The full 100-tile English bag in construction order; shuffled on demand by apply(). */
private val englishBag = LetterBag(englishLetters, 100, tileSet)

/** Returns a new LetterBag in its initial state. List is in randomised order. */
def apply(): LetterBag = englishBag.shuffle
/**
 * Constructs a letter bag from a string of letters in the order they should be taken from the bag.
 * Returns None if any character in the string is not part of the tile set.
 */
def fromLetters(letters: String, tileSet: Map[Char, Tile]): Option[LetterBag] = {
  val empty: Option[LetterBag] = Some(LetterBag(Nil, 0, tileSet))
  // Fold from the right so the first character of the string ends up at the head
  // of the bag's tile list (drawn first). Any unknown character collapses the
  // whole result to None via Option's flatMap semantics.
  letters.foldRight(empty) { (c, acc) =>
    for {
      bag  <- acc
      tile <- tileSet.get(c)
    } yield bag.copy(letters = tile :: bag.letters, size = bag.size + 1)
  }
}
/** Manual smoke test: prints the letters of a freshly shuffled bag. */
// Fixed: deprecated procedure syntax `def main(...) { }` -> explicit `: Unit =`.
def main(args: Array[String]): Unit = {
  val bag = LetterBag()
  println(bag.lettersAsString)
}
} | Happy0/scalascrabble | src/main/scala/LetterBag.scala | Scala | gpl-2.0 | 3,656 |
package com.github.diegopacheco.sandbox.scala.pkge
/** A fruit with a display name and a colour. */
case class Fruit(name: String, color: String)

// NOTE(review): extending a case class (even with objects) is generally discouraged
// in Scala because of the generated equals/hashCode/copy machinery; plain vals such
// as `val apple = Fruit("Apple", "green")` would be safer if stable-identifier
// pattern matching on these objects is not required — confirm before changing.
object apple extends Fruit("Apple", "green")
object plum extends Fruit("Plum", "blue")
object banana extends Fruit("Banana", "yellow")
| diegopacheco/scala-playground | scala-pkge/src/main/scala/com/github/diegopacheco/sandbox/scala/pkge/Fruit.scala | Scala | unlicense | 236 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.