code | repo_name | path | language | license | size
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.text
import org.apache.hadoop.fs.FileStatus
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.write.WriteBuilder
import org.apache.spark.sql.execution.datasources.FileFormat
import org.apache.spark.sql.execution.datasources.v2.FileTable
import org.apache.spark.sql.types.{DataType, StringType, StructField, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
case class TextTable(
name: String,
sparkSession: SparkSession,
options: CaseInsensitiveStringMap,
paths: Seq[String],
userSpecifiedSchema: Option[StructType],
fallbackFileFormat: Class[_ <: FileFormat])
extends FileTable(sparkSession, options, paths, userSpecifiedSchema) {
override def newScanBuilder(options: CaseInsensitiveStringMap): TextScanBuilder =
TextScanBuilder(sparkSession, fileIndex, schema, dataSchema, options)
override def inferSchema(files: Seq[FileStatus]): Option[StructType] =
Some(StructType(Seq(StructField("value", StringType))))
override def newWriteBuilder(options: CaseInsensitiveStringMap): WriteBuilder =
new TextWriteBuilder(options, paths, formatName, supportsDataType)
override def supportsDataType(dataType: DataType): Boolean = dataType == StringType
override def formatName: String = "Text"
}
| jkbradley/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/text/TextTable.scala | Scala | apache-2.0 | 2,144 |
package ee.cone.c4gate
import ee.cone.c4actor._
import ee.cone.c4actor_kafka_impl.{KafkaConsumerApp, KafkaProducerApp, LZ4DeCompressorApp}
import ee.cone.c4actor_logback_impl.BasicLoggingApp
import ee.cone.c4di.c4app
@c4app class DumperAppBase extends EnvConfigCompApp with VMExecutionApp with NoAssembleProfilerCompApp
with ExecutableApp with RichDataCompApp
with RemoteRawSnapshotApp
with AlienProtocolApp
with HttpProtocolApp
with SnapshotLoaderImplApp
with LZ4DeCompressorApp
@c4app class KafkaLatTestAppBase extends EnvConfigCompApp with VMExecutionApp with NoAssembleProfilerCompApp
with ExecutableApp with RichDataCompApp
with KafkaProducerApp with KafkaConsumerApp
trait TestServerApp extends EnvConfigCompApp with VMExecutionApp with NoAssembleProfilerCompApp
with ServerCompApp with BasicLoggingApp
with KafkaProducerApp with KafkaConsumerApp
with RemoteRawSnapshotApp
with PublisherApp
@c4app class TestConsumerAppBase extends TestServerApp
with ManagementApp
with AlienProtocolApp
with TcpProtocolApp
with ParallelObserversApp
@c4app class HiRateTxAppBase extends TestServerApp with ParallelObserversApp
trait TestTxTransformAppBase extends TestServerApp
@c4app class TestSerialApp extends TestTxTransformApp with SerialObserversApp
@c4app class TestParallelApp extends TestTxTransformApp with ParallelObserversApp
| conecenter/c4proto | base_examples/src/main/scala/ee/cone/c4gate/ConsumerExamplesMix.scala | Scala | apache-2.0 | 1,367 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import java.time.Duration
import java.time.Instant
import java.time.temporal.ChronoUnit
/**
* Interval that moves over time in increments of a given unit. When in the middle of
* a unit it will round to the next even boundary. For example, if the unit is HOURS
* and it is 10:37, then it will round to 11:00.
*
* The offset and duration must be an even multiple of the unit.
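*
* {{{
*   // illustrative: an interval spanning the 8 hours that end at the next even hour boundary
*   val interval = RollingInterval(Duration.ZERO, Duration.ofHours(8), ChronoUnit.HOURS)
* }}}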
*
* @param offset
* Offset subtracted from the current time, `now - offset` is used as the end
* time for this interval.
* @param duration
* The length of the interval. The start time is `now - offset - duration`.
* @param unit
* The unit to use when moving along. This is typically HOURS or DAYS.
*/
case class RollingInterval(offset: Duration, duration: Duration, unit: ChronoUnit) {
import RollingInterval._
checkParams(offset, duration, unit)
/** Current end time for this interval. */
def end: Instant = ceil(Instant.now(), unit).minus(offset)
/** Current start time for this interval. */
def start: Instant = end.minus(duration)
private def currentRange: (Long, Long) = {
val e = end
val s = e.minus(duration)
s.toEpochMilli -> e.toEpochMilli
}
/** Returns true if the instant is currently within this interval. */
def contains(instant: Instant): Boolean = {
val t = instant.toEpochMilli
val (s, e) = currentRange
s <= t && t <= e
}
private def isContainedBy(s: Instant, e: Instant): Boolean = {
val s1 = s.toEpochMilli
val e1 = e.toEpochMilli
val (s2, e2) = currentRange
s1 < s2 && e1 > e2
}
/** Returns true if the interval [s, e] overlaps with this interval. */
def overlaps(s: Instant, e: Instant): Boolean = {
contains(s) || contains(e) || isContainedBy(s, e)
}
}
object RollingInterval {
/**
* Create a new interval from a string representation. The string value should have the
* format: `offset,duration,unit`. Offset and duration will be parsed by using the
* Strings.parseDuration helper. Unit should be `HOURS` or `DAYS`.
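*
* {{{
*   // illustrative; assumes Strings.parseDuration accepts simple values such as "0h" and "8h"
*   val interval = RollingInterval("0h,8h,HOURS")
* }}}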
*/
def apply(interval: String): RollingInterval = {
val parts = interval.split("\\s*,\\s*")
require(parts.length == 3, s"invalid rolling interval: $interval")
val s = Strings.parseDuration(parts(0))
val e = Strings.parseDuration(parts(1))
val unit = ChronoUnit.valueOf(parts(2))
RollingInterval(s, e, unit)
}
private def checkParams(offset: Duration, duration: Duration, unit: ChronoUnit): Unit = {
val unitDuration = Duration.of(1, unit)
require(duration.toMillis >= unitDuration.toMillis, s"duration $duration <= $unitDuration")
require(offset.toMillis % unitDuration.toMillis == 0, s"offset must be multiple of $unit")
require(duration.toMillis % unitDuration.toMillis == 0, s"duration must be multiple of $unit")
}
private[util] def ceil(t: Instant, unit: ChronoUnit): Instant = {
val truncated = t.truncatedTo(unit)
if (truncated == t) t else truncated.plus(1, unit)
}
}
| Netflix/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/util/RollingInterval.scala | Scala | apache-2.0 | 3,642 |
/*
* Copyright (c) 2012 Pongr, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pongr.fourarms.mail
import org.apache.mailet._
import javax.mail.{ Multipart, Header, Address }
import javax.mail.internet._
import javax.mail.{ Part, Multipart }
import scala.collection.JavaConversions._
import org.apache.commons.io.IOUtils
import org.apache.commons.lang.StringUtils.isBlank
import com.pongr.fourarms.util.FromMethods
object EmailAddress extends FromMethods {
def apply(addr: Address): EmailAddress = if (addr != null) {
val (firstName, lastName) = getFromName(addr.asInstanceOf[InternetAddress])
val a = new MailAddress(new InternetAddress(fixRecipientAddress(addr.toString)))
EmailAddress(a.getLocalPart, a.getDomain, firstName, lastName)
} else EmailAddress("", "", None, None)
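/** Strips a trailing dot appearing before ">" or at the end of an address string, e.g. "<user@host.com.>" becomes "<user@host.com>" (illustrative). */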
def fixRecipientAddress(address: String): String = """(\.)(?=>|$)""".r.replaceAllIn(address, "")
}
/**
* Represents an email address.
* @param localPart username from "username@host.com".
* @param domain domain without "@". For example: pongr.com
* @param firstName First name
* @param lastName Last name
*/
case class EmailAddress(
localPart: String,
domain: String,
firstName: Option[String],
lastName: Option[String]
) {
val address: String = "%s@%s" format (localPart, domain)
}
/**
* Representation of the message's part.
* @param contentType "Content-Type" header field.
* @param data Array[Byte] representation of this part's content.
* @param fileName Represents the "filename" part of "Content-Disposition" header field.
* @param description "Content-Description" header field.
* @param disposition Represents the "disposition" part of "Content-Disposition" header field.
* @param headers All headers associated with this part.
*/
case class EmailPart(
contentType: String,
data: Array[Byte],
fileName: Option[String],
description: Option[String],
disposition: Option[String],
headers: Map[String, Seq[String]]
)
/**
* Contains useful information extracted from MimeMessage.
*/
@SerialVersionUID(159423l)
case class Email(
from: EmailAddress,
to: Seq[EmailAddress],
subject: String,
parts: Seq[EmailPart],
headers: Map[String, Seq[String]],
remoteHost: String,
remoteAddr: String
) {
/**
* This private field helps the compiler to load EmailPart class
* when it deserializes Seq[EmailPart] using Java's native serialization.
*/
private val email = EmailPart("", Array(), None, None, None, Map())
def getText(mimeType: String) = {
parts.filter(p => isMimeEqual(p.contentType, mimeType)).headOption match {
case Some(p) => Some(new String(p.data))
case _ => None
}
}
def bodyPlain: Option[String] = getText("text/plain")
def bodyHtml : Option[String] = getText("text/html")
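/** Compares two MIME types ignoring parameters (e.g. "text/plain; charset=utf-8" matches "text/plain"), falling back to a case-insensitive string comparison if parsing fails. */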
def isMimeEqual(mime1: String, mime2: String): Boolean = {
try {
val c = new ContentType(mime1)
c.`match`(mime2)
} catch { case e =>
mime1.equalsIgnoreCase(mime2)
}
}
}
object Email extends FromMethods {
def apply(m: Mail): Email = {
val (firstName, lastName) = getFromName(m)
val addr = new MailAddress(getFromEmail(m))
val message = m.getMessage
Email(EmailAddress(addr.getLocalPart, addr.getDomain, firstName, lastName),
Option(message.getAllRecipients).map(_.map(EmailAddress(_))).flatten.toList,
message.getSubject,
getEmailParts(message),
getHeaders(message.getAllHeaders, Map()),
m.getRemoteHost,
m.getRemoteAddr)
}
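/** Recursively flattens a (possibly nested multipart) message into its leaf parts; returns Nil if the part cannot be parsed. */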
def getEmailParts(part: Part): Seq[EmailPart] = try {
if (part.isMimeType("multipart/*")) {
val multipart = part.getContent().asInstanceOf[Multipart]
var images: Seq[EmailPart] = List()
for (i <- 0 until multipart.getCount) {
images = images ++ getEmailParts(multipart.getBodyPart(i))
}
images
} else {
val headers = part.getAllHeaders
List(EmailPart(part.getContentType,
IOUtils.toByteArray(part.getInputStream),
Option(part.getFileName),
Option(part.getDescription),
Option(part.getDisposition),
getHeaders(part.getAllHeaders, Map())))
}
} catch {
case e: javax.mail.internet.ParseException => Nil
}
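/** Folds a header enumeration into a map from header name to all of that header's values. */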
@scala.annotation.tailrec
def getHeaders(enum: java.util.Enumeration[_], headers: Map[String, Seq[String]]): Map[String, Seq[String]] = {
if (!enum.hasMoreElements) headers
else {
val tmp = enum.nextElement.asInstanceOf[Header]
val newHeader = headers.filter(_._1 == tmp.getName).headOption match {
case Some((_, existing)) => Map(tmp.getName -> (tmp.getValue +: existing))
case _ => Map(tmp.getName -> List(tmp.getValue))
}
getHeaders(enum, headers ++ newHeader)
}
}
}
| pongr/fourarms | src/main/scala/email.scala | Scala | apache-2.0 | 5,393 |
package juju.sample
import akka.actor.ActorRef
import juju.domain.Saga
import juju.domain.resolvers.{BindAll, ActivatedBy}
import juju.messages.{Activate, Command, DomainEvent, WakeUp}
import juju.sample.PersonAggregate.WeightChanged
case class AveragePersonWeightActivate(correlationId: String) extends Activate
case class PublishWakeUp() extends WakeUp
case class PublishAverageWeight(weight: Int) extends Command
case class PublishHello(correlationId: String, text: String) extends Command
@SerialVersionUID(1L) case class HelloRequested(text: String) extends DomainEvent
@SerialVersionUID(1L) case class PublishRequested() extends DomainEvent
@ActivatedBy(message = classOf[AveragePersonWeightActivate])
class AveragePersonWeightSaga(correlationId: String, commandRouter: ActorRef) extends Saga {
var weights : Map[String, Int] = Map.empty
var average = 0
var changed = true
def apply(event: WeightChanged): Unit = {
weights = weights.filterNot(_._1 == event.name) + (event.name -> event.weight)
val newAverage = weights.values.sum / weights.toList.length
changed = newAverage != average
average = newAverage
}
def apply(event: PublishRequested): Unit =
if (changed) {
deliverCommand(commandRouter, PublishAverageWeight(average))
changed = false
}
@BindAll def apply(event: HelloRequested): Unit =
deliverCommand(commandRouter, PublishHello(correlationId, event.text))
def wakeup(wakeup: PublishWakeUp): Unit = raise(PublishRequested())
} | brokersquare/juju | core/src/test/scala/juju/sample/AveragePersonWeightSaga.scala | Scala | apache-2.0 | 1,506 |
package lmxml
package transforms
package json
package test
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
class JSONTest extends FlatSpec with ShouldMatchers {
val source = """
test
h1 will
p "Various degrees of: {determination}"
ul .people
people
li "{id}: {firstname} {lastname}"
"""
"JSON" should "be dynamically templated" in {
val test = """{
"test": {
"will": "strong",
"determination": "good"
},
"people": [
{ "id": 1, "firstname": "Philip", "lastname": "Cali" },
{ "id": 2, "firstname": "Anna", "lastname": "Cali" }
]
}"""
val expected = List(
EmptyNode(List(
LmxmlNode("h1", children = List(TextNode("strong"))),
LmxmlNode("p", children = List(
TextNode("Various degrees of: good")
))
)),
LmxmlNode("ul", Map("class" -> "people"), List(
EmptyNode(List(
LmxmlNode("li", children = List(
TextNode("1: Philip Cali")
)),
LmxmlNode("li", children = List(
TextNode("2: Anna Cali")
))
))
))
)
import JSTransform.Filters._
val transform = JSTransform("people" -> onArray(_.size > 1)).parse(test)
DefaultLmxmlParser.fullParse(source)(transform) should be === expected
}
}
| philcali/lmxml | json/src/test/scala/json.scala | Scala | mit | 1,309 |
package fr.acinq.eclair.payment
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.eclair.channel.CMD_FAIL_HTLC
import fr.acinq.eclair.crypto.Sphinx
import fr.acinq.eclair.router.Router.{ChannelHop, Hop, NodeHop}
import fr.acinq.eclair.wire._
import fr.acinq.eclair.{CltvExpiry, CltvExpiryDelta, MilliSatoshi, UInt64}
import scodec.bits.ByteVector
import scodec.{Attempt, Codec, DecodeResult}
import scala.util.Try
sealed trait IncomingPaymentPacket
/** Helpers to handle incoming payment packets. */
object IncomingPaymentPacket {
// @formatter:off
/** We are the final recipient. */
case class FinalPacket(add: UpdateAddHtlc, payload: PaymentOnion.FinalPayload) extends IncomingPaymentPacket
/** We are an intermediate node. */
sealed trait RelayPacket extends IncomingPaymentPacket
/** We must relay the payment to a direct peer. */
case class ChannelRelayPacket(add: UpdateAddHtlc, payload: PaymentOnion.ChannelRelayPayload, nextPacket: OnionRoutingPacket) extends RelayPacket {
val relayFeeMsat: MilliSatoshi = add.amountMsat - payload.amountToForward
val expiryDelta: CltvExpiryDelta = add.cltvExpiry - payload.outgoingCltv
}
/** We must relay the payment to a remote node. */
case class NodeRelayPacket(add: UpdateAddHtlc, outerPayload: PaymentOnion.FinalPayload, innerPayload: PaymentOnion.NodeRelayPayload, nextPacket: OnionRoutingPacket) extends RelayPacket
// @formatter:on
case class DecodedOnionPacket[T <: PaymentOnion.PacketType](payload: T, next: OnionRoutingPacket)
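/**
* Peels one layer of the onion with this node's private key and decodes the current hop's payload.
* On success, returns the decoded per-hop payload together with the packet to forward to the next node.
*/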
private[payment] def decryptOnion[T <: PaymentOnion.PacketType](paymentHash: ByteVector32, privateKey: PrivateKey, packet: OnionRoutingPacket, perHopPayloadCodec: Boolean => Codec[T]): Either[FailureMessage, DecodedOnionPacket[T]] =
Sphinx.peel(privateKey, Some(paymentHash), packet) match {
case Right(p@Sphinx.DecryptedPacket(payload, nextPacket, _)) =>
perHopPayloadCodec(p.isLastPacket).decode(payload.bits) match {
case Attempt.Successful(DecodeResult(perHopPayload, _)) => Right(DecodedOnionPacket(perHopPayload, nextPacket))
case Attempt.Failure(e: OnionRoutingCodecs.MissingRequiredTlv) => Left(e.failureMessage)
// Onion is correctly encrypted but the content of the per-hop payload couldn't be parsed.
// It's hard to provide tag and offset information from scodec failures, so we currently don't do it.
case Attempt.Failure(_) => Left(InvalidOnionPayload(UInt64(0), 0))
}
case Left(badOnion) => Left(badOnion)
}
/**
* Decrypt the onion packet of a received htlc. If we are the final recipient, we validate that the HTLC fields match
* the onion fields (this prevents intermediate nodes from sending an invalid amount or expiry).
*
* NB: we can't fully validate RelayPackets because it requires knowing the channel/route we'll be using, which we
* don't know yet. Such validation is the responsibility of downstream components.
*
* @param add incoming htlc
* @param privateKey this node's private key
* @return whether the payment is to be relayed or if our node is the final recipient (or an error).
*/
def decrypt(add: UpdateAddHtlc, privateKey: PrivateKey): Either[FailureMessage, IncomingPaymentPacket] = {
decryptOnion(add.paymentHash, privateKey, add.onionRoutingPacket, PaymentOnionCodecs.paymentOnionPerHopPayloadCodec) match {
case Left(failure) => Left(failure)
// NB: we don't validate the ChannelRelayPacket here because its fees and cltv depend on what channel we'll choose to use.
case Right(DecodedOnionPacket(payload: PaymentOnion.ChannelRelayPayload, next)) => Right(ChannelRelayPacket(add, payload, next))
case Right(DecodedOnionPacket(payload: PaymentOnion.FinalTlvPayload, _)) => payload.records.get[OnionPaymentPayloadTlv.TrampolineOnion] match {
case Some(OnionPaymentPayloadTlv.TrampolineOnion(trampolinePacket)) => decryptOnion(add.paymentHash, privateKey, trampolinePacket, PaymentOnionCodecs.trampolineOnionPerHopPayloadCodec) match {
case Left(failure) => Left(failure)
case Right(DecodedOnionPacket(innerPayload: PaymentOnion.NodeRelayPayload, next)) => validateNodeRelay(add, payload, innerPayload, next)
case Right(DecodedOnionPacket(innerPayload: PaymentOnion.FinalPayload, _)) => validateFinal(add, payload, innerPayload)
}
case None => validateFinal(add, payload)
}
}
}
private def validateFinal(add: UpdateAddHtlc, payload: PaymentOnion.FinalPayload): Either[FailureMessage, IncomingPaymentPacket] = {
if (add.amountMsat != payload.amount) {
Left(FinalIncorrectHtlcAmount(add.amountMsat))
} else if (add.cltvExpiry != payload.expiry) {
Left(FinalIncorrectCltvExpiry(add.cltvExpiry))
} else {
Right(FinalPacket(add, payload))
}
}
private def validateFinal(add: UpdateAddHtlc, outerPayload: PaymentOnion.FinalPayload, innerPayload: PaymentOnion.FinalPayload): Either[FailureMessage, IncomingPaymentPacket] = {
if (add.amountMsat != outerPayload.amount) {
Left(FinalIncorrectHtlcAmount(add.amountMsat))
} else if (add.cltvExpiry != outerPayload.expiry) {
Left(FinalIncorrectCltvExpiry(add.cltvExpiry))
} else if (outerPayload.expiry != innerPayload.expiry) {
Left(FinalIncorrectCltvExpiry(add.cltvExpiry)) // previous trampoline didn't forward the right expiry
} else if (outerPayload.totalAmount != innerPayload.amount) {
Left(FinalIncorrectHtlcAmount(outerPayload.totalAmount)) // previous trampoline didn't forward the right amount
} else {
// We merge contents from the outer and inner payloads.
// We must use the inner payload's total amount and payment secret because the payment may be split between multiple trampoline payments (#reckless).
Right(FinalPacket(add, PaymentOnion.createMultiPartPayload(outerPayload.amount, innerPayload.totalAmount, outerPayload.expiry, innerPayload.paymentSecret, innerPayload.paymentMetadata)))
}
}
private def validateNodeRelay(add: UpdateAddHtlc, outerPayload: PaymentOnion.FinalPayload, innerPayload: PaymentOnion.NodeRelayPayload, next: OnionRoutingPacket): Either[FailureMessage, IncomingPaymentPacket] = {
if (add.amountMsat < outerPayload.amount) {
Left(FinalIncorrectHtlcAmount(add.amountMsat))
} else if (add.cltvExpiry != outerPayload.expiry) {
Left(FinalIncorrectCltvExpiry(add.cltvExpiry))
} else {
Right(NodeRelayPacket(add, outerPayload, innerPayload, next))
}
}
}
/** Helpers to create outgoing payment packets. */
object OutgoingPaymentPacket {
/**
* Build an encrypted onion packet from onion payloads and node public keys.
*/
private def buildOnion(sessionKey: PrivateKey, packetPayloadLength: Int, nodes: Seq[PublicKey], payloads: Seq[PaymentOnion.PerHopPayload], associatedData: ByteVector32): Try[Sphinx.PacketAndSecrets] = {
require(nodes.size == payloads.size)
val payloadsBin: Seq[ByteVector] = payloads
.map {
case p: PaymentOnion.FinalPayload => PaymentOnionCodecs.finalPerHopPayloadCodec.encode(p)
case p: PaymentOnion.ChannelRelayPayload => PaymentOnionCodecs.channelRelayPerHopPayloadCodec.encode(p)
case p: PaymentOnion.NodeRelayPayload => PaymentOnionCodecs.nodeRelayPerHopPayloadCodec.encode(p)
}
.map {
case Attempt.Successful(bitVector) => bitVector.bytes
case Attempt.Failure(cause) => throw new RuntimeException(s"serialization error: $cause")
}
Sphinx.create(sessionKey, packetPayloadLength, nodes, payloadsBin, Some(associatedData))
}
/**
* Build the onion payloads for each hop.
*
* @param hops the hops as computed by the router + extra routes from payment request
* @param finalPayload payload data for the final node (amount, expiry, etc)
* @return a (firstAmount, firstExpiry, payloads) tuple where:
* - firstAmount is the amount for the first htlc in the route
* - firstExpiry is the cltv expiry for the first htlc in the route
* - a sequence of payloads that will be used to build the onion
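*
* For example (illustrative): with two relay hops h1 -> h2 and a final amount A, h2's payload
* instructs it to forward A, h1's payload instructs it to forward A plus h2's fee, and the
* returned firstAmount additionally includes h1's fee.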
*/
def buildPayloads(hops: Seq[Hop], finalPayload: PaymentOnion.FinalPayload): (MilliSatoshi, CltvExpiry, Seq[PaymentOnion.PerHopPayload]) = {
hops.reverse.foldLeft((finalPayload.amount, finalPayload.expiry, Seq[PaymentOnion.PerHopPayload](finalPayload))) {
case ((amount, expiry, payloads), hop) =>
val payload = hop match {
case hop: ChannelHop => PaymentOnion.ChannelRelayTlvPayload(hop.edge.updExt.update.shortChannelId, amount, expiry)
case hop: NodeHop => PaymentOnion.createNodeRelayPayload(amount, expiry, hop.nextNodeId)
}
(amount + hop.fee(amount), expiry + hop.cltvExpiryDelta, payload +: payloads)
}
}
/**
* Build an encrypted onion packet with the given final payload.
*
* @param hops the hops as computed by the router + extra routes from payment request, including ourselves in the first hop
* @param finalPayload payload data for the final node (amount, expiry, etc)
* @return a (firstAmount, firstExpiry, onion) tuple where:
* - firstAmount is the amount for the first htlc in the route
* - firstExpiry is the cltv expiry for the first htlc in the route
* - the onion to include in the HTLC
*/
private def buildPacket(sessionKey: PrivateKey, packetPayloadLength: Int, paymentHash: ByteVector32, hops: Seq[Hop], finalPayload: PaymentOnion.FinalPayload): Try[(MilliSatoshi, CltvExpiry, Sphinx.PacketAndSecrets)] = {
val (firstAmount, firstExpiry, payloads) = buildPayloads(hops.drop(1), finalPayload)
val nodes = hops.map(_.nextNodeId)
// BOLT 2 requires that associatedData == paymentHash
val onionTry = buildOnion(sessionKey, packetPayloadLength, nodes, payloads, paymentHash)
onionTry.map(onion => (firstAmount, firstExpiry, onion))
}
def buildPaymentPacket(sessionKey: PrivateKey, paymentHash: ByteVector32, hops: Seq[Hop], finalPayload: PaymentOnion.FinalPayload): Try[(MilliSatoshi, CltvExpiry, Sphinx.PacketAndSecrets)] =
buildPacket(sessionKey, PaymentOnionCodecs.paymentOnionPayloadLength, paymentHash, hops, finalPayload)
def buildTrampolinePacket(sessionKey: PrivateKey, paymentHash: ByteVector32, hops: Seq[Hop], finalPayload: PaymentOnion.FinalPayload): Try[(MilliSatoshi, CltvExpiry, Sphinx.PacketAndSecrets)] =
buildPacket(sessionKey, PaymentOnionCodecs.trampolineOnionPayloadLength, paymentHash, hops, finalPayload)
/**
* Build an encrypted trampoline onion packet when the final recipient doesn't support trampoline.
* The next-to-last trampoline node payload will contain instructions to convert to a legacy payment.
*
* @param invoice Bolt 11 invoice (features and routing hints will be provided to the next-to-last node).
* @param hops the trampoline hops (including ourselves in the first hop, and the non-trampoline final recipient in the last hop).
* @param finalPayload payload data for the final node (amount, expiry, etc)
* @return a (firstAmount, firstExpiry, onion) tuple where:
* - firstAmount is the amount for the trampoline node in the route
* - firstExpiry is the cltv expiry for the first trampoline node in the route
* - the trampoline onion to include in final payload of a normal onion
*/
def buildTrampolineToLegacyPacket(sessionKey: PrivateKey, invoice: PaymentRequest, hops: Seq[NodeHop], finalPayload: PaymentOnion.FinalPayload): Try[(MilliSatoshi, CltvExpiry, Sphinx.PacketAndSecrets)] = {
// NB: the final payload will never reach the recipient, since the next-to-last node in the trampoline route will convert that to a non-trampoline payment.
// We use the smallest final payload possible, otherwise we may overflow the trampoline onion size.
val dummyFinalPayload = PaymentOnion.createSinglePartPayload(finalPayload.amount, finalPayload.expiry, finalPayload.paymentSecret, None)
val (firstAmount, firstExpiry, payloads) = hops.drop(1).reverse.foldLeft((finalPayload.amount, finalPayload.expiry, Seq[PaymentOnion.PerHopPayload](dummyFinalPayload))) {
case ((amount, expiry, payloads1), hop) =>
// The next-to-last node in the trampoline route must receive invoice data to indicate the conversion to a non-trampoline payment.
val payload = if (payloads1.length == 1) {
PaymentOnion.createNodeRelayToNonTrampolinePayload(finalPayload.amount, finalPayload.totalAmount, finalPayload.expiry, hop.nextNodeId, invoice)
} else {
PaymentOnion.createNodeRelayPayload(amount, expiry, hop.nextNodeId)
}
(amount + hop.fee(amount), expiry + hop.cltvExpiryDelta, payload +: payloads1)
}
val nodes = hops.map(_.nextNodeId)
val onionTry = buildOnion(sessionKey, PaymentOnionCodecs.trampolineOnionPayloadLength, nodes, payloads, invoice.paymentHash)
onionTry.map(onion => (firstAmount, firstExpiry, onion))
}
import immortan.crypto.Tools._
def buildHtlcFailure(cmd: CMD_FAIL_HTLC, theirAdd: UpdateAddHtlc): UpdateMessage = {
(Sphinx.peel(cmd.nodeSecret, Some(theirAdd.paymentHash), theirAdd.onionRoutingPacket), cmd.reason) match {
case Right(packet) ~ Left(forwarded) => UpdateFailHtlc(theirAdd.channelId, reason = Sphinx.FailurePacket.wrap(forwarded, packet.sharedSecret), id = cmd.theirAdd.id)
case Right(packet) ~ Right(failure) => UpdateFailHtlc(theirAdd.channelId, reason = Sphinx.FailurePacket.create(packet.sharedSecret, failure), id = cmd.theirAdd.id)
case Left(fail) ~ _ => UpdateFailMalformedHtlc(theirAdd.channelId, cmd.theirAdd.id, fail.onionHash, fail.code)
}
}
}
| btcontract/wallet | app/src/main/java/fr/acinq/eclair/payment/PaymentPacket.scala | Scala | apache-2.0 | 13,815 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.graphframes.lib.org.trustedanalytics.sparktk
import org.apache.spark.graphx.lib.org.trustedanalytics._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{ Row, DataFrame }
import org.graphframes.GraphFrame
import org.graphframes.lib.GraphXConversions
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
/**
* Computes the betweenness centrality exactly on the given graph.
*/
object BetweennessCentrality {
private val betweennessResults = "betweenness_centrality"
/**
* Computes the betweenness centrality for the vertices of the graph using the GraphX-based
* betweenness centrality algorithm.
*
* @param graph the graph to compute betweenness centrality on
* @param edgeWeight the name of the column containing the edge weights. If none, every edge is assigned a weight of 1
* @param normalize If true, normalize the betweenness centrality values
* by the number of pairwise paths possible
* @return the graph's vertices with an added "betweenness_centrality" column holding each vertex's centrality value
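*
* {{{
*   // illustrative usage; assumes `graph` has a numeric edge column named "weight"
*   val centrality = BetweennessCentrality.run(graph, edgeWeight = Some("weight"))
* }}}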
*/
def run(graph: GraphFrame, edgeWeight: Option[String] = None, normalize: Boolean = true): DataFrame = {
// Convert to graphx
val gf = GraphFrame(graph.vertices.select(GraphFrame.ID), graph.edges)
// calculate the betweenness centrality
val graphxBetweennessRDD = edgeWeight match {
case Some(edgeName) =>
val edgeWeightType = graph.edges.schema(edgeName).dataType
sparktk.BetweennessCentrality.run(graph.toGraphX, getEdgeWeightFunc(graph, edgeWeight), normalize)
case None => sparktk.BetweennessCentrality.run(gf.toGraphX, normalize = normalize)
}
// return an RDD representing the betweenness value on vertices
GraphXConversions.fromGraphX(graph, graphxBetweennessRDD, Seq(betweennessResults)).vertices
}
private def getEdgeWeightFunc(graph: GraphFrame, edgeWeight: Option[String]): Option[(Row) => Int] = {
val edgeWeightFunc = if (edgeWeight.isDefined) {
val edgeWeightType = graph.edges.schema(edgeWeight.get).dataType
require(edgeWeightType.isInstanceOf[NumericType], "The edge weight type should be numeric")
Some((row: Row) => row.getAs[Any](edgeWeight.get) match {
case x: Int => x.toInt
case x: Long => x.toInt
case x: Short => x.toInt
case x: Byte => x.toInt
case _ => throw new scala.ClassCastException(s"the edge weight type cannot be $edgeWeightType")
})
}
else {
None
}
edgeWeightFunc
}
}
| aayushidwivedi01/spark-tk | sparktk-core/src/main/scala/org/graphframes/lib/org/trustedanalytics/sparktk/BetweennessCentrality.scala | Scala | apache-2.0 | 3,280 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
object APIReplicationApplierStop {
def put(client: HttpClient)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Put)
.path(path"/_api/replication/applier-stop", append = true)
.call[Json]
} | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/APIReplicationApplierStop.scala | Scala | mit | 457 |
/*
* Copyright (c) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
package org.apache.spark.api.csharp
import java.io.{DataOutputStream, File, FileOutputStream, IOException}
import java.net.{InetAddress, InetSocketAddress, ServerSocket, Socket}
import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue, TimeUnit}
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.channel.{ChannelFuture, ChannelInitializer, EventLoopGroup}
import io.netty.handler.codec.LengthFieldBasedFrameDecoder
import io.netty.handler.codec.bytes.{ByteArrayDecoder, ByteArrayEncoder}
import org.apache.spark.internal.Logging
/**
* Netty server that invokes JVM calls based upon receiving
* messages from C# in SparkCLR.
* This implementation is identical to RBackend and that can be reused
* in SparkCLR if the handler is made pluggable
*/
// Since SparkCLR is a package to Spark and not a part of spark-core it mirrors the implementation
// of selected parts from RBackend with SparkCLR customizations
class CSharpBackend extends Logging
{ self => // for accessing the this reference in inner class(ChannelInitializer)
private[this] var channelFuture: ChannelFuture = null
private[this] var bootstrap: ServerBootstrap = null
private[this] var bossGroup: EventLoopGroup = null
def init(portNumber: Int): Int = {
// need at least 3 threads, use 10 here for safety
bossGroup = new NioEventLoopGroup(10)
val workerGroup = bossGroup
bootstrap = new ServerBootstrap()
.group(bossGroup, workerGroup)
.channel(classOf[NioServerSocketChannel])
bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
def initChannel(ch: SocketChannel): Unit = {
ch.pipeline()
.addLast("encoder", new ByteArrayEncoder())
.addLast("frameDecoder",
// maxFrameLength = 2G
// lengthFieldOffset = 0
// lengthFieldLength = 4
// lengthAdjustment = 0
// initialBytesToStrip = 4, i.e. strip out the length field itself
//new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
.addLast("decoder", new ByteArrayDecoder())
//TODO - work with SparkR devs to make this configurable and reuse RBackend
.addLast("handler", new CSharpBackendHandler(self))
}
})
channelFuture = bootstrap.bind(new InetSocketAddress("localhost", portNumber))
channelFuture.syncUninterruptibly()
channelFuture.channel().localAddress().asInstanceOf[InetSocketAddress].getPort()
}
def run(): Unit = {
channelFuture.channel.closeFuture().syncUninterruptibly()
}
def close(): Unit = {
if (channelFuture != null) {
// close is a local operation and should finish within milliseconds; timeout just to be safe
channelFuture.channel().close().awaitUninterruptibly(10, TimeUnit.SECONDS)
channelFuture = null
}
if (bootstrap != null && bootstrap.group() != null) {
bootstrap.group().shutdownGracefully()
}
if (bootstrap != null && bootstrap.childGroup() != null) {
bootstrap.childGroup().shutdownGracefully()
}
bootstrap = null
// Send close to CSharp callback server.
logInfo("Requesting to close all call back sockets.")
var socket: Socket = null
do {
socket = CSharpBackend.callbackSockets.poll()
if (socket != null) {
try {
val dos = new DataOutputStream(socket.getOutputStream)
SerDe.writeString(dos, "close")
socket.close()
socket = null
}
catch {
case e : Exception => logError("Exception when closing socket: ", e)
}
}
} while (socket != null)
CSharpBackend.callbackSocketShutdown = true
}
}
object CSharpBackend {
// Channels to callback server.
private[spark] val callbackSockets: BlockingQueue[Socket] = new LinkedBlockingQueue[Socket]()
@volatile private[spark] var callbackPort: Int = 0
// flag to denote whether the callback socket is shutdown explicitly
@volatile private[spark] var callbackSocketShutdown: Boolean = false
}
| skaarthik/Mobius | scala/src/main/org/apache/spark/api/csharp/CSharpBackend.scala | Scala | mit | 4,412 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.index
import java.nio.charset.StandardCharsets
import org.apache.kudu.Schema
import org.apache.kudu.client.{KuduTable, PartialRow}
import org.geotools.factory.Hints
import org.locationtech.geomesa.index.index.IndexKeySpace
import org.locationtech.geomesa.index.index.IndexKeySpace._
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.kudu.data.KuduQueryPlan.{EmptyPlan, ScanPlan}
import org.locationtech.geomesa.kudu.data.{KuduDataStore, KuduFeature}
import org.locationtech.geomesa.kudu.result.KuduResultAdapter
import org.locationtech.geomesa.kudu.schema.KuduIndexColumnAdapter.VisibilityAdapter
import org.locationtech.geomesa.kudu.schema.KuduSimpleFeatureSchema
import org.locationtech.geomesa.kudu.schema.KuduSimpleFeatureSchema.KuduFilter
import org.locationtech.geomesa.kudu.{KuduFilterStrategyType, KuduQueryPlanType, KuduValue, WriteOperation}
import org.locationtech.geomesa.security.SecurityUtils
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
trait KuduTieredFeatureIndex[T, U] extends KuduFeatureIndex[T, U] {
import scala.collection.JavaConverters._
/**
* Tiered key space beyond the primary one, if any
*
* @param sft simple feature type
* @return
*/
protected def tieredKeySpace(sft: SimpleFeatureType): Option[IndexKeySpace[_, _]]
protected def createKeyValues(toIndexKey: SimpleFeature => Seq[U],
toTieredIndexKey: SimpleFeature => Seq[Array[Byte]])
(kf: KuduFeature): Seq[Seq[KuduValue[_]]]
protected def toTieredRowRanges(sft: SimpleFeatureType,
schema: Schema,
range: ScanRange[U],
tiers: => Seq[ByteRange],
minTier: => Array[Byte],
maxTier: => Array[Byte]): Seq[(Option[PartialRow], Option[PartialRow])]
override def writer(sft: SimpleFeatureType, ds: KuduDataStore): (KuduFeature) => Seq[WriteOperation] = {
val table = ds.client.openTable(getTableName(sft.getTypeName, ds))
val schema = KuduSimpleFeatureSchema(sft)
val splitters = KuduFeatureIndex.splitters(sft)
val toIndexKey = keySpace.toIndexKey(sft)
val toTieredKey = createTieredKey(tieredKeySpace(sft).map(_.toIndexKeyBytes(sft))) _
createInsert(sft, table, schema, splitters, toIndexKey, toTieredKey)
}
override def remover(sft: SimpleFeatureType, ds: KuduDataStore): (KuduFeature) => Seq[WriteOperation] = {
val table = ds.client.openTable(getTableName(sft.getTypeName, ds))
val toIndexKey = keySpace.toIndexKey(sft)
val toTieredKey = createTieredKey(tieredKeySpace(sft).map(_.toIndexKeyBytes(sft))) _
createDelete(table, toIndexKey, toTieredKey)
}
override def getQueryPlan(sft: SimpleFeatureType,
ds: KuduDataStore,
filter: KuduFilterStrategyType,
hints: Hints,
explain: Explainer): KuduQueryPlanType = {
val tier = tieredKeySpace(sft).orNull.asInstanceOf[IndexKeySpace[Any, Any]]
val primary = filter.primary.orNull
val secondary = filter.secondary.orNull
// only evaluate the tiered ranges if needed - depending on the primary filter we might not use them
lazy val tiers = tier.getRangeBytes(tier.getRanges(tier.getIndexValues(sft, secondary, explain))).toSeq
if (tier == null || primary == null || secondary == null || tiers.isEmpty) {
// primary == null handles Filter.INCLUDE
super.getQueryPlan(sft, ds, filter, hints, explain)
} else {
val values = keySpace.getIndexValues(sft, primary, explain)
val keyRanges = keySpace.getRanges(values)
lazy val minTier = ByteRange.min(tiers)
lazy val maxTier = ByteRange.max(tiers)
val kuduSchema = tableSchema(sft)
val ranges = keyRanges.flatMap(toTieredRowRanges(sft, kuduSchema, _, tiers, minTier, maxTier))
if (ranges.isEmpty) { EmptyPlan(filter) } else {
val schema = KuduSimpleFeatureSchema(sft)
val fullFilter =
if (keySpace.useFullFilter(Some(values), Some(ds.config), hints)) { filter.filter } else { filter.secondary }
val auths = ds.config.authProvider.getAuthorizations.asScala.map(_.getBytes(StandardCharsets.UTF_8))
val KuduFilter(predicates, ecql) = fullFilter.map(schema.predicate).getOrElse(KuduFilter(Seq.empty, None))
val adapter = KuduResultAdapter(sft, auths, ecql, hints)
val table = getTableName(sft.getTypeName, ds)
ScanPlan(filter, table, ranges.toSeq, predicates, ecql, adapter, ds.config.queryThreads)
}
}
}
private def createInsert(sft: SimpleFeatureType,
table: KuduTable,
schema: KuduSimpleFeatureSchema,
splitters: Map[String, String],
toIndexKey: SimpleFeature => Seq[U],
toTieredIndexKey: SimpleFeature => Seq[Array[Byte]])
(kf: KuduFeature): Seq[WriteOperation] = {
val featureValues = schema.serialize(kf.feature)
val vis = SecurityUtils.getVisibility(kf.feature)
val partitioning = () => createPartition(sft, table, splitters, kf.bin)
createKeyValues(toIndexKey, toTieredIndexKey)(kf).map { key =>
val upsert = table.newUpsert()
val row = upsert.getRow
key.foreach(_.writeToRow(row))
featureValues.foreach(_.writeToRow(row))
VisibilityAdapter.writeToRow(row, vis)
WriteOperation(upsert, s"$identifier.${kf.bin}", partitioning)
}
}
private def createDelete(table: KuduTable,
toIndexKey: SimpleFeature => Seq[U],
toTieredIndexKey: SimpleFeature => Seq[Array[Byte]])
(kf: KuduFeature): Seq[WriteOperation] = {
createKeyValues(toIndexKey, toTieredIndexKey)(kf).map { key =>
val delete = table.newDelete()
val row = delete.getRow
key.foreach(_.writeToRow(row))
WriteOperation(delete, "", null)
}
}
private def createTieredKey(tieredBytes: Option[ToIndexKeyBytes])(feature: SimpleFeature): Seq[Array[Byte]] =
tieredBytes.map(_.apply(Seq.empty, feature, Array.empty)).getOrElse(Seq(Array.empty))
}
| ddseapy/geomesa | geomesa-kudu/geomesa-kudu-datastore/src/main/scala/org/locationtech/geomesa/kudu/index/KuduTieredFeatureIndex.scala | Scala | apache-2.0 | 6,868 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.platform.types
import cogx.cogmath.hypercircuit.{Hyperedge, Hypernode}
import cogx.compiler.codegenerator.opencl.cpukernels._
import cogx.compiler.parser.op.{FieldReduceMedianOp, OuterProductOp}
import cogx.platform.checkpoint.{ObjectRestorer, RestoreFactory, Saveable, ObjectSaver}
import cogx.platform.opencl.OpenCLRestoredDeviceKernel
import cogx.platform.types.KernelCodeTypes.KernelCodeType
import cogx.platform.types.KernelTypes._
import cogx.runtime.{ComputeGraphSaverState, ComputeGraphRestorerState}
import scala.collection.mutable.ArrayBuffer
/** Base class for all kernels, CPU or Device, for all platforms (CUDA or
* OpenCL)
*
* This manages connectivity between kernels, allowing kernels to be connected
* and disconnected. The responsibility for adding output edges is left to
* the subclasses.
*
* @param opcode The operation performed by this kernel.
* @param _inputs The virtual field registers driving the inputs of this kernel.
* @param fieldTypes The types of the results generated by this kernel.
*
* @author Greg Snider
*/
private[cogx]
abstract class AbstractKernel(val opcode: Opcode,
_inputs: Array[VirtualFieldRegister],
val fieldTypes: Array[FieldType])
extends Hypernode[AbstractKernel](_inputs.asInstanceOf[Array[Hyperedge[AbstractKernel]]])
with Saveable
{
/** The language of the kernel. This should reside in a platform-independent
* subclass of AbstractKernel named 'DeviceKernel' and there should be no
* platform-dependent CPU kernels (e.g. OpenCLCpuKernel), but such is life.
*/
val kernelCodeType: KernelCodeType
/** The type of the kernel, either DeviceKernel, or one of a number of CPU kernel types. */
val kernelType: KernelType
/** Unique ID for each Kernel. */
val id = AbstractKernel.allocateID
/* "Alias" is probably not the right word to use here. This is a quick fix
* for a debugger issue that arises when we distribute a graph across
* multiple nodes. During partioning of the user's compute graph, we copy
* kernels from the unpartioned graph to subcircuits (as the original
* circuit's structure is immutable and can't be broken apart). The
* subcircuits are what is bound to compute devices, but the debugger gets
* the original, unpartitioned graph. The kernels the debugger knows about
* aren't actually bound to a compute device. It needs a mapping from the
* kernels in the original circuit to their copies that have been distributed
* throughout the cluster. That's what these "aliases" are for. If we copy
* a kernel to a subcircuit, we add the copy's ID here. Realistically, we'd
* expect that there's only ever a single copy (a single kernel probably
* shouldn't be distributed to multiple nodes, except perhaps for some sort
* of failure recovery mechanism?), but as there's nothing currently
* preventing a kernel from being copied multiple times, I've set this up
* for allowing multiple aliases. */
private var _aliases = collection.mutable.Set.empty[Int]
def aliases: Option[Set[Int]] = if (_aliases.size == 0) None else Some(_aliases.toSet)
def addAlias(id: Int) { _aliases += id }
/** The inputs to this kernel, as VirtualFieldRegisters, not as the base class
* Hyperedge[AbstractKernel]
*/
override def inputs = super.inputs.asInstanceOf[Seq[VirtualFieldRegister]]
/** The outputs to this kernel, as VirtualFieldRegisters, not as the base class
* Hyperedge[AbstractKernel]
*/
override def outputs = super.outputs.asInstanceOf[Seq[VirtualFieldRegister]]
/** Check if any output register of the kernel is "probed".
*
* A probed field register cannot be optimized away and is guaranteed to be
* visible.
*
* @return True if any output register of the kernel is probed.
*/
def probed = outputs.exists(_.probed)
/** Mark all of the kernel outputs as probed. Once marked, it cannot be unprobed. */
def markProbed() {
outputs.foreach(_.markProbed)
}
/** Mark a single kernel output as probed. Once marked, it cannot be unprobed. */
def markProbed(outputIndex: Int) {
outputs(outputIndex).markProbed
}
/** Get the name of the kernel (usually the name of its single output) */
def name = outputs.map(_.name).mkString("|")
/** Synonym for the input kernels to this kernel. */
def children = inputs.map(_.source)
/** Test "this" and "other" for deep equality, allowing "==" to work.
*
* Two AbstractKernels are equal if: (1) they are instances of the same class; (2) they have
* the same opcode; and (3) they have the same input field registers and output field types.
*
* @return True if `this` and `other` are deeply equal, false otherwise.
*/
final override def equals(other: Any): Boolean = {
other match {
case that: AbstractKernel =>
if (that eq this)
true
else if ((that canEqual this) && (that.opcode == this.opcode) &&
(that.inputs.length == this.inputs.length) &&
(that.outputs.length == this.outputs.length) &&
(this.outputs zip that.outputs).forall(x => x._1.fieldType == x._2.fieldType))
{
// Because of the use of 'eq' here, common sub expressions will be
// consolidated from the leaves toward the roots.
// Equal kernels must be sourced from the same VirtualFieldRegisters.
for (i <- 0 until inputs.length) {
if (!(this.inputs(i) eq that.inputs(i)))
return false
}
true
}
else
false
case _ => false
}
}
/** Helper for equals. Default canEqual requires that the two objects are the
* same subclass of AbstractKernel. Subclasses may override this if a more
* restrictive equality is required.
*/
def canEqual(other: Any): Boolean = {
other.isInstanceOf[AbstractKernel] &&
other.asInstanceOf[AbstractKernel].getClass == this.getClass
}
/** Required because of overriding equals. This can be tricky. Two objects
* that might be 'equal' should have the same hashcode. A kernel that has been
* entered into a HashMap, then has an input replaced with an equal kernel
* should not have its hash changed. However the field register would change
* during such a replacement. Thus, the kernel 'hashCode'
* shouldn't involve the intervening field registers. */
override val hashCode: Int = {
var code = getClass.hashCode
for (in <- inputs) {
if (in != null && in.source != null)
code += in.source.hashCode
}
def fieldTypeHash =
fieldTypes.zipWithIndex.map(x => x._1.hashCode * x._2).foldLeft(0)(_ + _)
code += opcode.hashCode + 41 * (inputs.length + 41 * (fieldTypeHash + 41))
code
}
/** Create a clone of this kernel that uses a new set of virtual field registers
* as inputs. Useful for breaking a large circuit apart into smaller subcircuits. */
def copyWithNewInputs(inputs: Array[VirtualFieldRegister]): AbstractKernel
/** Save this AbstractKernel instance using the facilities of the ObjectSaver */
def save(saver: ObjectSaver) {
val saverState = saver.asInstanceOf[ComputeGraphSaverState]
import saverState._
// A map from VirtualFieldRegister to its index, created as the kernels are saved.
// Inputs of this kernel should be part of the map already, so simply look up their indices.
val inputIndices = inputs.map(vfrToIndex(_)).toArray
// Outputs of this kernel should be part of the map already, so simply look up their indices.
val outputIndices = outputs.map(vfrToIndex(_)).toArray
saver.writeInt("kernelId", id)
saver.writeString("kernelType", kernelType.toString)
kernelType match {
case UnrestorableKernelType =>
println("Error: The following kernel is not restorable: " + this.toString)
case _ =>
if (!kernelType.supportedByNativeRuntime)
println("Warning: The following kernel is not restorable within the native runtime: " + this.toString)
}
saver.writeIntArray("inputFields", inputIndices)
saver.writeIntArray("outputFields", outputIndices)
}
}
/** Companion for AbstractKernel that generates unique IDs.
*/
private[cogx]
object AbstractKernel extends RestoreFactory {
/** Number of allocated AbstractKernels. */
private var allocated = 0
/** Allocate a unique ID for an AbstractKernel. */
private def allocateID: Int = {
allocated += 1
allocated
}
/** Create an AbstractKernel instance through use of the provided ObjectRestorer
*
* @param restorer The restorer through which to read the new object state.
* @return The created AbstractKernel based on the read information.
*/
def restore(restorer: ObjectRestorer) = {
val restorerState = restorer.asInstanceOf[ComputeGraphRestorerState]
import restorerState._
val kernelId = restorer.readInt("kernelId")
val kernelType = restorer.readString("kernelType")
val inputIndices = restorer.readIntArray("inputFields")
val outputIndices = restorer.readIntArray("outputFields")
// inputs should already have been encountered and added to the global 'vfrs' collection:
val inputRegisters = inputIndices.map(vfrs(_))
val outputFieldTypes = outputIndices.map(fieldInfos(_).fieldType)
// Check output indices to make sure the vfrs collection is in sync:
for (i <- 0 until outputIndices.length) {
// println(s"i = $i, outputIndices = ${outputIndices(i)}, vfrs.length = ${vfrs.length}")
require(outputIndices(i) == vfrs.length + i,
"Internal error: inconsistency in output VirtualFieldRegister count.")
}
// Trust for now that kernelIds are unique. Set up the allocated variable to give the next created
// kernel the proper Id. Note that kernels won't be numbered densely and consecutively because of kernel merging.
allocated = kernelId - 1
val newKernel = KernelTypes(kernelType) match {
case DeviceKernelType =>
OpenCLRestoredDeviceKernel.restore(restorer, kernelId, inputRegisters, outputFieldTypes)
case RecurrenceKernelType =>
require(inputRegisters.length == 0)
require(outputFieldTypes.length == 1)
RecurrentFieldKernel.restore(restorer, outputFieldTypes(0))
case ConstantKernelType =>
require(inputRegisters.length == 0)
require(outputFieldTypes.length == 1)
ConstantFieldKernel.restore(restorer, outputFieldTypes(0))
case SensorKernelType =>
require(inputRegisters.length == 0)
require(outputFieldTypes.length == 1)
SensorKernelRestorer.restore(restorer, outputFieldTypes(0))
case ActuatorKernelType =>
require(inputRegisters.length == 1)
require(outputFieldTypes.length == 0)
ActuatorKernelRestorer.restore(restorer, inputRegisters(0))
case CPUOuterProductKernelType =>
CPUOuterProductKernel(inputRegisters, OuterProductOp, outputFieldTypes(0))
case CPUScalarReduceMedianKernelType =>
CPUScalarReduceMedianKernel(inputRegisters(0), FieldReduceMedianOp, outputFieldTypes(0))
case x => throw new RuntimeException(s"Can't currently restore kernel $kernelId, kernel type $x")
}
newKernel.outputs.foreach(vfrs += _)
// Transfer the saved vfr info (field name and whether it's probed or not) to the restored kernel's outputs
for (i <- 0 until newKernel.outputs.length) {
val outputInfo = fieldInfos(outputIndices(i))
if (outputInfo.probed)
newKernel.outputs(i).markProbed()
newKernel.outputs(i).name = outputInfo.name
}
newKernel
}
}
| hpe-cct/cct-core | src/main/scala/cogx/platform/types/AbstractKernel.scala | Scala | apache-2.0 | 12,234 |
package scalaprops
import scalaz._
import scalaz.std.anyVal._
import ScalapropsScalaz._
object EitherTTest extends Scalaprops {
val iListBindRec =
scalazlaws.bindRec.laws[({ type l[a] = EitherT[Byte, IList, a] })#l].andThenParam(Param.maxSize(1))
val maybe = {
type F[A] = EitherT[Int, Maybe, A]
Properties.list(
scalazlaws.equal.all[EitherT[Int, Maybe, Int]],
scalazlaws.monadPlus.all[F],
scalazlaws.bindRec.all[F],
scalazlaws.monadError.all[F, Int],
scalazlaws.traverse.all[F]
)
}
val maybe2 =
scalazlaws.bitraverse.all[({ type l[a, b] = EitherT[a, Maybe, b] })#l]
val iList = {
type F[A] = EitherT[Int, IList, A]
Properties.list(
scalazlaws.equal.all[EitherT[Int, IList, Int]],
scalazlaws.monadPlus.all[F],
scalazlaws.monadError.all[F, Int],
scalazlaws.traverse.all[F]
)
}
val nel = {
type F[A] = EitherT[Int, NonEmptyList, A]
Properties.list(
scalazlaws.equal.all[EitherT[Int, NonEmptyList, Int]],
scalazlaws.monadPlus.all[F],
scalazlaws.monadError.all[F, Int],
scalazlaws.traverse.all[F]
)
}
val monadTrans = scalazlaws.monadTrans.all[({ type l[f[_], a] = EitherT[Int, f, a] })#l]
}
| scalaprops/scalaprops | scalaz/src/test/scala/scalaprops/EitherTTest.scala | Scala | mit | 1,235 |
package org.scaladebugger.api.profiles.java.info.events
import com.sun.jdi.event._
import com.sun.jdi.{Location, ReferenceType, ThreadReference, VirtualMachine}
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.events.JDIEventArgument
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import org.scaladebugger.api.profiles.traits.info.events._
import org.scaladebugger.api.profiles.traits.info.{InfoProducer, LocationInfo, ThreadInfo}
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
class JavaLocatableEventInfoSpec extends ParallelMockFunSpec {
private val mockScalaVirtualMachine = mock[ScalaVirtualMachine]
private val mockInfoProducer = mock[InfoProducer]
private val mockLocatableEvent = mock[LocatableEvent]
private val mockJdiRequestArguments = Seq(mock[JDIRequestArgument])
private val mockJdiEventArguments = Seq(mock[JDIEventArgument])
private val mockJdiArguments =
mockJdiRequestArguments ++ mockJdiEventArguments
private val mockVirtualMachine = mock[VirtualMachine]
private val mockThreadReference = mock[ThreadReference]
private val mockThreadReferenceType = mock[ReferenceType]
private val mockLocation = mock[Location]
private val javaLocatableEventInfoProfile = new JavaLocatableEventInfo(
scalaVirtualMachine = mockScalaVirtualMachine,
infoProducer = mockInfoProducer,
locatableEvent = mockLocatableEvent,
jdiArguments = mockJdiArguments
)(
_virtualMachine = mockVirtualMachine,
_thread = mockThreadReference,
_threadReferenceType = mockThreadReferenceType,
_location = mockLocation
)
describe("JavaLocatableEventInfo") {
describe("#toJavaInfo") {
it("should return a new instance of the Java profile representation") {
val expected = mock[LocatableEventInfo]
// Event info producer will be generated in its Java form
val mockEventInfoProducer = mock[EventInfoProducer]
(mockInfoProducer.eventProducer _).expects()
.returning(mockEventInfoProducer).once()
(mockEventInfoProducer.toJavaInfo _).expects()
.returning(mockEventInfoProducer).once()
// Java version of event info producer creates a new event instance
// NOTE: Cannot validate second set of args because they are
// call-by-name, which ScalaMock does not support presently
(mockEventInfoProducer.newLocatableEventInfo(
_: ScalaVirtualMachine,
_: LocatableEvent,
_: Seq[JDIArgument]
)(
_: VirtualMachine,
_: ThreadReference,
_: ReferenceType,
_: Location
)).expects(
mockScalaVirtualMachine,
mockLocatableEvent,
mockJdiArguments,
*, *, *, *
).returning(expected).once()
val actual = javaLocatableEventInfoProfile.toJavaInfo
actual should be (expected)
}
}
describe("#isJavaInfo") {
it("should return true") {
val expected = true
val actual = javaLocatableEventInfoProfile.isJavaInfo
actual should be (expected)
}
}
describe("#toJdiInstance") {
it("should return the JDI instance this profile instance represents") {
val expected = mockLocatableEvent
val actual = javaLocatableEventInfoProfile.toJdiInstance
actual should be (expected)
}
}
describe("#thread") {
it("should return a new instance of the thread info profile") {
val expected = mock[ThreadInfo]
// NOTE: Cannot validate second set of args because they are
// call-by-name, which ScalaMock does not support presently
(mockInfoProducer.newThreadInfo(
_: ScalaVirtualMachine,
_: ThreadReference
)(
_: VirtualMachine,
_: ReferenceType
)).expects(
mockScalaVirtualMachine,
mockThreadReference,
*, *
).returning(expected).once()
val actual = javaLocatableEventInfoProfile.thread
actual should be (expected)
}
}
describe("#location") {
it("should return a new instance of the location info profile") {
val expected = mock[LocationInfo]
(mockInfoProducer.newLocationInfo _)
.expects(mockScalaVirtualMachine, mockLocation)
.returning(expected).once()
val actual = javaLocatableEventInfoProfile.location
actual should be (expected)
}
}
describe("#toString") {
it("should return the string representation of the JDI event object") {
val expected = mockLocatableEvent.toString // NOTE: Cannot mock toString method
val actual = javaLocatableEventInfoProfile.toString
actual should be (expected)
}
}
}
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/java/info/events/JavaLocatableEventInfoSpec.scala | Scala | apache-2.0 | 4,892 |
package controllers
import controllers.AuthController.{UserLoginData, UserSignupData}
import org.postgresql.util.PSQLException
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.mvc._
import services.{AuthService, DatabaseService}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
class AuthController(cc: ControllerComponents, databaseService: DatabaseService, authService: AuthService, userAuthAction: UserAuthAction)(implicit ec: ExecutionContext)
extends AbstractController(cc) {
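  // Illustrative request bodies (not part of the original source). Only the field
  // names come from the JSON readers in the companion object below; the values are
  // assumptions for demonstration:
  //
  //   login:  {"email": "user@example.com", "password": "secret"}
  //   signup: {"email": "user@example.com", "username": "user", "password": "secret"}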
def login: Action[AnyContent] = Action.async { implicit request =>
request.body.asJson match {
case Some(json) => json.validate[UserLoginData].fold(
        errors => Future.successful(BadRequest(Json.obj("error" -> "Expected email and password"))),
userLoginData => {
for {
cookieOptionTry <- authService.login(userLoginData.email, userLoginData.password)
} yield {
cookieOptionTry match {
case Success(cookieOption) => cookieOption match {
case Some(cookie) => Ok(Json.obj("message" -> "Log in successful")).withCookies(cookie)
case None => Unauthorized(Json.obj("error" -> "Invalid login credentials provided"))
}
case Failure(_) => InternalServerError(Json.obj("error" -> "Unexpected internal error occurred"))
}
}
}
)
case None => Future.successful(BadRequest(Json.obj("error" -> "Expected JSON body")))
}
}
def signup: Action[AnyContent] = Action.async { implicit request =>
request.body.asJson match {
case Some(json) => json.validate[UserSignupData].fold(
errors => Future.successful(BadRequest(Json.obj("error" -> "Invalid email, username or password"))),
userSignupData =>
for {
result <- authService.signup(userSignupData.email, userSignupData.username, userSignupData.password)
} yield result match {
case Success(_) => Ok(Json.obj("message" -> "Signup successful"))
case Failure(e: PSQLException) => Conflict(Json.obj("error" -> e.getServerErrorMessage.getConstraint))
case Failure(_) => InternalServerError(Json.obj("error" -> "Unexpected internal error occurred"))
}
)
case None => Future.successful(BadRequest(Json.obj("error" -> "Expected JSON body")))
}
}
def logout = userAuthAction { implicit request =>
Ok(Json.obj("message" -> "Successfully logged out")).discardingCookies(DiscardingCookie(authService.cookieHeader))
}
def getUser = userAuthAction { implicit request =>
Ok(Json.toJsObject(request.user) - "hashedPassword" - "salt")
}
}
object AuthController {
case class UserLoginData(email: String, password: String)
case class UserSignupData(email: String, username: String, password: String)
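  // Illustrative behaviour of the email regex below (not part of the original source):
  // "user@example.com" and "first.last@sub.domain.org" are accepted, while
  // "user@host" is rejected because the domain part must be dotted or a bracketed IP.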
private def emailRegex =
"^(([^<>()\\[\\]\\\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,;:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))$".r
implicit val userLoginDataReads: Reads[UserLoginData] = Json.reads[UserLoginData]
implicit val userSignupDataReads: Reads[UserSignupData] = (
(JsPath \ "email").read[String].filter(JsonValidationError("Invalid email address"))(emailRegex.findFirstIn(_).isDefined) and
(JsPath \ "username").read[String].filter(JsonValidationError("Invalid username"))(_.length > 0) and
(JsPath \ "password").read[String].filter(JsonValidationError("Invalid password"))(_.length > 0)
)(UserSignupData.apply _)
}
| stuart-xyz/rate-my-area | app/controllers/AuthController.scala | Scala | mit | 3,640 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.sparql
import com.signalcollect.triplerush.TriplePattern
object VariableEncoding {
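  // As the arithmetic below implies, query variables are encoded as negative
  // integers while constants are non-negative, so decoding index 0 corresponds to
  // variable -1, index 1 to -2, and so on. Illustrative values (not part of the
  // original source, assuming a TriplePattern(s, p, o) constructor):
  //   variableIdToDecodingIndex(-1) == 0
  //   variableIdToDecodingIndex(-3) == 2
  //   requiredVariableBindingsSlots(Seq(TriplePattern(-1, 10, -2))) == 2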
@inline def variableIdToDecodingIndex(variableId: Int) = -(variableId + 1)
@inline def requiredVariableBindingsSlots(query: Seq[TriplePattern]): Int = {
var minId = 0
query.foreach {
tp =>
minId = math.min(minId, tp.s)
minId = math.min(minId, tp.p)
minId = math.min(minId, tp.o)
}
-minId
}
}
| hicolour/triplerush | src/main/scala/com/signalcollect/triplerush/sparql/VariableEncoding.scala | Scala | apache-2.0 | 1,118 |
package com.pragmasoft.eventaggregator.streams.esrestwriter
import java.util
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.pragmasoft.eventaggregator.GenericRecordEventJsonConverter.EventHeaderDescriptor
import com.pragmasoft.eventaggregator.model.{EventKafkaLocation, KafkaAvroEvent}
import com.pragmasoft.eventaggregator.support.SpecificRecordEventFixture
import com.typesafe.scalalogging.LazyLogging
import io.searchbox.action.Action
import io.searchbox.client.{JestClient, JestClientFactory, JestResult, JestResultHandler}
import io.searchbox.core.DocumentResult
import org.mockito.Mockito.when
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}
import scala.concurrent.Future
class EsRestWriterActorSpec extends TestKit(ActorSystem("EsRestWriterActorSpec"))
with WordSpecLike with MockitoSugar with Matchers with LazyLogging with SpecificRecordEventFixture with Eventually {
override implicit val patienceConfig: PatienceConfig = PatienceConfig(Span(15, Seconds), Span(200, Milliseconds))
"EsRestWriterActor" should {
"Write into ES using the created JEST Client when receiving a Write command" in {
val jestClientFactory = mock[JestClientFactory]
val jestClientCallCount = new AtomicInteger(0)
val stubJestClient = new JestClient {
override def shutdownClient(): Unit = ???
override def execute[T <: JestResult](clientRequest: Action[T]): T = ???
override def setServers(servers: util.Set[String]): Unit = ???
override def executeAsync[T <: JestResult](clientRequest: Action[T], jestResultHandler: JestResultHandler[_ >: T]): Unit = {
jestClientCallCount.incrementAndGet()
}
}
when(jestClientFactory.getObject).thenReturn(stubJestClient)
val writer = system.actorOf(EsRestWriterActor.props(jestClientFactory, () => "Index", EventHeaderDescriptor(Some("header/id"), Some("header/eventTs"))))
writer ! EsRestWriterActor.Write(KafkaAvroEvent(EventKafkaLocation("topic1", 1, 100), aProfileCreatedEvent))
eventually {
jestClientCallCount.get() shouldBe 1
}
}
"Notify the write has been successful with a WriteResult message to the sender" in {
val jestClientFactory = mock[JestClientFactory]
val jestClientCallCount = new AtomicInteger(0)
val stubJestClient = new JestClient {
override def shutdownClient(): Unit = ???
override def execute[T <: JestResult](clientRequest: Action[T]): T = ???
override def setServers(servers: util.Set[String]): Unit = ???
import scala.concurrent.ExecutionContext.Implicits.global
override def executeAsync[T <: JestResult](clientRequest: Action[T], jestResultHandler: JestResultHandler[_ >: T]): Unit = {
jestClientCallCount.incrementAndGet()
Future {
val successfulIndexResult = mock[DocumentResult]
when(successfulIndexResult.isSucceeded).thenReturn(true)
jestResultHandler.completed(successfulIndexResult.asInstanceOf[T])
}
}
}
when(jestClientFactory.getObject).thenReturn(stubJestClient)
val writer = system.actorOf(EsRestWriterActor.props(jestClientFactory, () => "Index", EventHeaderDescriptor(Some("header/id"), Some("header/eventTs"))))
val sender = TestProbe()
val event = KafkaAvroEvent(EventKafkaLocation("topic1", 1, 100), aProfileCreatedEvent)
sender.send(writer, EsRestWriterActor.Write(event))
sender.expectMsg(EsRestWriterActor.WriteResult(event, true))
}
"Notify the write failed with a WriteResult message to the sender" in {
val jestClientFactory = mock[JestClientFactory]
val jestClientCallCount = new AtomicInteger(0)
val stubJestClient = new JestClient {
override def shutdownClient(): Unit = ???
override def execute[T <: JestResult](clientRequest: Action[T]): T = ???
override def setServers(servers: util.Set[String]): Unit = ???
import scala.concurrent.ExecutionContext.Implicits.global
override def executeAsync[T <: JestResult](clientRequest: Action[T], jestResultHandler: JestResultHandler[_ >: T]): Unit = {
jestClientCallCount.incrementAndGet()
Future {
val successfulIndexResult = mock[DocumentResult]
when(successfulIndexResult.isSucceeded).thenReturn(false)
jestResultHandler.completed(successfulIndexResult.asInstanceOf[T])
}
}
}
when(jestClientFactory.getObject).thenReturn(stubJestClient)
val writer = system.actorOf(EsRestWriterActor.props(jestClientFactory, () => "Index", EventHeaderDescriptor(Some("header/id"), Some("header/eventTs"))))
val sender = TestProbe()
val event = KafkaAvroEvent(EventKafkaLocation("topic1", 1, 100), aProfileCreatedEvent)
sender.send(writer, EsRestWriterActor.Write(event))
sender.expectMsg(EsRestWriterActor.WriteResult(event, false))
}
"Notify the write failed with an exception with a WriteResult message to the sender" in {
val jestClientFactory = mock[JestClientFactory]
val jestClientCallCount = new AtomicInteger(0)
val stubJestClient = new JestClient {
override def shutdownClient(): Unit = ???
override def execute[T <: JestResult](clientRequest: Action[T]): T = ???
override def setServers(servers: util.Set[String]): Unit = ???
import scala.concurrent.ExecutionContext.Implicits.global
override def executeAsync[T <: JestResult](clientRequest: Action[T], jestResultHandler: JestResultHandler[_ >: T]): Unit = {
jestClientCallCount.incrementAndGet()
Future {
jestResultHandler.failed(new Exception("Simulating an exception while writing to ES"))
}
}
}
when(jestClientFactory.getObject).thenReturn(stubJestClient)
val writer = system.actorOf(EsRestWriterActor.props(jestClientFactory, () => "Index", EventHeaderDescriptor(Some("header/id"), Some("header/eventTs"))))
val sender = TestProbe()
val event = KafkaAvroEvent(EventKafkaLocation("topic1", 1, 100), aProfileCreatedEvent)
sender.send(writer, EsRestWriterActor.Write(event))
sender.expectMsg(EsRestWriterActor.WriteResult(event, false))
}
}
}
| galarragas/event-aggregator | src/test/scala/com/pragmasoft/eventaggregator/streams/esrestwriter/EsRestWriterActorSpec.scala | Scala | apache-2.0 | 6,522 |
package io.eels.component.parquet.avro
import io.eels.Predicate
import io.eels.component.parquet.{ParquetPredicateBuilder, ParquetReaderConfig}
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.avro.{AvroParquetReader, AvroReadSupport}
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.hadoop.ParquetReader
/**
* Helper function to create a parquet reader, using the apache parquet library.
* The reader supports optional predicate (for row filtering) and a
* projection schema (for column filtering).
*/
object AvroParquetReaderFn {
private val config = ParquetReaderConfig()
/**
* Creates a new reader for the given path.
*
* @param predicate if set then a parquet predicate is applied to the rows
* @param projectionSchema if set then the schema is used to narrow the fields returned
*/
def apply(path: Path,
predicate: Option[Predicate],
projectionSchema: Option[Schema]): ParquetReader[GenericRecord] = {
// The parquet reader can use a projection by setting a projected schema onto a conf object
def configuration(): Configuration = {
val conf = new Configuration()
projectionSchema.foreach { it =>
AvroReadSupport.setAvroReadSchema(conf, it)
AvroReadSupport.setRequestedProjection(conf, it)
}
//conf.set(ParquetInputFormat.DICTIONARY_FILTERING_ENABLED, "true")
conf.set(org.apache.parquet.hadoop.ParquetFileReader.PARQUET_READ_PARALLELISM, config.parallelism.toString)
conf
}
// a filter is set when we have a predicate for the read
def filter(): FilterCompat.Filter = predicate.map(ParquetPredicateBuilder.build)
.map(FilterCompat.get)
.getOrElse(FilterCompat.NOOP)
AvroParquetReader.builder[GenericRecord](path)
.withCompatibility(false)
.withConf(configuration())
.withFilter(filter())
.build()
.asInstanceOf[ParquetReader[GenericRecord]]
}
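  // Illustrative usage sketch (not part of the original source; the path below is
  // an assumption for demonstration only):
  //
  //   val reader = AvroParquetReaderFn(
  //     path = new Path("hdfs:///data/events.parquet"), // hypothetical file
  //     predicate = None,                               // no row filtering
  //     projectionSchema = None                         // read all columns
  //   )
  //   try Iterator.continually(reader.read()).takeWhile(_ != null).foreach(println)
  //   finally reader.close()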
}
| stheppi/eel | eel-components/src/main/scala/io/eels/component/parquet/avro/AvroParquetReaderFn.scala | Scala | apache-2.0 | 2102
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import scala.collection.GenTraversable
import scala.collection.mutable.Buffer
import scala.collection.mutable.ListBuffer
import org.scalactic.{Every, One, Many, StringNormalizations}
import org.scalactic.UnitSpec
import org.scalactic.NormalizingEquality
import org.scalatest.CompatParColls.Converters._
class NonEmptyMapSpec extends UnitSpec {
"A NonEmptyMap" can "be constructed with one element" in {
val onesie = NonEmptyMap(3 -> "three")
onesie.size shouldBe 1
onesie(3) shouldBe "three"
}
it can "be constructed with many elements" in {
val twosie = NonEmptyMap(2 -> "two", 3 -> "three")
twosie.size shouldBe 2
twosie(2) shouldBe "two"
twosie(3) shouldBe "three"
val threesie = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")
threesie.size shouldBe 3
threesie(1) shouldBe "one"
threesie(2) shouldBe "two"
threesie(3) shouldBe "three"
}
it can "be constructed from a GenTraversable via the from method on NonEmptyMap singleton" in {
NonEmptyMap.from(Map.empty[Int, String]) shouldBe None
NonEmptyMap.from(Map(1 -> "one")) shouldBe Some(NonEmptyMap(1 -> "one"))
NonEmptyMap.from(Map(1 -> "one", 2 -> "two", 3 -> "three")) shouldBe Some(NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three"))
// SKIP-SCALATESTJS,NATIVE-START
NonEmptyMap.from(Map.empty[Int, String].par) shouldBe None
NonEmptyMap.from(Map(1 -> "one").par) shouldBe Some(NonEmptyMap(1 -> "one"))
NonEmptyMap.from(Map(1 -> "one", 2 -> "two", 3 -> "three").par) shouldBe Some(NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three"))
// SKIP-SCALATESTJS,NATIVE-END
}
it can "be deconstructed with NonEmptyMap" in {
NonEmptyMap(1 -> "one") match {
case NonEmptyMap((x, y)) =>
x shouldEqual 1
y shouldEqual "one"
case _ => fail()
}
NonEmptyMap("hi" -> "hello") match {
case NonEmptyMap((hi, hello)) =>
hi shouldEqual "hi"
hello shouldEqual "hello"
case _ => fail()
}
}
it can "be deconstructed with Many" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), (y1, y2), (z1, z2)) => ((x1, x2), (y1, y2), (z1, z2)) shouldEqual (2 -> "two", 3 -> "three", 1 -> "one")
case _ => fail()
}
NonEmptyMap("hi" -> "hello", "there" -> "here") match {
case NonEmptyMap((s1, s2), (t1, t2)) =>
(s1, s2) shouldEqual ("there", "here")
(t1, t2) shouldEqual ("hi", "hello")
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), (y1, y2), _) =>
(x1, x2) shouldEqual (2, "two")
(y1, y2) shouldEqual (3, "three")
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five") match {
case NonEmptyMap((x1, x2), (y1, y2), _*) =>
(x1, x2) shouldEqual (5, "five")
(y1, y2) shouldEqual (1, "one")
case _ => fail()
}
}
it can "be deconstructed with Every" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), (y1, y2), (z1, z2)) =>
((x1, x2), (y1, y2), (z1, z2)) shouldEqual ((2, "two"), (3, "three"), (1, "one"))
case _ => fail()
}
NonEmptyMap("hi" -> "hello", "there" -> "here") match {
case NonEmptyMap((s1, s2), (t1, t2)) =>
((s1, s2), (t1, t2)) shouldEqual (("there", "here"), ("hi", "hello"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap(x, y, _) => (x, y) shouldEqual ((2, "two"), (3, "three"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five") match {
case NonEmptyMap((x1, x2), (y1, y2), _*) => ((x1, x2), (y1, y2)) shouldEqual ((5, "five"), (1, "one"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), _*) => (x1, x2) shouldEqual (2, "two")
case _ => fail()
}
NonEmptyMap("hi" -> "hello") match {
case NonEmptyMap((hi, hello)) => (hi, hello) shouldEqual ("hi", "hello")
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), (y1, y2), (z1, z2)) => ((x1, x2), (y1, y2), (z1, z2)) shouldEqual ((2, "two"), (3, "three"), (1, "one"))
case _ => fail()
}
NonEmptyMap("hi" -> "hello", "there" -> "here") match {
case NonEmptyMap((s1, s2), (t1, t2)) => ((s1, s2), (t1, t2)) shouldEqual (("there", "here"), ("hi", "hello"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), (y1, y2), _) => ((x1, x2), (y1, y2)) shouldEqual ((2, "two"), (3, "three"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five") match {
case NonEmptyMap((x1, x2), (y1, y2), _*) => ((x1, x2), (y1, y2)) shouldEqual ((5, "five"), (1, "one"))
case _ => fail()
}
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") match {
case NonEmptyMap((x1, x2), _*) => (x1, x2) shouldEqual (2, "two")
case _ => fail()
}
}
it should "have an apply method" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")(1) shouldEqual "one"
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")(2) shouldEqual "two"
NonEmptyMap("hi" -> "hello")("hi") shouldEqual "hello"
NonEmptyMap(7 -> "seven", 8 -> "eight", 9 -> "nine")(9) shouldEqual "nine"
the [NoSuchElementException] thrownBy {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")(0)
} should have message "key not found: 0"
}
it should "have a ++ method that takes another NonEmptyMap" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ NonEmptyMap(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ NonEmptyMap(4 -> "four", 5 -> "five") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ NonEmptyMap(4 -> "four", 5 -> "five", 6 -> "six") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five", 6 -> "six")
}
it should "have a ++ method that takes an Every" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ One(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Every(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Every(4 -> "four", 5 -> "five", 6 -> "six") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five", 6 -> "six")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ One(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ One(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Every(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Every(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ One(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
}
it should "have a ++ method that takes a GenTraversableOnce" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Map(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Vector(4 -> "four", 5 -> "five", 6 -> "six") shouldEqual NonEmptyMap(5 -> "five", 6 -> "six", 1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ GenTraversable(4 -> "four") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Set(4 -> "four", 5 -> "five") shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") ++ Set(4 -> "four", 5 -> "five").iterator shouldEqual NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five")
}
it should "have a +: method" in {
(0, "zero") +: NonEmptyMap(1 -> "one") shouldBe NonEmptyMap(0 -> "zero", 1 -> "one")
(0, "zero") +: NonEmptyMap(1 -> "one", 2 -> "two") shouldBe NonEmptyMap(0 -> "zero", 1 -> "one", 2 -> "two")
("zero", 0) +: NonEmptyMap("one" -> 1, "two" -> 2) shouldBe NonEmptyMap("zero" -> 0, "one" -> 1, "two" -> 2)
}
it should "implement PartialFunction[K, V]" in {
val pf1: PartialFunction[Int, String] = NonEmptyMap(1 -> "one")
pf1.isDefinedAt(1) shouldBe true
pf1.isDefinedAt(0) shouldBe false
}
it should "have 3 addString methods" in {
NonEmptyMap("hi" -> "hello").addString(new StringBuilder) shouldBe new StringBuilder("hi -> hello")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").addString(new StringBuilder) shouldBe new StringBuilder("2 -> two3 -> three1 -> one")
NonEmptyMap("hi" -> "hello").addString(new StringBuilder, "#") shouldBe new StringBuilder("hi -> hello")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").addString(new StringBuilder, "#") shouldBe new StringBuilder("2 -> two#3 -> three#1 -> one")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").addString(new StringBuilder, ", ") shouldBe new StringBuilder("2 -> two, 3 -> three, 1 -> one")
NonEmptyMap("hi" -> "hello").addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<hi -> hello>")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<2 -> two#3 -> three#1 -> one>")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").addString(new StringBuilder, " ( ", ", ", " ) ") shouldBe new StringBuilder(" ( 2 -> two, 3 -> three, 1 -> one ) ")
}
it should "have an andThen method (inherited from PartialFunction)" in {
val pf1 = NonEmptyMap(1 -> "one") andThen (_ + 1)
pf1(1) shouldEqual "one1"
val pf2 = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three") andThen (_ + 1)
pf2(1) shouldEqual "one1"
pf2(2) shouldEqual "two1"
pf2(3) shouldEqual "three1"
}
it should "have an applyOrElse method (inherited from PartialFunction)" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").applyOrElse(0, (_: Int) + " not found") shouldEqual "0 not found"
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").applyOrElse(1, (_: Int) + " not found") shouldEqual "one"
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").applyOrElse(2, (_: Int) + " not found") shouldEqual "two"
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").applyOrElse(3, (_: Int) + " not found") shouldEqual "three"
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").applyOrElse(4, (_: Int) + " not found") shouldEqual "4 not found"
}
  it should "have a canEqual method" is pending
  // it should "have a charAt method" is pending
// Could have an implicit conversion from Every[Char] to CharSequence like
// there is for Seq in Predef.
/*
  scala> Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).collect { case i if i > 10 => i / 2 }
res1: scala.collection.immutable.Vector[Int] = Vector()
*/
  it should "have a collectFirst method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6", 7 -> "7", 8 -> "8", 9 -> "9", 10 -> "10") collectFirst { case (i, _) if i > 10 => i / 2 } shouldBe None
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6", 7 -> "7", 8 -> "8", 9 -> "9", 10 -> "10", 11 -> "11", 12 -> "12") collectFirst { case (i, _) if i > 10 => i / 2 } shouldBe Some(6)
}
/*
scala> Vector(1).combinations(2).toVector
res2: Vector[scala.collection.immutable.Vector[Int]] = Vector()
*/
/*
companion method not relevant. Has an empty and other GenTraverable stuff.
*/
it should "have an compose method, inherited from PartialFunction" in {
val fn: Int => String = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").compose(i => i + 1)
fn(0) shouldBe "one"
fn(1) shouldBe "two"
fn(2) shouldBe "three"
}
it should "have a contains method" in {
val e = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")
e.contains(-1) shouldBe false
e.contains(0) shouldBe false
e.contains(1) shouldBe true
e.contains(2) shouldBe true
e.contains(3) shouldBe true
e.contains(4) shouldBe false
val es = NonEmptyMap("one" -> 1, "two" -> 2, "three" -> 3)
es.contains("one") shouldBe true
es.contains("ONE") shouldBe false
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.contains("one") shouldBe true
es.contains("ONE") shouldBe false
// SKIP-DOTTY-END
}
it should "have 3 copyToArray methods" in {
val arr1 = Array.fill(5)(-1 -> "negative 1")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").copyToArray(arr1)
arr1 shouldEqual Array(5 -> "five", 1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
val arr2 = Array.fill(5)(-1 -> "negative 1")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").copyToArray(arr2, 1)
arr2 shouldEqual Array(-1 -> "negative 1", 5 -> "five", 1 -> "one", 2 -> "two", 3 -> "three")
val arr3 = Array.fill(5)(-1 -> "negative 1")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").copyToArray(arr3, 1, 2)
arr3 shouldEqual Array(-1 -> "negative 1", 5 -> "five", 1 -> "one", -1 -> "negative 1", -1 -> "negative 1")
}
it should "have a copyToBuffer method" in {
val buf = ListBuffer.fill(3)(-1 -> "negative 1")
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").copyToBuffer(buf)
buf shouldEqual Buffer(-1 -> "negative 1", -1 -> "negative 1", -1 -> "negative 1", 5 -> "five", 1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four")
}
it should "have a count method" in {
val nonEmptyMap = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five")
nonEmptyMap.count(_._1 > 10) shouldBe 0
nonEmptyMap.count(_._1 % 2 == 0) shouldBe 2
nonEmptyMap.count(_._1 % 2 == 1) shouldBe 3
}
/*
it should not have a diff method
scala> Vector(1, 2, 3).diff(Vector(1, 2, 3))
res0: scala.collection.immutable.Vector[Int] = Vector()
*/
/*
  it should not have a drop method
    scala> Vector(1, 2, 3).drop(3)
    res1: scala.collection.immutable.Vector[Int] = Vector()
  it should not have a dropRight method
    scala> Vector(1, 2, 3).dropRight(3)
    res0: scala.collection.immutable.Vector[Int] = Vector()
  it should not have a dropWhile method
scala> Vector(1, 2, 3).dropWhile(_ < 10)
res2: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an equals method" in {
NonEmptyMap(1 -> "one") shouldEqual NonEmptyMap(1 -> "one")
NonEmptyMap(1 -> "one") should not equal NonEmptyMap(2 -> "two")
NonEmptyMap(1 -> "one", 2 -> "two") should not equal NonEmptyMap(2 -> "two", 3 -> "three")
}
it should "have an exists method" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").exists(_._1 == 2) shouldBe true
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").exists(_._1 == 5) shouldBe false
}
/*
it should not have a filter method
scala> Vector(1, 2, 3).filter(_ > 10)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a filterNot method
scala> Vector(1, 2, 3).filterNot(_ < 10)
res13: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a find method" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").find(_._1 == 5) shouldBe None
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three").find(_._1 == 2) shouldBe Some(2 -> "two")
}
it should "have a flatMap method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3") flatMap (i => NonEmptyMap(i._1 + 1 -> (i._1 + 1).toString)) shouldBe NonEmptyMap(2 -> "2", 3 -> "3", 4 -> "4")
val ss = NonEmptyMap("hi" -> "hihi", "ho" -> "hoho")
val is = NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three")
(for (s <- ss; i <- is) yield (s._1 + i._1, i._1)) shouldBe
NonEmptyMap(
("hi1",1), ("hi2",2), ("hi3",3), ("ho1",1), ("ho2",2), ("ho3",3)
)
NonEmptyMap(5 -> "five") flatMap (i => NonEmptyMap(i._1 + 3 -> i._2)) shouldBe NonEmptyMap(8 -> "five")
NonEmptyMap(8 -> "eight") flatMap (i => NonEmptyMap(i._1.toString -> i._2)) shouldBe NonEmptyMap("8" -> "eight")
}
/*
Can only flatten NonEmptyMaps
scala> Vector(Set.empty[Int], Set.empty[Int]).flatten
res17: scala.collection.immutable.Vector[Int] = Vector()
*/
// TODO: Actually it would make sense to flatten Everys too
it should "have a fold method" in {
NonEmptyMap(1 -> "1").fold(0 -> "0"){ case (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2)} shouldBe (1, "01")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").fold(0 -> "0"){ case (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2)} shouldBe (6, "0231")
}
it should "have a foldLeft method" in {
NonEmptyMap(1 -> "1").foldLeft(0)(_ + _._1) shouldBe 1
NonEmptyMap(1 -> "1").foldLeft(1)(_ + _._1) shouldBe 2
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").foldLeft(0)(_ + _._1) shouldBe 6
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").foldLeft(1)(_ + _._1) shouldBe 7
}
it should "have a foldRight method" in {
NonEmptyMap(1 -> "1").foldRight(0)(_._1 + _) shouldBe 1
NonEmptyMap(1 -> "1").foldRight(1)(_._1 + _) shouldBe 2
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").foldRight(0)(_._1 + _) shouldBe 6
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").foldRight(1)(_._1 + _) shouldBe 7
}
it should "have a forall method" in {
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").forall(_._1 > 0) shouldBe true
NonEmptyMap(1 -> "one", 2 -> "two", 3 -> "three", 4 -> "four", 5 -> "five").forall(_._1 < 0) shouldBe false
}
it should "have a foreach method" in {
var num = 0
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3") foreach (num += _._1)
num shouldBe 6
for (i <- NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3"))
num += i._1
num shouldBe 12
NonEmptyMap(5 -> "5") foreach (num *= _._1)
num shouldBe 60
}
it should "have a groupBy method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").groupBy(_._1 % 2) shouldBe Map(1 -> NonEmptyMap(1 -> "1", 3 -> "3", 5 -> "5"), 0 -> NonEmptyMap(2 -> "2", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 3 -> "3", 3 -> "3").groupBy(_._1 % 2) shouldBe Map(1 -> NonEmptyMap(1 -> "1", 3 -> "3", 3 -> "3", 3 -> "3"), 0 -> NonEmptyMap(2 -> "2"))
NonEmptyMap(1 -> "1", 1 -> "1", 3 -> "3", 3 -> "3", 3 -> "3").groupBy(_._1 % 2) shouldBe Map(1 -> NonEmptyMap(1 -> "1", 1 -> "1", 3 -> "3", 3 -> "3", 3 -> "3"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 5 -> "5", 7 -> "7").groupBy(_._1 % 2) shouldBe Map(1 -> NonEmptyMap(1 -> "1", 3 -> "3", 5 -> "5", 7 -> "7"), 0 -> NonEmptyMap(2 -> "2"))
}
it should "have a grouped method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").grouped(2).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").grouped(1).toList shouldBe List(NonEmptyMap(2 -> "2"), NonEmptyMap(3 -> "3"), NonEmptyMap(1 -> "1"))
an [IllegalArgumentException] should be thrownBy { NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").grouped(0) }
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6", 7 -> "7", 8 -> "8", 9 -> "9", 10 -> "10").grouped(2).toList shouldBe List(NonEmptyMap(5 -> "5", 10 -> "10"), NonEmptyMap(1 -> "1", 6 -> "6"), NonEmptyMap(9 -> "9", 2 -> "2"), NonEmptyMap(7 -> "7", 3 -> "3"), NonEmptyMap(8 -> "8", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6", 7 -> "7", 8 -> "8", 9 -> "9", 10 -> "10").grouped(3).toList shouldBe List(NonEmptyMap(5 -> "5", 10 -> "10", 1 -> "1"), NonEmptyMap(6 -> "6", 9 -> "9", 2 -> "2"), NonEmptyMap(7 -> "7", 3 -> "3", 8 -> "8"), NonEmptyMap(4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6", 7 -> "7", 8 -> "8", 9 -> "9", 10 -> "10").grouped(4).toList shouldBe List(NonEmptyMap(5 -> "5", 10 -> "10", 1 -> "1", 6 -> "6"), NonEmptyMap(9 -> "9", 2 -> "2", 7 -> "7", 3 -> "3"), NonEmptyMap(8 -> "8", 4 -> "4"))
NonEmptyMap(1 -> "1").grouped(2).toList shouldBe List(NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1").grouped(1).toList shouldBe List(NonEmptyMap(1 -> "1"))
}
it should "have a hasDefiniteSize method" in {
NonEmptyMap(1 -> "1").hasDefiniteSize shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2").hasDefiniteSize shouldBe true
}
it should "have a hashCode method" in {
NonEmptyMap(1 -> "1").hashCode shouldEqual NonEmptyMap(1 -> "1").hashCode
NonEmptyMap(1 -> "1", 2 -> "2").hashCode shouldEqual NonEmptyMap(1 -> "1", 2 -> "2").hashCode
}
it should "have a head method" in {
NonEmptyMap("hi" -> "ho").head shouldBe ("hi", "ho")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").head shouldBe (2, "2")
}
it should "have a headOption method" in {
NonEmptyMap("hi" -> "ho").headOption shouldBe Some(("hi", "ho"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").headOption shouldBe Some((2, "2"))
}
/*
it should not have an init method
scala> Vector(1).init
res30: scala.collection.immutable.Vector[Int] = Vector()
it should "have an inits method" is pending
    scala> Vector(1).inits.toList
    res32: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should "have an intersect method" is pending
scala> Vector(1, 2, 3) intersect Vector(4, 5)
res33: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an isDefinedAt method, inherited from PartialFunction" in {
NonEmptyMap(1 -> "1").isDefinedAt(0) shouldBe false
NonEmptyMap(1 -> "1").isDefinedAt(1) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isDefinedAt(1) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isDefinedAt(2) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isDefinedAt(3) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isDefinedAt(0) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isDefinedAt(-1) shouldBe false
}
it should "have an isEmpty method" in {
NonEmptyMap("hi" -> "ho").isEmpty shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isEmpty shouldBe false
}
it should "have an isTraversableAgain method" in {
NonEmptyMap("hi" -> "ho").isTraversableAgain shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").isTraversableAgain shouldBe true
}
it should "have an iterator method" in {
NonEmptyMap("hi" -> "ho").iterator.toMap shouldBe Map("hi" -> "ho")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").iterator.toMap shouldBe Map(1 -> "1", 2 -> "2", 3 -> "3")
}
it should "have a last method" in {
NonEmptyMap("hi" -> "ho").last shouldBe ("hi", "ho")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").last shouldBe (1, "1")
}
  it should "have a lastOption method" in {
NonEmptyMap("hi" -> "ho").lastOption shouldBe Some("hi" -> "ho")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").lastOption shouldBe Some((1 -> "1"))
}
it should "have an inherited lift method" in {
val liftedOne = NonEmptyMap("hi" -> "ho").lift
liftedOne("hi") shouldBe Some("ho")
liftedOne("other") shouldBe None
liftedOne("hello") shouldBe None
val liftedMany = NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").lift
liftedMany(1) shouldBe Some("1")
liftedMany(2) shouldBe Some("2")
liftedMany(3) shouldBe Some("3")
liftedMany(0) shouldBe None
liftedMany(-1) shouldBe None
}
it should "have a map method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3") map (e => (e._1 + 1, e._2)) shouldBe NonEmptyMap(3 -> "2", 4 -> "3", 2 -> "1")
(for (ele <- NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3")) yield ((ele._1 * 2) -> ele._2)) shouldBe NonEmptyMap(2 -> "1", 4 -> "2", 6 -> "3")
NonEmptyMap(5 -> "5") map (e => (e._1 + 1, e._2)) shouldBe NonEmptyMap(6 -> "5")
NonEmptyMap(8 -> "8") map (e => (e._1.toString, e._2)) shouldBe NonEmptyMap("8" -> "8")
}
it should "have a max method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").max shouldBe (5, "5")
NonEmptyMap(1 -> "1").max shouldBe (1, "1")
NonEmptyMap(-1 -> "-1").max shouldBe (-1, "-1")
NonEmptyMap("aaa" -> "AAA", "ccc" -> "CCC", "bbb" -> "BBB").max shouldBe ("ccc", "CCC")
}
it should "have a maxBy method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").maxBy(_._1.abs) shouldBe (5, "5")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", -5 -> "-5").maxBy(_._1.abs) shouldBe (-5, "-5")
}
it should "have a min method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").min shouldBe (1, "1")
NonEmptyMap(1 -> "1").min shouldBe (1, "1")
NonEmptyMap(-1 -> "-1").min shouldBe (-1, "-1")
NonEmptyMap("aaa" -> "AAA", "ccc" -> "CCC", "bbb" -> "BBB").min shouldBe ("aaa", "AAA")
}
it should "have a minBy method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").minBy(_._1.abs) shouldBe (1 -> "1")
NonEmptyMap(-1 -> "-1", -2 -> "-2", 3 -> "-3", 4 -> "4", 5 -> "5").minBy(_._1.abs) shouldBe (-1 -> "-1")
}
it should "have a mkString method" in {
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6705
NonEmptyMap("hi" -> "ho").mkString shouldBe "hi -> ho"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").mkString shouldBe "2 -> 23 -> 31 -> 1"
// SKIP-DOTTY-END
NonEmptyMap("hi" -> "ho").mkString("#") shouldBe "hi -> ho"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").mkString("#") shouldBe "2 -> 2#3 -> 3#1 -> 1"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").mkString(", ") shouldBe "2 -> 2, 3 -> 3, 1 -> 1"
NonEmptyMap("hi" -> "ho").mkString("<", "#", ">") shouldBe "<hi -> ho>"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").mkString("<", "#", ">") shouldBe "<2 -> 2#3 -> 3#1 -> 1>"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").mkString(" ( ", ", ", " ) ") shouldBe " ( 2 -> 2, 3 -> 3, 1 -> 1 ) "
}
  it should "have a nonEmpty method" in {
NonEmptyMap("hi" -> "ho").nonEmpty shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").nonEmpty shouldBe true
}
it should "have an orElse method, inherited from PartialFunction" in {
val pf: PartialFunction[Int, Int] = { case i => -i }
val f = NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3") orElse pf
f(0) shouldBe 0
f(1) shouldBe "1"
f(2) shouldBe "2"
f(3) shouldBe "3"
f(-1) shouldBe 1
}
// it should not have a par method, because I don't want to support that. If the user
// needs a parallel collection, they can use a parallel collection: nonEmptyMap.toVector.par...
/*
  it should not have a partition method
scala> Vector(1, 2, 3, 4, 5).partition(_ > 10)
res10: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have a reduce method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduce { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe (15, "51234")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduce { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe (120, "51234")
NonEmptyMap(5 -> "5").reduce { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe (5, "5")
NonEmptyMap(5 -> "5").reduce { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe (5, "5")
}
it should "have a reduceLeft method" in {
NonEmptyMap(1 -> "1").reduceLeft { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe 1 -> "1"
NonEmptyMap(1 -> "1").reduceLeft { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 1 -> "1"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceLeft { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe 6 -> "231"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceLeft { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 6 -> "231"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceLeft { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 120 -> "51234"
}
it should "have a reduceLeftOption method" in {
NonEmptyMap(1 -> "1").reduceLeftOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(1 -> "1")
NonEmptyMap(1 -> "1").reduceLeftOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(1 -> "1")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceLeftOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(6 -> "231")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceLeftOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(6 -> "231")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceLeftOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(120 -> "51234")
}
it should "have a reduceOption method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(15 -> "51234")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(120 -> "51234")
NonEmptyMap(5 -> "5").reduceOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(5 -> "5")
NonEmptyMap(5 -> "5").reduceOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(5 -> "5")
}
it should "have a reduceRight method" in {
NonEmptyMap(1 -> "1").reduceRight { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 1 -> "1"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceRight { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe 6 -> "231"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceRight { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 6 -> "231"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceRight { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe 120 -> "51234"
}
it should "have a reduceRightOption method" in {
NonEmptyMap(1 -> "1").reduceRightOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(1 -> "1")
NonEmptyMap(1 -> "1").reduceRightOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(1 -> "1")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceRightOption { (e1, e2) => (e1._1 + e2._1, e1._2 + e2._2) } shouldBe Some(6 -> "231")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").reduceRightOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(6 -> "231")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").reduceRightOption { (e1, e2) => (e1._1 * e2._1, e1._2 + e2._2) } shouldBe Some(120 -> "51234")
}
it should "have a runWith method, inherited from PartialFunction" in {
// TODO: What is this? Seems to be testing Vector or Map instead of Every or NonEmptyMap.
var x = 0
val f = NonEmptyMap("1" -> 1, "2" -> 2, "3" -> 3).runWith(x += _)
f("0") shouldBe false
x shouldBe 0
f("1") shouldBe true
x shouldBe 1
f("2") shouldBe true
x shouldBe 3
f("3") shouldBe true
x shouldBe 6
var y = 0
val g = NonEmptyMap("3" -> 3).runWith(y += _)
g("3") shouldBe true
y shouldBe 3
g("3") shouldBe true
y shouldBe 6
}
it should "have a sameElements method that takes a GenIterable" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(List(5 -> "5", 1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(List(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(List(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(List(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(List(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 4 -> "4")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(List(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(List(1 -> "1")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(List(3 -> "3")) shouldBe true
}
it should "have a sameElements method that takes an Every" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(Every(5 -> "5", 1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(Every(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(Every(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(Every(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(Every(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 4 -> "4")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(Every(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(Every(1 -> "1")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(Every(3 -> "3")) shouldBe true
}
it should "have a sameElements method that takes a NonEmptyMap" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe true
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6")) shouldBe false
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sameElements(NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 4 -> "4")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(NonEmptyMap(1 -> "1")) shouldBe false
NonEmptyMap(3 -> "3").sameElements(NonEmptyMap(3 -> "3")) shouldBe true
}
it should "have a scan method" in {
NonEmptyMap(1 -> "1").scan(0 -> "0")((e1, e2) => (e1._1 + e2._1, e1._2 + e2._2)) shouldBe NonEmptyMap(0 -> "0", 1 -> "01")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").scan(0 -> "0")((e1, e2) => (e1._1 + e2._1, e1._2 + e2._2)) shouldBe NonEmptyMap(0 -> "0", 2 -> "02", 5 -> "023", 6 -> "0231")
}
it should "have a scanLeft method" in {
NonEmptyMap(1 -> "1").scanLeft(0)(_ + _._1) shouldBe List(0, 1)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").scanLeft(0)(_ + _._1) shouldBe List(0, 2, 5, 6)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").scanLeft("z")(_ + _._2) shouldBe List("z", "z2", "z23", "z231")
NonEmptyMap(0 -> "0").scanLeft("z")(_ + _._2) shouldBe List("z", "z0")
}
it should "have a scanRight method" in {
NonEmptyMap(1 -> "1").scanRight(0)(_._1 + _) shouldBe List(1, 0)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").scanRight(0)(_._1 + _) shouldBe List(6, 4, 1, 0)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").scanRight("z")(_._2 + _) shouldBe List("231z", "31z", "1z", "z")
NonEmptyMap(0 -> "0").scanRight("z")(_._2 + _) shouldBe List("0z", "z")
}
// it should "have a seq method" is pending
it should "have a size method" in {
NonEmptyMap(5 -> "5").size shouldBe 1
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").size shouldBe 3
}
/*
it should not have a slice method
scala> Vector(3).slice(0, 0)
res83: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3, 4, 5).slice(2, 1)
res84: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have 2 sliding methods" in {
NonEmptyMap(1 -> "1").sliding(1).toList shouldBe List(NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1").sliding(2).toList shouldBe List(NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(2).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(3 -> "3", 1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(1).toList shouldBe List(NonEmptyMap(2 -> "2"), NonEmptyMap(3 -> "3"), NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(3).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3"), NonEmptyMap(2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(2).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1"), NonEmptyMap(1 -> "1", 2 -> "2"), NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(1).toList shouldBe List(NonEmptyMap(5 -> "5"), NonEmptyMap(1 -> "1"), NonEmptyMap(2 -> "2"), NonEmptyMap(3 -> "3"), NonEmptyMap(4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(4).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2", 3 -> "3"), NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(5).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1").sliding(1, 1).toList shouldBe List(NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1").sliding(1, 2).toList shouldBe List(NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(1, 1).toList shouldBe List(NonEmptyMap(2 -> "2"), NonEmptyMap(3 -> "3"), NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(2, 1).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(3 -> "3", 1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(2, 2).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(3, 2).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").sliding(3, 1).toList shouldBe List(NonEmptyMap(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3, 1).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3"), NonEmptyMap(2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(2, 2).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1"), NonEmptyMap(2 -> "2", 3 -> "3"), NonEmptyMap(4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(2, 3).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1"), NonEmptyMap(3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(2, 4).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1"), NonEmptyMap(4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3, 1).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3"), NonEmptyMap(2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3, 2).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(2 -> "2", 3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3, 3).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(3 -> "3", 4 -> "4"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").sliding(3, 4).toList shouldBe List(NonEmptyMap(5 -> "5", 1 -> "1", 2 -> "2"), NonEmptyMap(4 -> "4"))
}
/*
it should not have a span method
scala> Vector(1, 2, 3, 4, 5).span(_ > 10)
res105: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
it should not have a splitAt method
scala> Vector(1, 2, 3, 4, 5).splitAt(0)
res106: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have a stringPrefix method" in {
NonEmptyMap(1 -> "1").stringPrefix shouldBe "NonEmptyMap"
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").stringPrefix shouldBe "NonEmptyMap"
}
it should "have a sum method" in {
implicit object TestNumeric extends Numeric[(Int, Int)] {
def plus(x: (Int, Int), y: (Int, Int)): (Int, Int) = (x._1 + y._1, x._2 + y._2)
def minus(x: (Int, Int), y: (Int, Int)): (Int, Int) = (x._1 - y._1, x._2 - y._2)
def times(x: (Int, Int), y: (Int, Int)): (Int, Int) = (x._1 * y._1, x._2 * y._2)
def negate(x: (Int, Int)): (Int, Int) = (-x._1, -x._2)
def fromInt(x: Int): (Int, Int) = (x, x)
def toInt(x: (Int, Int)): Int = x._1
def toLong(x: (Int, Int)): Long = x._1.toLong
def toFloat(x: (Int, Int)): Float = x._1.toFloat
def toDouble(x: (Int, Int)): Double = x._1.toDouble
def compare(x: (Int, Int), y: (Int, Int)): Int = x._1 compare y._1
def parseString(str: String): Option[(Int, Int)] = ??? // For scala 2.13, for testing purpose we do not need to implement it here.
}
NonEmptyMap(1 -> 1).sum shouldBe 1 -> 1
NonEmptyMap(5 -> 5).sum shouldBe 5 -> 5
NonEmptyMap(1 -> 1, 2 -> 2, 3 -> 3).sum shouldBe 6 -> 6
NonEmptyMap(1 -> 1, 2 -> 2, 3 -> 3, 4 -> 4, 5 -> 5).sum shouldBe 15 -> 15
}
/*
it should not have a tail method
scala> Vector(1).tail
res7: scala.collection.immutable.Vector[Int] = Vector()
it should not have a tails method
    scala> Vector(1).tails.toList
    res8: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should not have a take method
scala> Vector(1).take(0)
res10: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(0)
res11: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(-1)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeRight method
scala> Vector(1).takeRight(1)
res13: scala.collection.immutable.Vector[Int] = Vector(1)
scala> Vector(1).takeRight(0)
res14: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).takeRight(0)
res15: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeWhile method
scala> Vector(1, 2, 3).takeWhile(_ > 10)
res17: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1).takeWhile(_ > 10)
res18: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a to method" in {
import org.scalactic.ColCompatHelper.Factory._
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").to(scala.collection.mutable.ListBuffer) shouldBe ListBuffer(2 -> "2", 3 -> "3", 1 -> "1")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").to(Vector) shouldBe Vector(2 -> "2", 3 -> "3", 1 -> "1")
}
it should "have a toArray method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toArray should === (Array(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toArray should === (Array("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toArray should === (Array(1 -> "1"))
}
it should "have a toBuffer method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toBuffer should === (Buffer(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toBuffer should === (Buffer("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toBuffer should === (Buffer(1 -> "1"))
}
it should "have a toIndexedSeq method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toIndexedSeq should === (IndexedSeq(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toIndexedSeq should === (IndexedSeq("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toIndexedSeq should === (IndexedSeq(1 -> "1"))
}
it should "have a toIterable method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toIterable should === (Map(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toIterable should === (Map("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toIterable should === (Map(1 -> "1"))
}
it should "have a toIterator method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toIterator.toMap should === (Iterator(2 -> "2", 3 -> "3", 1 -> "1").toMap)
NonEmptyMap("a" -> "A", "b" -> "B").toIterator.toMap should === (Iterator("b" -> "B", "a" -> "A").toMap)
NonEmptyMap(1 -> "1").toIterator.toMap should === (Iterator(1 -> "1").toMap)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toIterator shouldBe an [Iterator[_]]
NonEmptyMap("a" -> "A", "b" -> "B").toIterator shouldBe an [Iterator[_]]
NonEmptyMap(1 -> "1").toIterator shouldBe an [Iterator[_]]
}
it should "have a toList method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toList should === (List(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toList should === (List("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toList should === (List(1 -> "1"))
}
it should "have a toMap method" in {
NonEmptyMap("1" -> 1, "2" -> 2, "3" -> 3).toMap should === (Map("1" -> 1, "2" -> 2, "3" -> 3))
NonEmptyMap('A' -> "a", 'B' -> "b").toMap should === (Map('A' -> "a", 'B' -> "b"))
NonEmptyMap("1" -> 1).toMap should === (Map("1" -> 1))
}
it should "have a toSeq method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toSeq should === (Seq(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toSeq should === (Seq("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toSeq should === (Seq(1 -> "1"))
}
it should "have a toSet method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toSet should === (Set(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toSet should === (Set("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toSet should === (Set(1 -> "1"))
}
it should "have a toStream method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toStream should === (Stream(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toStream should === (Stream("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toStream should === (Stream(1 -> "1"))
}
it should "have a toString method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toString should === ("NonEmptyMap(2 -> 2, 3 -> 3, 1 -> 1)")
NonEmptyMap(1 -> "1").toString should === ("NonEmptyMap(1 -> 1)")
}
it should "have a toVector method" in {
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").toVector should === (Vector(2 -> "2", 3 -> "3", 1 -> "1"))
NonEmptyMap("a" -> "A", "b" -> "B").toVector should === (Vector("b" -> "B", "a" -> "A"))
NonEmptyMap(1 -> "1").toVector should === (Vector(1 -> "1"))
}
it should "have an unzip method" in {
NonEmptyMap(1 -> "1", 2 -> "2").unzip shouldBe (List(2, 1), List("2", "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4").unzip shouldBe (List(2, 3, 4, 1), List("2", "3", "4", "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6").unzip shouldBe (List(5, 1, 6, 2, 3, 4), List("5", "1", "6", "2", "3", "4"))
}
it should "have an unzip3 method" in {
import scala.language.implicitConversions
implicit def convertEntryToTuple3(e: (Int, String)): (Int, Int, String) = (e._1, e._1 * 2, e._2)
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").unzip3 shouldBe (List(2, 3, 1), List(4, 6, 2), List("2", "3", "1"))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5", 6 -> "6").unzip3 shouldBe (List(5, 1, 6, 2, 3, 4), List(10, 2, 12, 4, 6, 8), List("5", "1", "6", "2", "3", "4"))
}
it should "have an updated method" in {
NonEmptyMap(1 -> "1").updated(1, "one") shouldBe NonEmptyMap(1 -> "one")
NonEmptyMap(1 -> "1").updated(2, "two") shouldBe NonEmptyMap(1 -> "1", 2 -> "two")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").updated(1, "one") shouldBe NonEmptyMap(1 -> "one", 2 -> "2", 3 -> "3")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").updated(2, "two") shouldBe NonEmptyMap(1 -> "1", 2 -> "two", 3 -> "3")
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").updated(3, "three") shouldBe NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "three")
}
/*
it should not have 2 view methods, because I don't want to support views in Every
*/
/*
it should not have a zip method
    scala> Vector(1) zip Nil
    res0: scala.collection.immutable.Vector[(Int, Nothing)] = Vector()
*/
it should "have a zipAll method that takes an Iterable" in {
// Empty on right
NonEmptyMap(1 -> "1").zipAll(Nil, -1 -> "-1", -2) shouldBe NonEmptyMap((1, "1") -> -2)
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(Nil, -1 -> "-1", -2) shouldBe NonEmptyMap((1, "1") -> -2, (2, "2") -> -2)
// Same length
NonEmptyMap(1 -> "1").zipAll(List(1), -1 -> "-1", -2) shouldBe NonEmptyMap((1, "1") -> 1)
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(List(1, 2), -1 -> "-1", -2) shouldBe NonEmptyMap((2, "2") -> 1, (1, "1") -> 2)
// Non-empty, longer on right
NonEmptyMap(1 -> "1").zipAll(List(10, 20), -1 -> "-1", -2) shouldBe NonEmptyMap((1 -> "1", 10), (-1 -> "-1", 20))
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(List(10, 20, 30), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (1 -> "1", 20), (-1 -> "-1", 30))
// Non-empty, shorter on right
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").zipAll(List(10, 20), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (3 -> "3", 20), (1 -> "1", -2))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4").zipAll(List(10, 20, 30), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (3 -> "3", 20), (4 -> "4", 30), (1 -> "1", -2))
}
it should "have a zipAll method that takes an Every" in {
// Same length
NonEmptyMap(1 -> "1").zipAll(Every(1), -1 -> "-1", -2) shouldBe NonEmptyMap((1, "1") -> 1)
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(Every(1, 2), -1 -> "-1", -2) shouldBe NonEmptyMap((2, "2") -> 1, (1, "1") -> 2)
// Non-empty, longer on right
NonEmptyMap(1 -> "1").zipAll(Every(10, 20), -1 -> "-1", -2) shouldBe NonEmptyMap((1 -> "1", 10), (-1 -> "-1", 20))
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(Every(10, 20, 30), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (1 -> "1", 20), (-1 -> "-1", 30))
// Non-empty, shorter on right
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").zipAll(Every(10, 20), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (3 -> "3", 20), (1 -> "1", -2))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4").zipAll(Every(10, 20, 30), -1 -> "-1", -2) shouldBe NonEmptyMap((2 -> "2", 10), (3 -> "3", 20), (4 -> "4", 30), (1 -> "1", -2))
}
it should "have a zipAll method that takes a NonEmptyMap" in {
// Same length
NonEmptyMap(1 -> "1").zipAll(NonEmptyMap(1 -> "one"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((1, "1") -> (1, "one"))
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(NonEmptyMap(1 -> "one", 2 -> "two"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((2, "2") -> (2, "two"), (1, "1") -> (1, "one"))
// Non-empty, longer on right
NonEmptyMap(1 -> "1").zipAll(NonEmptyMap(10 -> "ten", 20 -> "twenty"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((1 -> "1", (20, "twenty")), (-1 -> "-1", (10, "ten")))
NonEmptyMap(1 -> "1", 2 -> "2").zipAll(NonEmptyMap(10 -> "ten", 20 -> "twenty", 30 -> "thirty"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((2 -> "2", (20, "twenty")), (1 -> "1", (30, "thirty")), (-1 -> "-1", (10, "ten")))
// Non-empty, shorter on right
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3").zipAll(NonEmptyMap(10 -> "ten", 20 -> "twenty"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((2 -> "2", (20, "twenty")), (3 -> "3", (10, "ten")), (1 -> "1", (-2, "-two")))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4").zipAll(NonEmptyMap(10 -> "ten", 20 -> "twenty", 30 -> "thirty"), -1 -> "-1", -2 -> "-two") shouldBe NonEmptyMap((2 -> "2", (20, "twenty")), (3 -> "3", (30, "thirty")), (4 -> "4", (10, "ten")), (1 -> "1", (-2, "-two")))
}
it should "have a zipWithIndex method" in {
NonEmptyMap(99 -> "99").zipWithIndex shouldBe NonEmptyMap((99 -> "99", 0))
NonEmptyMap(1 -> "1", 2 -> "2", 3 -> "3", 4 -> "4", 5 -> "5").zipWithIndex shouldBe NonEmptyMap((5 -> "5", 0), (1 -> "1", 1), (2 -> "2", 2), (3 -> "3", 3), (4 -> "4", 4))
}
}
| dotty-staging/scalatest | scalactic-test/src/test/scala/org/scalactic/anyvals/NonEmptyMapSpec.scala | Scala | apache-2.0 | 54,091 |
package com.seanshubin.server
class ServerErrorHandlerImpl(jsonSerialization: JsonSerialization) extends ServerErrorHandler {
def handle(request: SimplifiedRequest, exception: ExceptionInfo): SimplifiedResponse = {
val responseObject = ServerErrorDuringRequest(request, exception)
val responseBodyJson = jsonSerialization.toJson(responseObject)
SimplifiedResponse(
HttpResponseCode.InternalServerError.code,
Some(Content(InternetMediaType.Json.name, responseBodyJson))
)
}
}
| SeanShubin/schulze | server/src/main/scala/com/seanshubin/server/ServerErrorHandlerImpl.scala | Scala | unlicense | 508 |
/*
* Copyright 2016 Scalalaz Podcast Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.scalalaz.gen.parsing
import java.time.LocalDate
import cats.data.Validated.Valid
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.Inside
class EpisodeParserSpec extends AnyFlatSpec with Matchers with Inside {
val episodeStr = """
|title=Episode#1
|page=https://scalalaz.ru/series-01.html
|date=2016-11-28
|audio.url=https://scalalaz.ru/mp3/scalalaz-podcast-1.mp3
|audio.length=6
|----
|### Yoyoyo!
|it is a new episode!""".stripMargin
it should "parse from string" in {
val result = EpisodeParser.fromString(episodeStr)
inside(result) { case Valid(episode) =>
      episode.content shouldBe "### Yoyoyo!\nit is a new episode!"
val rss = episode.settings
rss.title shouldBe "Episode#1"
rss.page shouldBe "https://scalalaz.ru/series-01.html"
rss.date shouldBe LocalDate.of(2016, 11, 28)
rss.audio.url shouldBe "https://scalalaz.ru/mp3/scalalaz-podcast-1.mp3"
rss.audio.length shouldBe 6
}
}
}
| scalalaz-podcast/scalalaz-gen | src/test/scala/ru/scalalaz/gen/parsing/EpisodeParserSpec.scala | Scala | apache-2.0 | 1,799 |
object Test extends App {
import collection.mutable.ListBuffer
def newLB = ListBuffer(Symbol("a"), Symbol("b"), Symbol("c"), Symbol("d"), Symbol("e"))
val lb0 = newLB
try {
lb0.insert(9, Symbol("x"))
} catch {
case ex: IndexOutOfBoundsException => println(ex)
}
val lb1 = newLB
try {
lb1.insert(9, Symbol("x"))
} catch {
case ex: IndexOutOfBoundsException =>
}
val replStr = scala.runtime.ScalaRunTime.replStringOf(lb1, 100)
  if (replStr == "ListBuffer(Symbol(a), Symbol(b), Symbol(c), Symbol(d), Symbol(e))\n")
println("replStringOf OK")
else
println("replStringOf FAILED: " + replStr)
val len = lb1.length
if (len == 5)
println("length OK")
else
println("length FAILED: " + len)
}
| dotty-staging/dotty | tests/run/t6633.scala | Scala | apache-2.0 | 753 |
package router
package dto
import com.wordnik.swagger.annotations.{ ApiModelProperty, ApiModel }
import spray.json.DefaultJsonProtocol
import scala.annotation.meta.field
/**
* Created by gneotux on 18/07/15.
*/
@ApiModel(description = "A Location creation entity")
case class LocationDto(
@(ApiModelProperty@field)(required = true, value = "location name")
name: String,
@(ApiModelProperty@field)(value = "location's code")
code: Option[String] = None,
@(ApiModelProperty @field)(required = true, value = "location's latitude")
latitude: Double,
@(ApiModelProperty @field)(required = true, value = "location's longitude")
longitude: Double,
@(ApiModelProperty @field)(required = true, value = "location's people capacity")
capacity: Int,
@(ApiModelProperty@field)(value = "location's description")
description: Option[String] = None,
@(ApiModelProperty@field)(value = "location's photo url")
photoUrl: Option[String] = None
)
object LocationDto extends DefaultJsonProtocol {
implicit val locationDtoFormat = jsonFormat7(LocationDto.apply)
}
| Gneotux/pfc | src/main/scala/router/dto/LocationDto.scala | Scala | apache-2.0 | 1,083 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.ui
import javax.servlet.http.HttpServletRequest
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.status.api.v1.{JobData, StageData}
import org.apache.spark.streaming.Time
import org.apache.spark.streaming.ui.StreamingJobProgressListener.SparkJobId
import org.apache.spark.ui.{UIUtils => SparkUIUtils, WebUIPage}
private[ui] case class SparkJobIdWithUIData(sparkJobId: SparkJobId, jobData: Option[JobData])
private[ui] class BatchPage(parent: StreamingTab) extends WebUIPage("batch") {
private val streamingListener = parent.listener
private val store = parent.parent.store
private def columns: Seq[Node] = {
<th>Output Op Id</th>
<th>Description</th>
<th>Output Op Duration</th>
<th>Status</th>
<th>Job Id</th>
<th>Job Duration</th>
<th class="sorttable_nosort">Stages: Succeeded/Total</th>
<th class="sorttable_nosort">Tasks (for all stages): Succeeded/Total</th>
<th>Error</th>
}
private def generateJobRow(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
jobIdWithData: SparkJobIdWithUIData): Seq[Node] = {
if (jobIdWithData.jobData.isDefined) {
generateNormalJobRow(outputOpData, outputOpDescription, formattedOutputOpDuration,
numSparkJobRowsInOutputOp, isFirstRow, jobIdWithData.jobData.get)
} else {
generateDroppedJobRow(outputOpData, outputOpDescription, formattedOutputOpDuration,
numSparkJobRowsInOutputOp, isFirstRow, jobIdWithData.sparkJobId)
}
}
private def generateOutputOpRowWithoutSparkJobs(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String): Seq[Node] = {
<tr>
<td class="output-op-id-cell" >{outputOpData.id.toString}</td>
<td>{outputOpDescription}</td>
<td>{formattedOutputOpDuration}</td>
{outputOpStatusCell(outputOpData, rowspan = 1)}
<!-- Job Id -->
<td>-</td>
<!-- Duration -->
<td>-</td>
<!-- Stages: Succeeded/Total -->
<td>-</td>
<!-- Tasks (for all stages): Succeeded/Total -->
<td>-</td>
<!-- Error -->
<td>-</td>
</tr>
}
/**
   * Generate a row for a Spark Job. Because duplicated output op info needs to be collapsed into
* one cell, we use "rowspan" for the first row of an output op.
*/
private def generateNormalJobRow(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
sparkJob: JobData): Seq[Node] = {
val duration: Option[Long] = {
sparkJob.submissionTime.map { start =>
val end = sparkJob.completionTime.map(_.getTime()).getOrElse(System.currentTimeMillis())
end - start.getTime()
}
}
val lastFailureReason =
sparkJob.stageIds.sorted.reverse.flatMap(getStageData).
dropWhile(_.failureReason == None).take(1). // get the first info that contains failure
flatMap(info => info.failureReason).headOption.getOrElse("")
val formattedDuration = duration.map(d => SparkUIUtils.formatDuration(d)).getOrElse("-")
val detailUrl = s"${SparkUIUtils.prependBaseUri(parent.basePath)}/jobs/job?id=${sparkJob.jobId}"
// In the first row, output op id and its information needs to be shown. In other rows, these
// cells will be taken up due to "rowspan".
// scalastyle:off
val prefixCells =
if (isFirstRow) {
<td class="output-op-id-cell" rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpData.id.toString}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>
{outputOpDescription}
</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{formattedOutputOpDuration}</td> ++
{outputOpStatusCell(outputOpData, numSparkJobRowsInOutputOp)}
} else {
Nil
}
// scalastyle:on
<tr>
{prefixCells}
<td sorttable_customkey={sparkJob.jobId.toString}>
<a href={detailUrl}>
{sparkJob.jobId}{sparkJob.jobGroup.map(id => s"($id)").getOrElse("")}
</a>
</td>
<td sorttable_customkey={duration.getOrElse(Long.MaxValue).toString}>
{formattedDuration}
</td>
<td class="stage-progress-cell">
{sparkJob.numCompletedStages}/{sparkJob.stageIds.size - sparkJob.numSkippedStages}
{if (sparkJob.numFailedStages > 0) s"(${sparkJob.numFailedStages} failed)"}
{if (sparkJob.numSkippedStages > 0) s"(${sparkJob.numSkippedStages} skipped)"}
</td>
<td class="progress-cell">
{
SparkUIUtils.makeProgressBar(
started = sparkJob.numActiveTasks,
completed = sparkJob.numCompletedTasks,
failed = sparkJob.numFailedTasks,
skipped = sparkJob.numSkippedTasks,
reasonToNumKilled = sparkJob.killedTasksSummary,
total = sparkJob.numTasks - sparkJob.numSkippedTasks)
}
</td>
{UIUtils.failureReasonCell(lastFailureReason)}
</tr>
}
/**
* If a job is dropped by sparkListener due to exceeding the limitation, we only show the job id
* with "-" cells.
*/
private def generateDroppedJobRow(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
jobId: Int): Seq[Node] = {
// In the first row, output op id and its information needs to be shown. In other rows, these
// cells will be taken up due to "rowspan".
// scalastyle:off
val prefixCells =
if (isFirstRow) {
<td class="output-op-id-cell" rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpData.id.toString}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpDescription}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{formattedOutputOpDuration}</td> ++
{outputOpStatusCell(outputOpData, numSparkJobRowsInOutputOp)}
} else {
Nil
}
// scalastyle:on
<tr>
{prefixCells}
<td sorttable_customkey={jobId.toString}>
{if (jobId >= 0) jobId.toString else "-"}
</td>
<!-- Duration -->
<td>-</td>
<!-- Stages: Succeeded/Total -->
<td>-</td>
<!-- Tasks (for all stages): Succeeded/Total -->
<td>-</td>
<!-- Error -->
<td>-</td>
</tr>
}
private def generateOutputOpIdRow(
outputOpData: OutputOperationUIData,
sparkJobs: Seq[SparkJobIdWithUIData]): Seq[Node] = {
val formattedOutputOpDuration =
if (outputOpData.duration.isEmpty) {
"-"
} else {
SparkUIUtils.formatDuration(outputOpData.duration.get)
}
val description = generateOutputOpDescription(outputOpData)
if (sparkJobs.isEmpty) {
generateOutputOpRowWithoutSparkJobs(outputOpData, description, formattedOutputOpDuration)
} else {
val firstRow =
generateJobRow(
outputOpData,
description,
formattedOutputOpDuration,
sparkJobs.size,
true,
sparkJobs.head)
val tailRows =
sparkJobs.tail.map { sparkJob =>
generateJobRow(
outputOpData,
description,
formattedOutputOpDuration,
sparkJobs.size,
false,
sparkJob)
}
(firstRow ++ tailRows).flatten
}
}
private def generateOutputOpDescription(outputOp: OutputOperationUIData): Seq[Node] = {
<div>
{outputOp.name}
<span
onclick="this.parentNode.querySelector('.stage-details').classList.toggle('collapsed')"
class="expand-details">
+details
</span>
<div class="stage-details collapsed">
<pre>{outputOp.description}</pre>
</div>
</div>
}
private def getJobData(sparkJobId: SparkJobId): Option[JobData] = {
try {
Some(store.job(sparkJobId))
} catch {
case _: NoSuchElementException => None
}
}
private def getStageData(stageId: Int): Option[StageData] = {
try {
Some(store.lastStageAttempt(stageId))
} catch {
case _: NoSuchElementException => None
}
}
private def generateOutputOperationStatusForUI(failure: String): String = {
if (failure.startsWith("org.apache.spark.SparkException")) {
"Failed due to Spark job error\\n" + failure
} else {
      var nextLineIndex = failure.indexOf("\n")
if (nextLineIndex < 0) {
nextLineIndex = failure.length
}
val firstLine = failure.substring(0, nextLineIndex)
s"Failed due to error: $firstLine\\n$failure"
}
}
/**
* Generate the job table for the batch.
*/
private def generateJobTable(batchUIData: BatchUIData): Seq[Node] = {
val outputOpIdToSparkJobIds = batchUIData.outputOpIdSparkJobIdPairs.groupBy(_.outputOpId).
map { case (outputOpId, outputOpIdAndSparkJobIds) =>
// sort SparkJobIds for each OutputOpId
(outputOpId, outputOpIdAndSparkJobIds.map(_.sparkJobId).toSeq.sorted)
}
val outputOps: Seq[(OutputOperationUIData, Seq[SparkJobId])] =
batchUIData.outputOperations.map { case (outputOpId, outputOperation) =>
val sparkJobIds = outputOpIdToSparkJobIds.getOrElse(outputOpId, Seq.empty)
(outputOperation, sparkJobIds)
}.toSeq.sortBy(_._1.id)
val outputOpWithJobs = outputOps.map { case (outputOpData, sparkJobIds) =>
(outputOpData, sparkJobIds.map { jobId => SparkJobIdWithUIData(jobId, getJobData(jobId)) })
}
<table id="batch-job-table" class="table table-bordered table-striped table-condensed">
<thead>
{columns}
</thead>
<tbody>
{
outputOpWithJobs.map { case (outputOpData, sparkJobs) =>
generateOutputOpIdRow(outputOpData, sparkJobs)
}
}
</tbody>
</table>
}
def render(request: HttpServletRequest): Seq[Node] = streamingListener.synchronized {
// stripXSS is called first to remove suspicious characters used in XSS attacks
val batchTime =
Option(SparkUIUtils.stripXSS(request.getParameter("id"))).map(id => Time(id.toLong))
.getOrElse {
throw new IllegalArgumentException(s"Missing id parameter")
}
val formattedBatchTime =
UIUtils.formatBatchTime(batchTime.milliseconds, streamingListener.batchDuration)
val batchUIData = streamingListener.getBatchUIData(batchTime).getOrElse {
throw new IllegalArgumentException(s"Batch $formattedBatchTime does not exist")
}
val formattedSchedulingDelay =
batchUIData.schedulingDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val formattedProcessingTime =
batchUIData.processingDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val formattedTotalDelay = batchUIData.totalDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val inputMetadatas = batchUIData.streamIdToInputInfo.values.flatMap { inputInfo =>
inputInfo.metadataDescription.map(desc => inputInfo.inputStreamId -> desc)
}.toSeq
val summary: NodeSeq =
<div>
<ul class="unstyled">
<li>
<strong>Batch Duration: </strong>
{SparkUIUtils.formatDuration(streamingListener.batchDuration)}
</li>
<li>
<strong>Input data size: </strong>
{batchUIData.numRecords} records
</li>
<li>
<strong>Scheduling delay: </strong>
{formattedSchedulingDelay}
</li>
<li>
<strong>Processing time: </strong>
{formattedProcessingTime}
</li>
<li>
<strong>Total delay: </strong>
{formattedTotalDelay}
</li>
{
if (inputMetadatas.nonEmpty) {
<li>
<strong>Input Metadata:</strong>{generateInputMetadataTable(inputMetadatas)}
</li>
}
}
</ul>
</div>
val content = summary ++ generateJobTable(batchUIData)
SparkUIUtils.headerSparkPage(s"Details of batch at $formattedBatchTime", content, parent)
}
def generateInputMetadataTable(inputMetadatas: Seq[(Int, String)]): Seq[Node] = {
<table class={SparkUIUtils.TABLE_CLASS_STRIPED_SORTABLE}>
<thead>
<tr>
<th>Input</th>
<th>Metadata</th>
</tr>
</thead>
<tbody>
{inputMetadatas.flatMap(generateInputMetadataRow)}
</tbody>
</table>
}
def generateInputMetadataRow(inputMetadata: (Int, String)): Seq[Node] = {
val streamId = inputMetadata._1
<tr>
<td>{streamingListener.streamName(streamId).getOrElse(s"Stream-$streamId")}</td>
<td>{metadataDescriptionToHTML(inputMetadata._2)}</td>
</tr>
}
private def metadataDescriptionToHTML(metadataDescription: String): Seq[Node] = {
    // tab to 4 spaces and "\n" to "<br/>"
Unparsed(StringEscapeUtils.escapeHtml4(metadataDescription).
      replaceAllLiterally("\t", "    ").replaceAllLiterally("\n", "<br/>"))
}
private def outputOpStatusCell(outputOp: OutputOperationUIData, rowspan: Int): Seq[Node] = {
outputOp.failureReason match {
case Some(failureReason) =>
val failureReasonForUI = UIUtils.createOutputOperationFailureForUI(failureReason)
UIUtils.failureReasonCell(
failureReasonForUI, rowspan, includeFirstLineInExpandDetails = false)
case None =>
if (outputOp.endTime.isEmpty) {
<td rowspan={rowspan.toString}>-</td>
} else {
<td rowspan={rowspan.toString}>Succeeded</td>
}
}
}
}
| brad-kaiser/spark | streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala | Scala | apache-2.0 | 14,721 |
package com.twitter.finagle.mux.lease.exp
import com.twitter.util.{StorageUnit, Time}
import com.twitter.conversions.DurationOps._
import com.twitter.conversions.StorageUnitOps._
import org.scalactic.source.Position
import org.scalatest.Tag
import org.scalatest.funsuite.AnyFunSuite
class AlarmTest extends AnyFunSuite with LocalConductors {
val skipWholeTest: Boolean = sys.props.contains("SKIP_FLAKY")
override def test(
testName: String,
testTags: Tag*
)(
testFun: => Any
)(
implicit pos: Position
): Unit = {
if (skipWholeTest)
ignore(testName)(testFun)
else
super.test(testName, testTags: _*)(testFun)
}
test("DurationAlarm should work") {
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
localThread(conductor) {
Alarm.arm({ () => new DurationAlarm(5.seconds) })
}
localThread(conductor) {
waitForBeat(1)
ctl.advance(5.seconds)
}
conduct()
}
}
test("MinAlarm should take the min time") {
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
localThread(conductor) {
Alarm.arm({ () => new DurationAlarm(5.seconds) min new DurationAlarm(2.seconds) })
}
localThread(conductor) {
waitForBeat(1)
ctl.advance(2.seconds)
}
conduct()
}
}
test("Alarm should continue if not yet finished") {
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
localThread(conductor) {
Alarm.arm({ () => new DurationAlarm(5.seconds) min new IntervalAlarm(1.second) })
}
localThread(conductor) {
waitForBeat(1)
ctl.advance(2.seconds)
waitForBeat(2)
ctl.advance(3.seconds)
}
conduct()
}
}
if (!sys.props.contains("SKIP_FLAKY"))
test("DurationAlarm should sleep until it's over") {
val conductor = new Conductor
import conductor._
@volatile var ctr = 0
Time.withCurrentTimeFrozen { ctl =>
localThread(conductor) {
Alarm.armAndExecute({ () => new DurationAlarm(5.seconds) }, { () => ctr += 1 })
}
localThread(conductor) {
waitForBeat(1)
assert(ctr == 1)
ctl.advance(2.seconds)
waitForBeat(2)
assert(ctr == 1)
ctl.advance(3.seconds)
}
}
localWhenFinished(conductor) {
assert(ctr == 2)
}
}
trait GenerationAlarmHelper {
val fakePool = new FakeMemoryPool(new FakeMemoryUsage(StorageUnit.zero, 10.megabytes))
val fakeBean = new FakeGarbageCollectorMXBean(0, 0)
val nfo = new JvmInfo(fakePool, fakeBean)
val ctr = FakeByteCounter(1, Time.now, nfo)
}
test("GenerationAlarm should sleep until the next alarm") {
val h = new GenerationAlarmHelper {}
import h._
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
localThread(conductor) {
Alarm.arm({ () => new GenerationAlarm(ctr) min new IntervalAlarm(1.second) })
}
localThread(conductor) {
waitForBeat(1)
fakeBean.getCollectionCount = 1
ctl.advance(1.second)
}
conduct()
}
}
test("PredicateAlarm") {
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
@volatile var bool = false
localThread(conductor) {
Alarm.arm({ () => new PredicateAlarm(() => bool) min new IntervalAlarm(1.second) })
}
localThread(conductor) {
waitForBeat(1)
bool = true
ctl.advance(1.second)
}
conduct()
}
}
case class FakeByteCounter(rte: Double, gc: Time, nfo: JvmInfo) extends ByteCounter {
def rate(): Double = rte
def lastGc: Time = gc
def info: JvmInfo = nfo
}
test("BytesAlarm should finish when we have enough bytes") {
val h = new GenerationAlarmHelper {}
import h._
val conductor = new Conductor
import conductor._
Time.withCurrentTimeFrozen { ctl =>
val ctr = new FakeByteCounter(1000, Time.now, nfo)
@volatile var bool = false
val usage = new FakeMemoryUsage(0.bytes, 10.megabytes)
fakePool.setSnapshot(usage)
localThread(conductor) {
Alarm.arm({ () => new BytesAlarm(ctr, () => 5.megabytes) })
}
localThread(conductor) {
waitForBeat(1)
fakePool.setSnapshot(usage.copy(used = 5.megabytes))
ctl.advance(100.milliseconds)
}
conduct()
}
}
test("BytesAlarm should use 80% of the target") {
val h = new GenerationAlarmHelper {}
import h._
val ctr = FakeByteCounter(50.kilobytes.inBytes, Time.now, nfo)
val alarm = new BytesAlarm(ctr, () => 5.megabytes)
// 5MB / (50 KB/ms) * 8 / 10 == 80.milliseconds
// 80.milliseconds < 100.milliseconds
assert(alarm.sleeptime == ((80 * 1.kilobyte.inBytes / 1000).milliseconds))
}
test("BytesAlarm should use the default if the gap is too big") {
val h = new GenerationAlarmHelper {}
import h._
val ctr = FakeByteCounter(1000, Time.now, nfo)
val alarm = new BytesAlarm(ctr, () => 5.megabytes)
// 5MB / 1000000B/S * 8 / 10 == 4.seconds
// 4.seconds > 100.milliseconds
assert(alarm.sleeptime == 100.milliseconds)
}
test("BytesAlarm should use zero if we're past") {
val h = new GenerationAlarmHelper {}
import h._
val ctr = FakeByteCounter(1000, Time.now, nfo)
val alarm = new BytesAlarm(ctr, () => 5.megabytes)
fakePool.setSnapshot(new FakeMemoryUsage(6.megabytes, 10.megabytes))
// -1MB / 1000000B/S * 8 / 10 == -800.milliseconds
// -800.milliseconds < 10.milliseconds
assert(alarm.sleeptime == 10.milliseconds)
}
}
| twitter/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/lease/exp/AlarmTest.scala | Scala | apache-2.0 | 5,837 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.arrow.io
import java.io.{Closeable, Flushable, OutputStream}
import java.nio.channels.Channels
import com.typesafe.scalalogging.LazyLogging
import org.apache.arrow.vector.dictionary.{Dictionary, DictionaryProvider}
import org.apache.arrow.vector.ipc.ArrowStreamWriter
import org.locationtech.geomesa.arrow.vector.SimpleFeatureVector.SimpleFeatureEncoding
import org.locationtech.geomesa.arrow.vector.{ArrowDictionary, SimpleFeatureVector}
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* For writing simple features to an arrow file.
*
* Uses arrow streaming format (no footer).
*
* @param vector simple feature vector
* @param provider dictionary provider
* @param os output stream
*/
class SimpleFeatureArrowFileWriter private (
vector: SimpleFeatureVector,
provider: DictionaryProvider with Closeable,
os: OutputStream,
sort: Option[(String, Boolean)]
) extends Closeable with Flushable with LazyLogging {
private val metadata = sort.map { case (field, reverse) => getSortAsMetadata(field, reverse) }.orNull
private val root = createRoot(vector.underlying, metadata)
private val writer = new ArrowStreamWriter(root, provider, Channels.newChannel(os))
private var index = 0
def sft: SimpleFeatureType = vector.sft
/**
* Buffer a feature to write
*
* @param sf simple feature
*/
def add(sf: SimpleFeature): Unit = {
vector.writer.set(index, sf)
index += 1
}
/**
* Writes any currently buffered features to disk. This will create an ArrowBatch
* containing the currently buffered features. Note that if there are no features,
* an empty record batch will be created
*/
override def flush(): Unit = {
vector.writer.setValueCount(index)
root.setRowCount(index)
writer.writeBatch()
vector.clear()
index = 0
}
/**
* Close the writer and flush any buffered features
*/
override def close(): Unit = {
try {
if (index > 0) {
flush()
}
writer.end()
} finally {
// note: don't close the vector schema root as it closes the vector as well
CloseWithLogging.raise(Seq(writer, provider, vector))
}
}
}
object SimpleFeatureArrowFileWriter {
/**
* For writing simple features to an arrow file.
*
* Uses arrow streaming format (no footer).
*
* @param os output stream
* @param sft simple feature type
* @param dictionaries map of field names to dictionary values, used for dictionary encoding fields.
* All values must be provided up front.
* @param encoding encoding options
*/
def apply(
os: OutputStream,
sft: SimpleFeatureType,
dictionaries: Map[String, ArrowDictionary] = Map.empty,
encoding: SimpleFeatureEncoding = SimpleFeatureEncoding.Min,
sort: Option[(String, Boolean)] = None): SimpleFeatureArrowFileWriter = {
val vector = SimpleFeatureVector.create(sft, dictionaries, encoding)
// convert the dictionary values into arrow vectors
// make sure we load dictionaries before instantiating the stream writer
val provider: DictionaryProvider with Closeable = new DictionaryProvider with Closeable {
private val dictionaries = vector.dictionaries.collect { case (_, d) => d.id -> d.toDictionary(vector.encoding) }
override def lookup(id: Long): Dictionary = dictionaries(id)
override def close(): Unit = CloseWithLogging(dictionaries.values)
}
new SimpleFeatureArrowFileWriter(vector, provider, os, sort)
}
// convert the dictionary values into arrow vectors
def provider(
dictionaries: Map[String, ArrowDictionary],
encoding: SimpleFeatureEncoding): DictionaryProvider with Closeable = {
new DictionaryProvider with Closeable {
private val dicts = dictionaries.collect { case (_, d) => d.id -> d.toDictionary(encoding) }
override def lookup(id: Long): Dictionary = dicts(id)
override def close(): Unit = CloseWithLogging(dicts.values)
}
}
}
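// Illustrative usage sketch (not part of the original source): demonstrates the add/flush/close
// lifecycle documented on the class above, created via the companion apply. The feature type and
// features are supplied by the caller, and the output path below is a hypothetical placeholder.
object SimpleFeatureArrowFileWriterUsageSketch {

  import java.io.FileOutputStream

  /**
   * Writes the given features to a single arrow streaming file.
   *
   * @param sft simple feature type of the features
   * @param features features to write
   */
  def writeAll(sft: SimpleFeatureType, features: Iterator[SimpleFeature]): Unit = {
    val os = new FileOutputStream("/tmp/features.arrow") // hypothetical output location
    val writer = SimpleFeatureArrowFileWriter(os, sft)
    try {
      features.foreach(writer.add) // buffers features into the underlying vector
      // writer.flush() could be called periodically to emit a record batch and bound memory use
    } finally {
      writer.close() // flushes any remaining buffered features and ends the stream
      os.close()
    }
  }
}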
| ccri/geomesa | geomesa-arrow/geomesa-arrow-gt/src/main/scala/org/locationtech/geomesa/arrow/io/SimpleFeatureArrowFileWriter.scala | Scala | apache-2.0 | 4,575 |
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
@deprecated("Use co.verdigris.spark.connector.cql.S3ConnectionFactory with spark.executorEnv.AWS_REGION instead", "0.4.0")
object AwsS3CACentral1ConnectionFactory extends S3ConnectionFactory {
}
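// Illustrative sketch (not part of the original source): the deprecation above points to
// configuring the region through the executor environment rather than a region-specific factory.
// A minimal configuration might look like the following; the application name is a placeholder,
// and the factory key assumes the standard spark-cassandra-connector setting.
object S3ConnectionFactoryConfigSketch {

  import org.apache.spark.SparkConf

  def conf: SparkConf = new SparkConf()
    .setAppName("example-app")
    .set("spark.cassandra.connection.factory", "co.verdigris.spark.connector.cql.S3ConnectionFactory")
    .set("spark.executorEnv.AWS_REGION", "ca-central-1")
}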
| VerdigrisTech/spark-cassandra-connection-factory | src/main/scala/co/verdigris/spark/connector/cql/AwsS3CACentral1ConnectionFactory.scala | Scala | apache-2.0 | 275 |
package com.sksamuel.elastic4s.http
import java.nio.charset.Charset
import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.sksamuel.exts.Logging
object JacksonSupport {
val mapper: ObjectMapper with ScalaObjectMapper = new ObjectMapper with ScalaObjectMapper
mapper.registerModule(DefaultScalaModule)
mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
mapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false)
mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, true)
mapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
mapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true)
}
import org.apache.http.HttpEntity
import org.elasticsearch.client.{Response, ResponseException}
import scala.io.{Codec, Source}
import scala.util.{Failure, Try}
trait ResponseHandler[U] {
def onResponse(response: Response): Try[U]
def onError(e: Exception): Try[U] = Failure(e)
}
// a ResponseHandler that marshalls the body into the required type using Jackson
// the response body is converted into a string using a codec derived from the content encoding header
// if the content encoding header is null, then UTF-8 is assumed
object ResponseHandler extends Logging {
def fromEntity[U: Manifest](entity: HttpEntity): U = {
logger.debug(s"Attempting to unmarshall response to ${manifest.runtimeClass.getName}")
val charset = Option(entity.getContentEncoding).map(_.getValue).getOrElse("UTF-8")
implicit val codec = Codec(Charset.forName(charset))
val body = Source.fromInputStream(entity.getContent).mkString
logger.debug(body)
JacksonSupport.mapper.readValue[U](body)
}
def default[U: Manifest] = new DefaultResponseHandler[U]
def failure404[U: Manifest] = new NotFound404ResponseHandler[U]
}
class DefaultResponseHandler[U: Manifest] extends ResponseHandler[U] {
override def onResponse(response: Response): Try[U] = Try(ResponseHandler.fromEntity[U](response.getEntity))
override def onError(e: Exception): Try[U] = Failure(e)
}
class NotFound404ResponseHandler[U: Manifest] extends DefaultResponseHandler[U] {
override def onError(e: Exception): Try[U] = {
e match {
case re: ResponseException if re.getResponse.getStatusLine.getStatusCode == 404 =>
Try(ResponseHandler.fromEntity[U](re.getResponse.getEntity))
case _ => Failure(e)
}
}
}
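// Illustrative usage sketch (not part of the original source): shows how a handler obtained from
// the ResponseHandler factory above can be applied to the outcome of a low-level REST call.
// The SearchShards case class is a hypothetical target type for the Jackson unmarshalling.
object ResponseHandlerUsageSketch {

  case class SearchShards(total: Int, successful: Int)

  // Unmarshals a successful response body via Jackson, or wraps the exception in a Failure
  def handle(result: Either[Exception, Response]): Try[SearchShards] = {
    val handler = ResponseHandler.default[SearchShards]
    result.fold(handler.onError, handler.onResponse)
  }
}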
| aroundus-inc/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/ResponseHandler.scala | Scala | apache-2.0 | 2,734 |
package org.bitcoins.core.number
import org.scalatest.{ FlatSpec, MustMatchers }
class UInt8Test extends FlatSpec with MustMatchers {
"UInt8" must "convert a byte to a UInt8 correctly" in {
UInt8.toUInt8(0.toByte) must be(UInt8.zero)
UInt8.toUInt8(1.toByte) must be(UInt8.one)
UInt8.toUInt8(255.toByte) must be(UInt8(255.toShort))
}
}
| Christewart/bitcoin-s-core | src/test/scala/org/bitcoins/core/number/UInt8Test.scala | Scala | mit | 355 |
package com.xah.chat.ui.fragments
import android.app.Fragment
import com.xah.chat.framework.TraitFragmentContext
/**
* some fragment helpers
* Created by lemonxah on 2014/10/10.
*/
abstract class BaseFragment extends Fragment with TraitFragmentContext | lemonxah/xaHChat | src/main/scala/com/xah/chat/ui/fragments/BaseFragment.scala | Scala | mit | 256 |
package io.scalaland.chimney.dsl
import io.scalaland.chimney.internal.TransformerCfg._
import io.scalaland.chimney.internal._
import io.scalaland.chimney.internal.macros.dsl.{TransformerBlackboxMacros, TransformerIntoWhiteboxMacros}
import scala.language.experimental.macros
/** Provides DSL for configuring [[io.scalaland.chimney.Transformer]]'s
* generation and using the result to transform value at the same time
*
* @param source object to transform
* @param td transformer definition
* @tparam From type of input value
* @tparam To type of output value
* @tparam C type-level encoded config
*/
final class TransformerInto[From, To, C <: TransformerCfg, Flags <: TransformerFlags](
val source: From,
val td: TransformerDefinition[From, To, C, Flags]
) extends FlagsDsl[Lambda[`F1 <: TransformerFlags` => TransformerInto[From, To, C, F1]], Flags] {
/** Lifts current transformation with provided type constructor `F`.
*
* It keeps all the configuration, provided missing values, renames,
* coproduct instances etc.
*
* @tparam F wrapper type constructor
* @return [[io.scalaland.chimney.dsl.TransformerFInto]]
*/
def lift[F[+_]]: TransformerFInto[F, From, To, WrapperType[F, C], Flags] =
new TransformerFInto[F, From, To, WrapperType[F, C], Flags](source, td.lift[F])
/** Use `value` provided here for field picked using `selector`.
*
* By default if `From` is missing field picked by `selector` compilation fails.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#providing-missing-values]] for more details
* @return [[io.scalaland.chimney.dsl.TransformerInto]]
*/
def withFieldConst[T, U](selector: To => T, value: U): TransformerInto[From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withFieldConstImpl
/** Use wrapped `value` provided here for field picked using `selector`.
*
* By default if `From` is missing field picked by `selector` compilation fails.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#providing-missing-values]] for more details
* @param selector target field in `To`, defined like `_.name`
* @param value constant value to use for the target field
* @return [[io.scalaland.chimney.dsl.TransformerFInto]]
*/
def withFieldConstF[F[+_], T, U](
selector: To => T,
value: F[U]
): TransformerFInto[F, From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withFieldConstFImpl[F]
/** Use `map` provided here to compute value of field picked using `selector`.
*
* By default if `From` is missing field picked by `selector` compilation fails.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#providing-missing-values]] for more details
* @param selector target field in `To`, defined like `_.name`
* @param map function used to compute value of the target field
* @return [[io.scalaland.chimney.dsl.TransformerInto]]
* */
def withFieldComputed[T, U](
selector: To => T,
map: From => U
): TransformerInto[From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withFieldComputedImpl
/** Use `map` provided here to compute wrapped value of field picked using `selector`.
*
* By default if `From` is missing field picked by `selector` compilation fails.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#providing-missing-values]] for more details
* @param selector target field in `To`, defined like `_.name`
* @param map function used to compute value of the target field
* @return [[io.scalaland.chimney.dsl.TransformerFInto]]
*/
def withFieldComputedF[F[+_], T, U](
selector: To => T,
map: From => F[U]
): TransformerFInto[F, From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withFieldComputedFImpl[F]
/** Use `selectorFrom` field in `From` to obtain the value of `selectorTo` field in `To`
*
* By default if `From` is missing field picked by `selectorTo` compilation fails.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#fields-renaming]] for more details
* @param selectorFrom source field in `From`, defined like `_.originalName`
* @param selectorTo target field in `To`, defined like `_.newName`
* @return [[io.scalaland.chimney.dsl.TransformerInto]]
* */
def withFieldRenamed[T, U](
selectorFrom: From => T,
selectorTo: To => U
): TransformerInto[From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withFieldRenamedImpl
/** Use `f` to calculate the (missing) coproduct instance when mapping one coproduct into another
*
* By default if mapping one coproduct in `From` into another coproduct in `To` derivation
   * expects that coproducts will have matching names of their components, and that for every component
   * in the `To` type there is a matching component in the `From` type. If some component is missing
* it will fail.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#transforming-coproducts]] for more details
* @param f function to calculate values of components that cannot be mapped automatically
* @return [[io.scalaland.chimney.dsl.TransformerInto]]
*/
def withCoproductInstance[Inst](f: Inst => To): TransformerInto[From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withCoproductInstanceImpl
/** Use `f` to calculate the (missing) wrapped coproduct instance when mapping one coproduct into another
*
* By default if mapping one coproduct in `From` into another coproduct in `To` derivation
   * expects coproducts to have matching names of their components, and that for every component
   * in the `To` type there is a matching component in the `From` type. If some component is missing
   * it fails compilation unless a replacement is provided with this operation.
*
* @see [[https://scalalandio.github.io/chimney/transformers/customizing-transformers.html#transforming-coproducts]] for more details
* @param f function to calculate values of components that cannot be mapped automatically
* @return [[io.scalaland.chimney.dsl.TransformerFInto]]
*/
def withCoproductInstanceF[F[+_], Inst](f: Inst => F[To]): TransformerFInto[F, From, To, _ <: TransformerCfg, Flags] =
macro TransformerIntoWhiteboxMacros.withCoproductInstanceFImpl[F]
/** Apply configured transformation in-place.
*
* It runs macro that tries to derive instance of `Transformer[From, To]`
* and immediately apply it to captured `source` value.
* When transformation can't be derived, it results with compilation error.
*
* @return transformed value of type `To`
*/
def transform[ScopeFlags <: TransformerFlags](
implicit tc: io.scalaland.chimney.dsl.TransformerConfiguration[ScopeFlags]
): To =
macro TransformerBlackboxMacros.transformImpl[From, To, C, Flags, ScopeFlags]
/** Used internally by macro. Please don't use in your code.
*/
def __refineTransformerDefinition[C1 <: TransformerCfg](
f: TransformerDefinition[From, To, C, Flags] => TransformerDefinition[From, To, C1, Flags]
): TransformerInto[From, To, C1, Flags] =
new TransformerInto[From, To, C1, Flags](source, f(td))
}
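// Illustrative usage sketch (not part of the original source): a self-contained example of the
// customization operations documented above. The User/UserDto case classes and the Shape/ShapeDto
// hierarchies are hypothetical types invented for this sketch.
object TransformerIntoUsageSketch {

  case class User(name: String, surname: String, age: Int)
  case class UserDto(name: String, lastName: String, age: Int, tag: String)

  sealed trait Shape
  object Shape {
    case object Circle extends Shape
    case object Square extends Shape
    case object Triangle extends Shape
  }

  sealed trait ShapeDto
  object ShapeDto {
    case object Circle extends ShapeDto
    case object Square extends ShapeDto
  }

  val user = User("Jane", "Doe", 40)

  // withFieldConst supplies a value for a target field missing from the source,
  // withFieldComputed derives a target field from the whole source value, and
  // withFieldRenamed maps a differently named source field onto the target field.
  val dto: UserDto =
    user.into[UserDto]
      .withFieldConst(_.tag, "imported")
      .withFieldComputed(_.age, u => u.age + 1)
      .withFieldRenamed(_.surname, _.lastName)
      .transform

  // withCoproductInstance handles a source component with no same-named counterpart
  // in the target coproduct (Triangle has no ShapeDto equivalent here).
  def toDto(shape: Shape): ShapeDto =
    shape.into[ShapeDto]
      .withCoproductInstance { (_: Shape.Triangle.type) => ShapeDto.Square }
      .transform
}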
| scalalandio/chimney | chimney/src/main/scala/io/scalaland/chimney/dsl/TransformerInto.scala | Scala | apache-2.0 | 7,586 |
package documentation
import org.qirx.littlespec.assertion.Assertion
import org.qirx.littlespec.fragments.Code
import org.qirx.littlespec.fragments.CompoundResult
import org.qirx.littlespec.fragments.Fragment
import org.qirx.littlespec.Specification
import org.qirx.littlespec.fragments.Success
import org.qirx.littlespec.fragments.Text
import org.qirx.littlespec.io.Source
import org.qirx.littlespec.macros.Location
import testUtils.ExampleUtils
import testUtils.FailWith
object `_5_Customization` extends Specification with ExampleUtils {
"There are a lot of ways in which you can make things more readable and usable" - {
"""|The easiest form of customization is the use of `Assertion` classes
|that work with the `must` enhancement.""".stripMargin -
example {
val beAnElephant =
        new Assertion[String /* this assertion only works on strings */ ] {
def assert(s: => String) =
if (s != "elephant") Left("The given string is not an elephant")
else Right(success)
}
"elephant" must beAnElephant
("mouse" must beAnElephant) failsWith "The given string is not an elephant"
}
"You could also rename or combine existing assertions" -
example {
def beAFailure = throwA[Fragment.Failure]
def beAFailureWithMessage(message: String) = beAFailure withMessage message
failure("test") must beAFailureWithMessage("test")
}
"Another form is by using enhancements" - {
example {
implicit class FailWith(t: => FragmentBody) {
def failsWith(message: String) =
t must (throwA[Fragment.Failure] withMessage message)
}
failure("test") failsWith "test"
}
example {
implicit class IntAssertEnhancement(i: Int) {
def isThree = i is 3
}
3.isThree
}
}
"It's also possible to use the source code in custom fragments" - {
new SpecificationExample {
import org.qirx.littlespec.io.Source
import org.qirx.littlespec.macros.Location
      // Make sure the location is passed in implicitly;
      // this allows the macro to materialize it.
class Example(implicit location: Location) { self =>
// Source.codeAtLocation will grab the source code
// between { and }
def expecting(result: self.type => FragmentBody) =
createFragment(Source.codeAtLocation(location), result(self))
}
trait MyLibraryTrait {
def name: String
}
// Usage example:
"API documentation" -
new Example {
// Extend the library trait and implement the name property
object CustomObject extends MyLibraryTrait {
lazy val name = "test"
}
}.expecting {
_.CustomObject.name is "test"
}
}.expecting {
val Expected =
"""|// Extend the library trait and implement the name property
|object CustomObject extends MyLibraryTrait {
| lazy val name = "test"
|}""".stripMargin
_ isLike {
case Seq(
CompoundResult(Text("API documentation"), Seq(
Success(Code(Expected))))) => success
}
}
}
}
} | EECOLOR/little-spec | core/src/test/scala/documentation/5_Customization.scala | Scala | mit | 3,348 |
package spire.math
import spire.algebra.{IsIntegral, Order, Rig, Signed}
object UByte extends UByteInstances {
@inline final def apply(n: Byte): UByte = new UByte(n)
@inline final def apply(n: Int): UByte = new UByte(n.toByte)
@inline final def MinValue: UByte = UByte(0)
@inline final def MaxValue: UByte = UByte(-1)
}
class UByte(val signed: Byte) extends AnyVal with scala.math.ScalaNumericAnyConversions {
override def toByte: Byte = signed
override def toChar: Char = (signed & 0xff).toChar
override def toShort: Short = (signed & 0xff).toShort
override def toInt: Int = signed & 0xff
override def toLong: Long = signed & 0xffL
override def toFloat: Float = toInt.toFloat
override def toDouble: Double = toInt.toDouble
def toBigInt: BigInt = BigInt(toInt)
def byteValue(): Byte = toByte
def shortValue(): Short = toShort
def intValue(): Int = toInt
def longValue(): Long = toLong
def floatValue(): Float = toFloat
def doubleValue(): Double = toDouble
def isWhole(): Boolean = true
def underlying(): Any = signed
override def isValidByte: Boolean = signed >= 0
override def isValidShort: Boolean = true
override def isValidChar: Boolean = true
override def isValidInt: Boolean = true
def isValidLong: Boolean = true
override def toString: String = toInt.toString
def == (that: UByte): Boolean = this.signed == that.signed
def != (that: UByte): Boolean = this.signed != that.signed
def ===(that: UByte): Boolean = this.signed == that.signed
def =!=(that: UByte): Boolean = this.signed != that.signed
def <= (that: UByte): Boolean = this.toInt <= that.toInt
def < (that: UByte): Boolean = this.toInt < that.toInt
def >= (that: UByte): Boolean = this.toInt >= that.toInt
def > (that: UByte): Boolean = this.toInt > that.toInt
def unary_- : UByte = UByte(-this.signed)
def + (that: UByte): UByte = UByte(this.signed + that.signed)
def - (that: UByte): UByte = UByte(this.signed - that.signed)
def * (that: UByte): UByte = UByte(this.signed * that.signed)
def / (that: UByte): UByte = UByte(this.toInt / that.toInt)
def % (that: UByte): UByte = UByte(this.toInt % that.toInt)
def unary_~ : UByte = UByte(~this.signed)
def << (shift: Int): UByte = UByte((signed & 0xff) << (shift & 7))
def >> (shift: Int): UByte = UByte((signed & 0xff) >>> (shift & 7))
def >>> (shift: Int): UByte = UByte((signed & 0xff) >>> (shift & 7))
def & (that: UByte): UByte = UByte((this.signed & 0xff) & (that.signed & 0xff))
def | (that: UByte): UByte = UByte((this.signed & 0xff) | (that.signed & 0xff))
def ^ (that: UByte): UByte = UByte((this.signed & 0xff) ^ (that.signed & 0xff))
def ** (that: UByte): UByte = UByte(pow(this.toLong, that.toLong).toInt)
}
trait UByteInstances {
implicit final val UByteAlgebra = new UByteAlgebra
implicit final val UByteBitString = new UByteBitString
import spire.math.NumberTag._
implicit final val UByteTag = new UnsignedIntTag[UByte](UByte.MinValue, UByte.MaxValue)
}
private[math] trait UByteIsRig extends Rig[UByte] {
def one: UByte = UByte(1)
def plus(a:UByte, b:UByte): UByte = a + b
override def pow(a:UByte, b:Int): UByte = {
if (b < 0)
throw new IllegalArgumentException("negative exponent: %s" format b)
a ** UByte(b)
}
override def times(a:UByte, b:UByte): UByte = a * b
def zero: UByte = UByte(0)
}
private[math] trait UByteOrder extends Order[UByte] {
override def eqv(x:UByte, y:UByte): Boolean = x == y
override def neqv(x:UByte, y:UByte): Boolean = x != y
override def gt(x: UByte, y: UByte): Boolean = x > y
override def gteqv(x: UByte, y: UByte): Boolean = x >= y
override def lt(x: UByte, y: UByte): Boolean = x < y
override def lteqv(x: UByte, y: UByte): Boolean = x <= y
def compare(x: UByte, y: UByte): Int = if (x < y) -1 else if (x > y) 1 else 0
}
private[math] trait UByteIsSigned extends Signed[UByte] {
def signum(a: UByte): Int = java.lang.Integer.signum(a.signed) & 1
def abs(a: UByte): UByte = a
}
private[math] trait UByteIsReal extends IsIntegral[UByte] with UByteOrder with UByteIsSigned {
def toDouble(n: UByte): Double = n.toDouble
def toBigInt(n: UByte): BigInt = n.toBigInt
}
@SerialVersionUID(0L)
private[math] class UByteBitString extends BitString[UByte] with Serializable {
def one: UByte = UByte(-1: Byte)
def zero: UByte = UByte(0: Byte)
def and(a: UByte, b: UByte): UByte = a & b
def or(a: UByte, b: UByte): UByte = a | b
def complement(a: UByte): UByte = ~a
override def xor(a: UByte, b: UByte): UByte = a ^ b
def signed: Boolean = false
def width: Int = 8
def toHexString(n: UByte): String = Integer.toHexString(n.toInt)
def bitCount(n: UByte): Int = Integer.bitCount(n.toInt)
def highestOneBit(n: UByte): UByte = UByte(Integer.highestOneBit(n.toInt))
def lowestOneBit(n: UByte): UByte = UByte(Integer.lowestOneBit(n.toInt))
def numberOfLeadingZeros(n: UByte): Int = Integer.numberOfLeadingZeros(n.toInt)
def numberOfTrailingZeros(n: UByte): Int = Integer.numberOfTrailingZeros(n.toInt)
def leftShift(n: UByte, i: Int): UByte = n << i
def rightShift(n: UByte, i: Int): UByte = n >> i
def signedRightShift(n: UByte, i: Int): UByte = n >>> i
def rotateLeft(n: UByte, i: Int): UByte = {
val j = i & 7
(n << j) | (n >>> (8 - j))
}
def rotateRight(n: UByte, i: Int): UByte = {
val j = i & 7
(n >>> j) | (n << (8 - j))
}
}
@SerialVersionUID(0L)
private[math] class UByteAlgebra extends UByteIsRig with UByteIsReal with Serializable
| guersam/spire | core/shared/src/main/scala/spire/math/UByte.scala | Scala | mit | 5,523 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.util.regexp
/** This runtime exception is thrown if an attempt to instantiate a
* syntactically incorrect expression is detected.
*
* @author Burak Emir
* @version 1.0
*/
class SyntaxError(e: String) extends RuntimeException(e)
| cran/rkafkajars | java/scala/util/regexp/SyntaxError.scala | Scala | apache-2.0 | 780 |
package io.mca.oauth
import org.scalatest._
class OAuthClientTest extends FunSpec with Matchers {
describe("Twitter API example") {
it("generates the expected signature base") {
val token = Fixtures.TwitterAPITest.token
val requestParams = Fixtures.TwitterAPITest.requestParams
val oAuthParams = Fixtures.TwitterAPITest.createOAuthParams :+ ("oauth_token", token)
val params = requestParams ++ oAuthParams
val parameterBase = Fixtures.TwitterAPITest.parameterBase(params)
val signatureBase = Fixtures.TwitterAPITest.signatureBase("POST", "https://api.twitter.com/1/statuses/update.json", parameterBase)
signatureBase should equal("POST&https%3A%2F%2Fapi.twitter.com%2F1%2Fstatuses%2Fupdate.json&include_entities%3Dtrue%26oauth_consumer_key%3Dxvz1evFS4wEEPTGEFPHBog%26oauth_nonce%3DkYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1318622958%26oauth_token%3D370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb%26oauth_version%3D1.0%26status%3DHello%2520Ladies%2520%252B%2520Gentlemen%252C%2520a%2520signed%2520OAuth%2520request%2521")
}
it("generates the expected signing key") {
val signingKey = Fixtures.TwitterAPITest.signingKey("LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE")
signingKey should equal("kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw&LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE")
}
it("generates the expected signature") {
val token = Fixtures.TwitterAPITest.token
val requestParams = Fixtures.TwitterAPITest.requestParams
val oAuthParams = Fixtures.TwitterAPITest.createOAuthParams :+ ("oauth_token", token)
val params = requestParams ++ oAuthParams
val signature = Fixtures.TwitterAPITest.createSignature("POST",
"https://api.twitter.com/1/statuses/update.json",
params,
Fixtures.TwitterAPITest.tokenSecret)
signature should equal("tnnArxj06cWHq44gCs1OSKk/jLY=")
}
it("generates the expected header") {
val token = Fixtures.TwitterAPITest.token
val tokenSecret = Fixtures.TwitterAPITest.tokenSecret
val requestParams = Fixtures.TwitterAPITest.requestParams
val header = Fixtures.TwitterAPITest.resourceHeader("POST",
"https://api.twitter.com/1/statuses/update.json",
requestParams,
token,
tokenSecret)
val expected = "OAuth oauth_consumer_key=\\"xvz1evFS4wEEPTGEFPHBog\\", oauth_nonce=\\"kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg\\", oauth_signature_method=\\"HMAC-SHA1\\", oauth_timestamp=\\"1318622958\\", oauth_version=\\"1.0\\", oauth_token=\\"370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb\\", oauth_signature=\\"tnnArxj06cWHq44gCs1OSKk%2FjLY%3D\\""
header should equal(expected)
}
}
describe("Resource Header") {
it("starts with OAuth followed by a space") {
Fixtures.resourceHeader should startWith("OAuth ")
}
it("should include the consumer key") {
Fixtures.resourceHeader should include("oauth_consumer_key=\"testconsumerkey\"")
}
it("should include the nonce") {
Fixtures.resourceHeader should include("oauth_nonce=\"testnonce\"")
}
it("should include the signature") {
Fixtures.resourceHeader should include("oauth_signature=\"testsignature\"")
}
it("should include the signature method") {
Fixtures.resourceHeader should include("oauth_signature_method=\"HMAC-SHA1\"")
}
it("should include the timestamp") {
Fixtures.resourceHeader should include("oauth_timestamp=\"1318622958\"")
}
it("should include the token") {
Fixtures.resourceHeader should include("oauth_token=\"testtoken\"")
}
it("should include the OAuth version") {
Fixtures.resourceHeader should include("oauth_version=\"1.0\"")
}
}
describe("Request Token Header") {
it("starts with OAuth followed by a space") {
Fixtures.tokenRequestHeader should startWith("OAuth ")
}
it("should include the consumer key") {
Fixtures.tokenRequestHeader should include("oauth_consumer_key=\"testconsumerkey\"")
}
it("should include the nonce") {
Fixtures.tokenRequestHeader should include("oauth_nonce=\"testnonce\"")
}
it("should include the signature") {
Fixtures.tokenRequestHeader should include("oauth_signature=\"testsignature\"")
}
it("should include the signature method") {
Fixtures.tokenRequestHeader should include("oauth_signature_method=\"HMAC-SHA1\"")
}
it("should include the timestamp") {
Fixtures.tokenRequestHeader should include("oauth_timestamp=\"1318622958\"")
}
it("should include the callback") {
Fixtures.tokenRequestHeader should include("oauth_callback=\"http%3A%2F%2Fexample.com%2Fcallback\"")
}
it("should include the OAuth version") {
Fixtures.tokenRequestHeader should include("oauth_version=\"1.0\"")
}
describe("when a callback is not supplied") {
it("should insert 'oob' in the callback parameter") {
Fixtures.tokenRequestHeaderNoCallback should include("oauth_callback=\"oob\"")
}
}
}
describe("Signing Key") {
it("should append the token secret to the consumer secret") {
val signingKey = Fixtures.OAuthTestClient.signingKey(Fixtures.OAuthTestClient.tokenSecret)
signingKey should equal("testconsumersecret&testtokensecret")
}
}
describe("Signature Base") {
it("should start with the HTTP method") {
val method = "GET"
val baseUri = "http://example.com"
val parameterBase = "parameterBase"
val signatureBase = Fixtures.OAuthTestClient.signatureBase(method, baseUri, parameterBase)
signatureBase should startWith("GET")
}
it("should uppercase the HTTP method") {
val method = "get"
val baseUri = "http://example.com"
val parameterBase = "parameterBase"
val signatureBase = Fixtures.OAuthTestClient.signatureBase(method, baseUri, parameterBase)
signatureBase should startWith("GET")
}
it("should append the percent encoded base URI after the HTTP method") {
val method = "GET"
val baseUri = "http://example.com"
val parameterBase = "parameterBase"
val signatureBase = Fixtures.OAuthTestClient.signatureBase(method, baseUri, parameterBase)
signatureBase should startWith("GET&http%3A%2F%2Fexample.com")
}
it("should append the percent encoded parameter string after the base URI") {
val method = "GET"
val baseUri = "http://example.com"
val token = Fixtures.OAuthTestClient.token
val requestParams = Fixtures.OAuthTestClient.requestParams
val oAuthParams = Fixtures.OAuthTestClient.createOAuthParams :+ ("oauth_token", token)
val params = requestParams ++ oAuthParams
val parameterBase = Fixtures.OAuthTestClient.parameterBase(params)
val signatureBase = Fixtures.OAuthTestClient.signatureBase(method, baseUri, parameterBase)
signatureBase should equal("GET&http%3A%2F%2Fexample.com&include_entities%3Dtrue%26oauth_consumer_key%3Dtestconsumerkey%26oauth_nonce%3Dtestnonce%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1318622958%26oauth_token%3Dtesttoken%26oauth_version%3D1.0%26status%3DHello%2520Ladies%2520%252B%2520Gentlemen%252C%2520a%2520signed%2520OAuth%2520request%2521")
}
}
describe("Parameter Base") {
it("should include base oauth params") {
val token = Fixtures.OAuthTestClient.token
val requestParams = Fixtures.OAuthTestClient.requestParams
val oAuthParams = Fixtures.OAuthTestClient.createOAuthParams :+ ("oauth_token", token)
val params = requestParams ++ oAuthParams
val parameterBase = Fixtures.OAuthTestClient.parameterBase(params)
parameterBase should include("oauth_consumer_key=testconsumerkey")
parameterBase should include("oauth_nonce=testnonce")
parameterBase should include("oauth_signature_method=HMAC-SHA1")
parameterBase should include("oauth_timestamp=1318622958")
parameterBase should include("oauth_token=testtoken")
parameterBase should include("oauth_version=1.0")
}
it("should add any request params passed to it") {
val parameterBase = Fixtures.OAuthTestClient.parameterBase(Seq(("foo", "bar"), ("baz", "qux")))
parameterBase should include("foo=bar")
parameterBase should include("baz=qux")
}
}
describe("Percent Encoding") {
it("should return an empty string for null values") {
Fixtures.OAuthTestClient.percentEncode(null) should equal("")
}
it("should not encode alphanumeric characters") {
Fixtures.OAuthTestClient.percentEncode("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") should equal("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
}
it("should not encode reserved characters") {
Fixtures.OAuthTestClient.percentEncode("-._~") should equal("-._~")
}
it("should encode other chars") {
Fixtures.OAuthTestClient.percentEncode("Ladies + Gentlemen") should equal("Ladies%20%2B%20Gentlemen")
Fixtures.OAuthTestClient.percentEncode("An encoded string!") should equal("An%20encoded%20string%21")
Fixtures.OAuthTestClient.percentEncode("Dogs, Cats & Mice") should equal("Dogs%2C%20Cats%20%26%20Mice")
Fixtures.OAuthTestClient.percentEncode("☃") should equal("%E2%98%83")
}
}
}
| mmacaulay/scala-oauth | src/test/scala/io/mca/oauth/OAuthClientTest.scala | Scala | mit | 9,424 |
package fp
import scala.annotation.tailrec
sealed abstract class Free[F[_], A] {
def flatMap[B](f: A => Free[F, B]): Free[F, B] =
FlatMap(this, f)
def map[B](f: A => B): Free[F, B] =
flatMap(f andThen (Return(_)))
}
case class Return[F[_], A](a: A) extends Free[F, A]
case class Suspend[F[_], A](r: F[A]) extends Free[F, A]
case class FlatMap[F[_], A, B](s: Free[F, A], f: A => Free[F, B]) extends Free[F, B]
object Free {
@tailrec
def runTrampoline[A](t: TailRec[A]): A = t match {
case Return(a) => a
case Suspend(r) => r()
case FlatMap(x, f) => x match {
case Return(a) => runTrampoline(f(a))
case Suspend(r) => runTrampoline(f(r()))
case FlatMap(y, g) => runTrampoline(y flatMap (y => g(y) flatMap f))
}
}
def run[F[_], A](free: Free[F, A])(implicit F: Monad[F]): F[A] = step(free) match {
case Return(a) => F.unit(a)
case Suspend(r) => r
case FlatMap(Suspend(r), f) => F.flatMap(r)(a => run(f(a)))
case _ => sys.error("Impossible, since `step` eliminates these cases")
}
@tailrec
def step[F[_], A](free: Free[F, A]): Free[F, A] = free match {
case FlatMap(FlatMap(x, f), g) => step(x flatMap (a => f(a) flatMap g))
case FlatMap(Return(x), f) => step(f(x))
case _ => free
}
def runFree[F[_], G[_], A](free: Free[F, A])(t: F ~> G)
(implicit G: Monad[G]): G[A] =
step(free) match {
case Return(a) => G.unit(a)
case Suspend(r) => t(r)
case FlatMap(Suspend(r), f) => G.flatMap(t(r))(a => runFree(f(a))(t))
case _ => sys.error("Impossible, since `step` eliminates these cases")
}
def translate[F[_], G[_], A](f: Free[F, A])(fg: F ~> G): Free[G, A] = {
type FreeG[B] = Free[G, B]
val t = new (F ~> FreeG) {
def apply[B](b: F[B]): FreeG[B] = Suspend { fg(b) }
}
runFree(f)(t)(freeMonad[G])
}
implicit def freeMonad[F[_]]: Monad[({type f[a] = Free[F, a]})#f] =
new Monad[({type f[a] = Free[F, a]})#f] {
def unit[A](a: => A) = Return(a)
override def flatMap[A, B](fa: Free[F, A])(f: A => Free[F, B]) = fa flatMap f
}
}
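// A minimal usage sketch (added for illustration; not part of the original file). It shows
// Free over Function0 used as a trampoline: every recursive step is suspended as a thunk, so
// evaluation is driven iteratively on the heap instead of growing the stack. If `TailRec[A]`
// defined elsewhere in this package is the alias `Free[Function0, A]` (an assumption), a value
// built like this is exactly what `Free.runTrampoline` consumes.
object FreeUsageSketch {
  def countdown(n: Int): Free[Function0, Int] =
    if (n <= 0) Return(0)
    else Suspend(() => n - 1).flatMap(countdown)
}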
| adamgfraser/fp | src/main/scala/fp/Free.scala | Scala | apache-2.0 | 2,126 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.util.Properties
import java.util.concurrent._
import java.util.concurrent.atomic._
import scala.collection._
import junit.framework.Assert._
import kafka.message._
import kafka.server._
import kafka.utils.TestUtils._
import kafka.utils._
import org.junit.Test
import kafka.serializer._
import kafka.cluster.{Broker, Cluster}
import org.scalatest.junit.JUnit3Suite
import kafka.integration.KafkaServerTestHarness
class ConsumerIteratorTest extends JUnit3Suite with KafkaServerTestHarness {
val numNodes = 1
val overridingProps = new Properties()
overridingProps.put(KafkaConfig.ZkConnectProp, TestZKUtils.zookeeperConnect)
val configs =
for(props <- TestUtils.createBrokerConfigs(numNodes))
yield KafkaConfig.fromProps(props, overridingProps)
val messages = new mutable.HashMap[Int, Seq[Message]]
val topic = "topic"
val group = "group1"
val consumer0 = "consumer0"
val consumedOffset = 5
val cluster = new Cluster(configs.map(c => new Broker(c.brokerId, "localhost", c.port)))
val queue = new LinkedBlockingQueue[FetchedDataChunk]
val topicInfos = configs.map(c => new PartitionTopicInfo(topic,
0,
queue,
new AtomicLong(consumedOffset),
new AtomicLong(0),
new AtomicInteger(0),
""))
val consumerConfig = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer0))
override def setUp() {
super.setUp
createTopic(zkClient, topic, partitionReplicaAssignment = Map(0 -> Seq(configs.head.brokerId)), servers = servers)
}
@Test
def testConsumerIteratorDeduplicationDeepIterator() {
val messageStrings = (0 until 10).map(_.toString).toList
val messages = messageStrings.map(s => new Message(s.getBytes))
val messageSet = new ByteBufferMessageSet(DefaultCompressionCodec, new AtomicLong(0), messages:_*)
topicInfos(0).enqueue(messageSet)
assertEquals(1, queue.size)
queue.put(ZookeeperConsumerConnector.shutdownCommand)
val iter = new ConsumerIterator[String, String](queue,
consumerConfig.consumerTimeoutMs,
new StringDecoder(),
new StringDecoder(),
clientId = "")
val receivedMessages = (0 until 5).map(i => iter.next.message).toList
assertFalse(iter.hasNext)
assertEquals(0, queue.size) // Shutdown command has been consumed.
assertEquals(5, receivedMessages.size)
val unconsumed = messageSet.filter(_.offset >= consumedOffset).map(m => Utils.readString(m.message.payload))
assertEquals(unconsumed, receivedMessages)
}
@Test
def testConsumerIteratorDecodingFailure() {
val messageStrings = (0 until 10).map(_.toString).toList
val messages = messageStrings.map(s => new Message(s.getBytes))
val messageSet = new ByteBufferMessageSet(NoCompressionCodec, new AtomicLong(0), messages:_*)
topicInfos(0).enqueue(messageSet)
assertEquals(1, queue.size)
val iter = new ConsumerIterator[String, String](queue,
ConsumerConfig.ConsumerTimeoutMs,
new FailDecoder(),
new FailDecoder(),
clientId = "")
val receivedMessages = (0 until 5).map{ i =>
assertTrue(iter.hasNext)
val message = iter.next
assertEquals(message.offset, i + consumedOffset)
try {
message.message // should fail
}
catch {
case e: UnsupportedOperationException => // this is ok
case e2: Throwable => fail("Unexpected exception when iterating the message set. " + e2.getMessage)
}
}
}
class FailDecoder(props: VerifiableProperties = null) extends Decoder[String] {
def fromBytes(bytes: Array[Byte]): String = {
throw new UnsupportedOperationException("This decoder does not work at all..")
}
}
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala | Scala | bsd-2-clause | 5,040 |
package net.bhardy.braintree.scala
import net.bhardy.braintree.scala.util.NodeWrapper
import scala.collection.JavaConversions._
import java.util.Iterator
import java.lang.Iterable
/**
* A collection used to page through query or search results.
*
* @tparam T
* type of object being paged, e.g. {@link Transaction} or
* {@link Customer}.
*/
class ResourceCollection[T](val pager: Pager[T], response: NodeWrapper) extends Iterable[T] {
val pageSize = response.findInteger("page-size")
val ids = response.findAllStrings("ids/*").toIndexedSeq
/**
* Returns the approximate total size of the collection.
*
* @return Approximate size of collection
*/
def getMaximumSize = ids.size
def iterator: Iterator[T] = new PagedIterator[T](this)
def getFirst: T = pager.getPage(asScalaIterable(ids.subList(0, 1)).toList).head
private class PagedIterator[E](resourceCollection: ResourceCollection[E]) extends Iterator[E] {
private var index: Int = 0
private var nextIndexToFetch: Int = 0
private var items: List[E] = Nil
private def nextBatchOfIds: IndexedSeq[String] = {
var lastIdIndex: Int = nextIndexToFetch + pageSize
if (lastIdIndex > ids.size) {
lastIdIndex = ids.size
}
val nextIds = ids.slice(nextIndexToFetch, lastIdIndex)
nextIndexToFetch = lastIdIndex
nextIds
}
def hasNext: Boolean = {
if (nextIndexToFetch < ids.size && index == items.size) {
this.items = resourceCollection.pager.getPage(nextBatchOfIds.toList)
this.index = 0
}
(index < items.size)
}
def next: E = {
val item: E = items(index)
index += 1
item
}
def remove {
throw new UnsupportedOperationException
}
}
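// Usage sketch (illustrative; not part of the original class): instances are normally obtained
// from a search call and paged lazily through `iterator`, which fetches `pageSize` ids at a
// time via the Pager. `gateway.transaction.search` below is a hypothetical call, shown only
// for context:
//   val results: ResourceCollection[Transaction] = gateway.transaction.search(request)
//   val it = results.iterator
//   while (it.hasNext) println(it.next)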
}
| benhardy/braintree-scala | src/main/scala/ResourceCollection.scala | Scala | mit | 1823 |
/*
* Copyright (c) 2013 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.twitter.tilegen
import java.text.SimpleDateFormat
import java.util.Date
import java.util.{List => JavaList}
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import com.oculusinfo.binning.TileData
import com.oculusinfo.binning.TileIndex
import com.oculusinfo.binning.impl.WebMercatorTilePyramid
import com.oculusinfo.tilegen.spark.MavenReference
import com.oculusinfo.tilegen.spark.SparkConnector
import com.oculusinfo.tilegen.tiling.AnalysisDescription
import com.oculusinfo.tilegen.tiling.CompositeAnalysisDescription
import com.oculusinfo.tilegen.tiling.CartesianIndexScheme
import com.oculusinfo.tilegen.tiling.RDDBinner
import com.oculusinfo.tilegen.tiling.TileIO
import com.oculusinfo.tilegen.util.ArgumentParser
import com.oculusinfo.twitter.binning.TwitterDemoTopicRecord
object TwitterTopicBinner {
def main (args: Array[String]) {
val argParser = new ArgumentParser(args)
argParser.debug
val versions = SparkConnector.getDefaultVersions
val jars =
Seq(new MavenReference("com.oculusinfo", "twitter-topics-utilities", versions("base"))
) union SparkConnector.getDefaultLibrariesFromMaven
val sc = argParser.getSparkConnector(jars).getSparkContext("Twitter demo data tiling")
val source = argParser.getString("source", "The source location at which to find twitter data")
val dateParser = new SimpleDateFormat("yyyy/MM/dd.HH:mm:ss.zzzz")
// Note: don't need a start time for this binning project. Start time is assumed to be 31 days prior to end time.
val endTime = dateParser.parse(argParser.getString("end", "The end time for binning. Format is yyyy/MM/dd.HH:mm:ss.+zzzz"))
val levelSets = argParser.getString("levels",
"The level sets (;-separated) of ,-separated levels to bin.")
.split(";").map(_.split(",").map(_.toInt))
val levelBounds = levelSets.map(_.map(a => (a, a))
.reduce((a, b) => (a._1 min b._1, a._2 max b._2)))
.reduce((a, b) => (a._1 min b._1, a._2 max b._2))
val pyramidId = argParser.getString("id", "An ID by which to identify the finished pyramid.")
val pyramidName = argParser.getString("name", "A name with which to label the finished pyramid").replace("_", " ")
val pyramidDescription = argParser.getString("description", "A description with which to present the finished pyramid").replace("_", " ")
val partitions = argParser.getInt("partitions", "The number of partitions into which to read the raw data", Some(0))
val topicList = argParser.getString("topicList", "Path and filename of list of extracted topics and English translations")
val tileIO = TileIO.fromArguments(argParser)
val rawData = if (0 == partitions) {
sc.textFile(source)
} else {
sc.textFile(source, partitions)
}
val minAnalysis:
AnalysisDescription[TileData[JavaList[TwitterDemoTopicRecord]],
List[TwitterDemoTopicRecord]] =
new TwitterTopicListAnalysis(new TwitterMinRecordAnalytic)
val maxAnalysis:
AnalysisDescription[TileData[JavaList[TwitterDemoTopicRecord]],
List[TwitterDemoTopicRecord]] =
new TwitterTopicListAnalysis(new TwitterMaxRecordAnalytic)
val tileAnalytics: Option[AnalysisDescription[TileData[JavaList[TwitterDemoTopicRecord]],
(List[TwitterDemoTopicRecord],
List[TwitterDemoTopicRecord])]] =
Some(new CompositeAnalysisDescription(minAnalysis, maxAnalysis))
val dataAnalytics: Option[AnalysisDescription[((Double, Double),
Map[String, TwitterDemoTopicRecord]),
Int]] =
None
genericProcessData(rawData, levelSets, tileIO, tileAnalytics, dataAnalytics,
endTime, pyramidId, pyramidName, pyramidDescription, topicList)
}
private def genericProcessData[AT, DT]
(rawData: RDD[String],
levelSets: Array[Array[Int]],
tileIO: TileIO,
tileAnalytics: Option[AnalysisDescription[TileData[JavaList[TwitterDemoTopicRecord]], AT]],
dataAnalytics: Option[AnalysisDescription[((Double, Double),
Map[String, TwitterDemoTopicRecord]), DT]],
endTime: Date,
pyramidId: String,
pyramidName: String,
pyramidDescription: String,
topicList: String) =
{
val tileAnalyticsTag: ClassTag[AT] = tileAnalytics.map(_.analysisTypeTag).getOrElse(ClassTag.apply(classOf[Int]))
val dataAnalyticsTag: ClassTag[DT] = dataAnalytics.map(_.analysisTypeTag).getOrElse(ClassTag.apply(classOf[Int]))
processData(rawData, levelSets, tileIO, tileAnalytics, dataAnalytics,
endTime, pyramidId, pyramidName, pyramidDescription, topicList)(tileAnalyticsTag, dataAnalyticsTag)
}
private def processData[AT: ClassTag, DT: ClassTag]
(rawData: RDD[String],
levelSets: Array[Array[Int]],
tileIO: TileIO,
tileAnalytics: Option[AnalysisDescription[TileData[JavaList[TwitterDemoTopicRecord]], AT]],
dataAnalytics: Option[AnalysisDescription[((Double, Double),
Map[String, TwitterDemoTopicRecord]), DT]],
endTime: Date,
pyramidId: String,
pyramidName: String,
pyramidDescription: String,
topicList: String) =
{
val endTimeSecs = endTime.getTime()/1000; // convert time from msec to sec
val topicMatcher = new TopicMatcher
val topicsMap = topicMatcher.getKeywordList(topicList) // get pre-extracted topics
// append topics to end of data entries
val rawDataWithTopics = topicMatcher.appendTopicsToData(rawData.sparkContext, rawData, topicsMap, endTimeSecs)
val data = rawDataWithTopics.mapPartitions(i =>
{
val recordParser = new TwitterTopicRecordParser(endTimeSecs)
i.flatMap(line =>
{
try {
recordParser.getRecordsByTopic(line)
} catch {
// Just ignore bad records, there aren't many
case _: Throwable => Seq[((Double, Double), Map[String, TwitterDemoTopicRecord])]()
}
}
)
}
).map(record => (record._1, record._2, dataAnalytics.map(_.convert(record))))
data.cache
val binner = new RDDBinner
binner.debug = true
val tilePyramid = new WebMercatorTilePyramid
// Add global analytic accumulators
val sc = rawData.context
tileAnalytics.map(_.addGlobalAccumulator(sc))
dataAnalytics.map(_.addGlobalAccumulator(sc))
levelSets.foreach(levelSet =>
{
println()
println()
println()
println("Starting binning levels "+levelSet.mkString("[", ",", "]")+" at "+new Date())
// Add whole-level analytic accumulators for these levels
tileAnalytics.map(analytic =>
levelSet.map(level => analytic.addLevelAccumulator(sc, level))
)
dataAnalytics.map(analytic =>
levelSet.map(level => analytic.addLevelAccumulator(sc, level))
)
// Do actual binning
val taskStart = System.currentTimeMillis
val tiles = binner.processDataByLevel(data,
new CartesianIndexScheme,
new TwitterTopicBinningAnalytic,
tileAnalytics,
dataAnalytics,
tilePyramid,
levelSet,
xBins=1,
yBins=1)
tileIO.writeTileSet(tilePyramid,
pyramidId,
tiles,
new TwitterTopicValueDescription,
tileAnalytics,
dataAnalytics,
pyramidName,
pyramidDescription)
val taskEnd = System.currentTimeMillis()
val elapsedMinutes = (taskEnd - taskStart)/60000.0
println("Finished binning levels "+levelSet.mkString("[", ",", "]")+" at "+new Date())
println("\telapsed time: "+elapsedMinutes+" minutes")
println()
}
)
}
}
| aashish24/aperture-tiles | tile-examples/twitter-topics/twitter-topics-utilities/src/main/scala/com/oculusinfo/twitter/tilegen/TwitterTopicBinner.scala | Scala | mit | 9,263 |
/*
* Copyright 2013 Turkcell Teknoloji Inc. and individual
* contributors by the 'Created by' comments.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.swarm
import com.fasterxml.uuid.{EthernetAddress, Generators}
/**
* Created by Anil Chalil on 11/11/13.
*/
object UUIDGenerator {
val secretGenerator = Generators.timeBasedGenerator(EthernetAddress.fromInterface())
val randomGenerator = Generators.randomBasedGenerator()
}
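// Usage sketch (illustrative): both generators yield java.util.UUID values, e.g.
//   val id = UUIDGenerator.randomGenerator.generate()
// `secretGenerator` produces time-based (version 1) UUIDs derived from the host's MAC address
// and a timestamp, while `randomGenerator` produces random (version 4) UUIDs.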
| Turkcell/swarm | core/src/main/scala/io/swarm/UUIDGenerator.scala | Scala | apache-2.0 | 959 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.nio.ByteBuffer
import kafka.common.LongRef
import kafka.message.{DefaultCompressionCodec, GZIPCompressionCodec, NoCompressionCodec, SnappyCompressionCodec}
import org.apache.kafka.common.errors.InvalidTimestampException
import org.apache.kafka.common.record._
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConverters._
class LogValidatorTest {
@Test
def testLogAppendTimeNonCompressedV1() {
checkLogAppendTimeNonCompressed(RecordBatch.MAGIC_VALUE_V1)
}
private def checkLogAppendTimeNonCompressed(magic: Byte) {
val now = System.currentTimeMillis()
// The timestamps should be overwritten
val records = createRecords(magicValue = magic, timestamp = 0L, codec = CompressionType.NONE)
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = now,
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = magic,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
val validatedRecords = validatedResults.validatedRecords
assertEquals("message set size should not change", records.records.asScala.size, validatedRecords.records.asScala.size)
validatedRecords.batches.asScala.foreach(batch => validateLogAppendTime(now, batch))
assertEquals(s"Max timestamp should be $now", now, validatedResults.maxTimestamp)
assertEquals(s"The offset of max timestamp should be 0", 0, validatedResults.shallowOffsetOfMaxTimestamp)
assertFalse("Message size should not have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testLogAppendTimeNonCompressedV2() {
checkLogAppendTimeNonCompressed(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testLogAppendTimeWithRecompressionV1() {
checkLogAppendTimeWithRecompression(RecordBatch.MAGIC_VALUE_V1)
}
private def checkLogAppendTimeWithRecompression(targetMagic: Byte) {
val now = System.currentTimeMillis()
// The timestamps should be overwritten
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.GZIP)
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = now,
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = targetMagic,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
val validatedRecords = validatedResults.validatedRecords
assertEquals("message set size should not change", records.records.asScala.size, validatedRecords.records.asScala.size)
validatedRecords.batches.asScala.foreach(batch => validateLogAppendTime(now, batch))
assertTrue("MessageSet should still valid", validatedRecords.batches.iterator.next().isValid)
assertEquals(s"Max timestamp should be $now", now, validatedResults.maxTimestamp)
assertEquals(s"The offset of max timestamp should be ${records.records.asScala.size - 1}",
records.records.asScala.size - 1, validatedResults.shallowOffsetOfMaxTimestamp)
assertTrue("Message size may have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testLogAppendTimeWithRecompressionV2() {
checkLogAppendTimeWithRecompression(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testLogAppendTimeWithoutRecompressionV1() {
checkLogAppendTimeWithoutRecompression(RecordBatch.MAGIC_VALUE_V1)
}
private def checkLogAppendTimeWithoutRecompression(magic: Byte) {
val now = System.currentTimeMillis()
// The timestamps should be overwritten
val records = createRecords(magicValue = magic, timestamp = 0L, codec = CompressionType.GZIP)
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = now,
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = magic,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
val validatedRecords = validatedResults.validatedRecords
assertEquals("message set size should not change", records.records.asScala.size,
validatedRecords.records.asScala.size)
validatedRecords.batches.asScala.foreach(batch => validateLogAppendTime(now, batch))
assertTrue("MessageSet should still valid", validatedRecords.batches.iterator.next().isValid)
assertEquals(s"Max timestamp should be $now", now, validatedResults.maxTimestamp)
assertEquals(s"The offset of max timestamp should be ${records.records.asScala.size - 1}",
records.records.asScala.size - 1, validatedResults.shallowOffsetOfMaxTimestamp)
assertFalse("Message size should not have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testLogAppendTimeWithoutRecompressionV2() {
checkLogAppendTimeWithoutRecompression(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testNonCompressedV1() {
checkNonCompressed(RecordBatch.MAGIC_VALUE_V1)
}
private def checkNonCompressed(magic: Byte) {
val now = System.currentTimeMillis()
val timestampSeq = Seq(now - 1, now + 1, now)
val producerId = if (magic >= RecordBatch.MAGIC_VALUE_V2) 1324L else RecordBatch.NO_PRODUCER_ID
val producerEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 10: Short else RecordBatch.NO_PRODUCER_EPOCH
val baseSequence = if (magic >= RecordBatch.MAGIC_VALUE_V2) 20 else RecordBatch.NO_SEQUENCE
val partitionLeaderEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 40 else RecordBatch.NO_PARTITION_LEADER_EPOCH
val records = MemoryRecords.withRecords(magic, 0L, CompressionType.NONE, producerId, producerEpoch, baseSequence,
partitionLeaderEpoch, new SimpleRecord(timestampSeq(0), "hello".getBytes),
new SimpleRecord(timestampSeq(1), "there".getBytes), new SimpleRecord(timestampSeq(2), "beautiful".getBytes))
val validatingResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = magic,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = partitionLeaderEpoch)
val validatedRecords = validatingResults.validatedRecords
var i = 0
for (batch <- validatedRecords.batches.asScala) {
assertTrue(batch.isValid)
assertEquals(batch.timestampType, TimestampType.CREATE_TIME)
assertEquals(batch.maxTimestamp, batch.asScala.map(_.timestamp).max)
assertEquals(producerEpoch, batch.producerEpoch)
assertEquals(producerId, batch.producerId)
assertEquals(baseSequence, batch.baseSequence)
assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch)
for (record <- batch.asScala) {
assertTrue(record.isValid)
assertEquals(timestampSeq(i), record.timestamp)
i += 1
}
}
assertEquals(s"Max timestamp should be ${now + 1}", now + 1, validatingResults.maxTimestamp)
assertEquals(s"Offset of max timestamp should be 1", 1, validatingResults.shallowOffsetOfMaxTimestamp)
assertFalse("Message size should not have been changed", validatingResults.messageSizeMaybeChanged)
}
@Test
def testNonCompressedV2() {
checkNonCompressed(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testRecompressionV1(): Unit = {
checkRecompression(RecordBatch.MAGIC_VALUE_V1)
}
private def checkRecompression(magic: Byte): Unit = {
val now = System.currentTimeMillis()
val timestampSeq = Seq(now - 1, now + 1, now)
val producerId = if (magic >= RecordBatch.MAGIC_VALUE_V2) 1324L else RecordBatch.NO_PRODUCER_ID
val producerEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 10: Short else RecordBatch.NO_PRODUCER_EPOCH
val baseSequence = if (magic >= RecordBatch.MAGIC_VALUE_V2) 20 else RecordBatch.NO_SEQUENCE
val partitionLeaderEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 40 else RecordBatch.NO_PARTITION_LEADER_EPOCH
val records = MemoryRecords.withRecords(magic, 0L, CompressionType.NONE, producerId, producerEpoch, baseSequence,
partitionLeaderEpoch, new SimpleRecord(timestampSeq(0), "hello".getBytes),
new SimpleRecord(timestampSeq(1), "there".getBytes), new SimpleRecord(timestampSeq(2), "beautiful".getBytes))
val validatingResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = GZIPCompressionCodec,
compactedTopic = false,
magic = magic,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = partitionLeaderEpoch)
val validatedRecords = validatingResults.validatedRecords
var i = 0
for (batch <- validatedRecords.batches.asScala) {
assertTrue(batch.isValid)
assertEquals(batch.timestampType, TimestampType.CREATE_TIME)
assertEquals(batch.maxTimestamp, batch.asScala.map(_.timestamp).max)
assertEquals(producerEpoch, batch.producerEpoch)
assertEquals(producerId, batch.producerId)
assertEquals(baseSequence, batch.baseSequence)
assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch)
for (record <- batch.asScala) {
assertTrue(record.isValid)
assertEquals(timestampSeq(i), record.timestamp)
i += 1
}
}
assertEquals(s"Max timestamp should be ${now + 1}", now + 1, validatingResults.maxTimestamp)
assertEquals("Offset of max timestamp should be 2", 2, validatingResults.shallowOffsetOfMaxTimestamp)
assertTrue("Message size should have been changed", validatingResults.messageSizeMaybeChanged)
}
@Test
def testRecompressionV2(): Unit = {
checkRecompression(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testCreateTimeUpConversionV0ToV1(): Unit = {
checkCreateTimeUpConversionFromV0(RecordBatch.MAGIC_VALUE_V1)
}
private def checkCreateTimeUpConversionFromV0(toMagic: Byte) {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.GZIP)
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
magic = toMagic,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
val validatedRecords = validatedResults.validatedRecords
for (batch <- validatedRecords.batches.asScala) {
assertTrue(batch.isValid)
assertEquals(RecordBatch.NO_TIMESTAMP, batch.maxTimestamp)
assertEquals(TimestampType.CREATE_TIME, batch.timestampType)
assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch)
assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId)
assertEquals(RecordBatch.NO_SEQUENCE, batch.baseSequence)
}
assertEquals(s"Max timestamp should be ${RecordBatch.NO_TIMESTAMP}", RecordBatch.NO_TIMESTAMP, validatedResults.maxTimestamp)
assertEquals(s"Offset of max timestamp should be ${validatedRecords.records.asScala.size - 1}",
validatedRecords.records.asScala.size - 1, validatedResults.shallowOffsetOfMaxTimestamp)
assertTrue("Message size should have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testCreateTimeUpConversionV0ToV2() {
checkCreateTimeUpConversionFromV0(RecordBatch.MAGIC_VALUE_V2)
}
@Test
def testCreateTimeUpConversionV1ToV2() {
val timestamp = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, codec = CompressionType.GZIP, timestamp = timestamp)
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = timestamp,
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
magic = RecordBatch.MAGIC_VALUE_V2,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
val validatedRecords = validatedResults.validatedRecords
for (batch <- validatedRecords.batches.asScala) {
assertTrue(batch.isValid)
assertEquals(timestamp, batch.maxTimestamp)
assertEquals(TimestampType.CREATE_TIME, batch.timestampType)
assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch)
assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId)
assertEquals(RecordBatch.NO_SEQUENCE, batch.baseSequence)
}
assertEquals(timestamp, validatedResults.maxTimestamp)
assertEquals(s"Offset of max timestamp should be ${validatedRecords.records.asScala.size - 1}",
validatedRecords.records.asScala.size - 1, validatedResults.shallowOffsetOfMaxTimestamp)
assertTrue("Message size should have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testCompressedV1() {
checkCompressed(RecordBatch.MAGIC_VALUE_V1)
}
private def checkCompressed(magic: Byte) {
val now = System.currentTimeMillis()
val timestampSeq = Seq(now - 1, now + 1, now)
val producerId = if (magic >= RecordBatch.MAGIC_VALUE_V2) 1324L else RecordBatch.NO_PRODUCER_ID
val producerEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 10: Short else RecordBatch.NO_PRODUCER_EPOCH
val baseSequence = if (magic >= RecordBatch.MAGIC_VALUE_V2) 20 else RecordBatch.NO_SEQUENCE
val partitionLeaderEpoch = if (magic >= RecordBatch.MAGIC_VALUE_V2) 40 else RecordBatch.NO_PARTITION_LEADER_EPOCH
val records = MemoryRecords.withRecords(magic, 0L, CompressionType.GZIP, producerId, producerEpoch, baseSequence,
partitionLeaderEpoch, new SimpleRecord(timestampSeq(0), "hello".getBytes),
new SimpleRecord(timestampSeq(1), "there".getBytes), new SimpleRecord(timestampSeq(2), "beautiful".getBytes))
val validatedResults = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
magic = magic,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = partitionLeaderEpoch)
val validatedRecords = validatedResults.validatedRecords
var i = 0
for (batch <- validatedRecords.batches.asScala) {
assertTrue(batch.isValid)
assertEquals(batch.timestampType, TimestampType.CREATE_TIME)
assertEquals(batch.maxTimestamp, batch.asScala.map(_.timestamp).max)
assertEquals(producerEpoch, batch.producerEpoch)
assertEquals(producerId, batch.producerId)
assertEquals(baseSequence, batch.baseSequence)
assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch)
for (record <- batch.asScala) {
assertTrue(record.isValid)
assertEquals(timestampSeq(i), record.timestamp)
i += 1
}
}
assertEquals(s"Max timestamp should be ${now + 1}", now + 1, validatedResults.maxTimestamp)
assertEquals(s"Offset of max timestamp should be ${validatedRecords.records.asScala.size - 1}",
validatedRecords.records.asScala.size - 1, validatedResults.shallowOffsetOfMaxTimestamp)
assertFalse("Message size should not have been changed", validatedResults.messageSizeMaybeChanged)
}
@Test
def testCompressedV2() {
checkCompressed(RecordBatch.MAGIC_VALUE_V2)
}
@Test(expected = classOf[InvalidTimestampException])
def testInvalidCreateTimeNonCompressedV1() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, timestamp = now - 1001L,
codec = CompressionType.NONE)
LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
}
@Test(expected = classOf[InvalidTimestampException])
def testInvalidCreateTimeNonCompressedV2() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V2, timestamp = now - 1001L,
codec = CompressionType.NONE)
LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
}
@Test(expected = classOf[InvalidTimestampException])
def testInvalidCreateTimeCompressedV1() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, timestamp = now - 1001L,
codec = CompressionType.GZIP)
LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
magic = RecordBatch.MAGIC_VALUE_V1,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
}
@Test(expected = classOf[InvalidTimestampException])
def testInvalidCreateTimeCompressedV2() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V2, timestamp = now - 1001L,
codec = CompressionType.GZIP)
LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(0),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
magic = RecordBatch.MAGIC_VALUE_V2,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
}
@Test
def testAbsoluteOffsetAssignmentNonCompressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.NONE)
val offset = 1234567
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
magic = RecordBatch.MAGIC_VALUE_V0,
compactedTopic = false,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testAbsoluteOffsetAssignmentCompressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V0,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testRelativeOffsetAssignmentNonCompressedV1() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, timestamp = now, codec = CompressionType.NONE)
val offset = 1234567
checkOffsets(records, 0)
val messageWithOffset = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords
checkOffsets(messageWithOffset, offset)
}
@Test
def testRelativeOffsetAssignmentNonCompressedV2() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V2, timestamp = now, codec = CompressionType.NONE)
val offset = 1234567
checkOffsets(records, 0)
val messageWithOffset = LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords
checkOffsets(messageWithOffset, offset)
}
@Test
def testRelativeOffsetAssignmentCompressedV1() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, timestamp = now, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
val compressedMessagesWithOffset = LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords
checkOffsets(compressedMessagesWithOffset, offset)
}
@Test
def testRelativeOffsetAssignmentCompressedV2() {
val now = System.currentTimeMillis()
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V2, timestamp = now, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
val compressedMessagesWithOffset = LogValidator.validateMessagesAndAssignOffsets(
records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords
checkOffsets(compressedMessagesWithOffset, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV0ToV1NonCompressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.NONE)
checkOffsets(records, 0)
val offset = 1234567
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV0ToV2NonCompressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.NONE)
checkOffsets(records, 0)
val offset = 1234567
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV0ToV1Compressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV0ToV2Compressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V0, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV1ToV0NonCompressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V1, now, codec = CompressionType.NONE)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V0,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV1ToV0Compressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V1, now, CompressionType.GZIP)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V0,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV1ToV2NonCompressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, codec = CompressionType.NONE)
checkOffsets(records, 0)
val offset = 1234567
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterUpConversionV1ToV2Compressed() {
val records = createRecords(magicValue = RecordBatch.MAGIC_VALUE_V1, codec = CompressionType.GZIP)
val offset = 1234567
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V2,
timestampType = TimestampType.LOG_APPEND_TIME,
timestampDiffMaxMs = 1000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV2ToV1NonCompressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, codec = CompressionType.NONE)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV2ToV1Compressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV2ToV0NonCompressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, codec = CompressionType.NONE)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V0,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test
def testOffsetAssignmentAfterDownConversionV2ToV0Compressed() {
val offset = 1234567
val now = System.currentTimeMillis()
val records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP)
checkOffsets(records, 0)
checkOffsets(LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = DefaultCompressionCodec,
targetCodec = DefaultCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V0,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH).validatedRecords, offset)
}
@Test(expected = classOf[InvalidRecordException])
def testInvalidInnerMagicVersion(): Unit = {
val offset = 1234567
val records = recordsWithInvalidInnerMagic(offset)
LogValidator.validateMessagesAndAssignOffsets(records,
offsetCounter = new LongRef(offset),
now = System.currentTimeMillis(),
sourceCodec = SnappyCompressionCodec,
targetCodec = SnappyCompressionCodec,
compactedTopic = false,
magic = RecordBatch.MAGIC_VALUE_V1,
timestampType = TimestampType.CREATE_TIME,
timestampDiffMaxMs = 5000L,
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH)
}
private def createRecords(magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE,
timestamp: Long = RecordBatch.NO_TIMESTAMP,
codec: CompressionType = CompressionType.NONE): MemoryRecords = {
val buf = ByteBuffer.allocate(512)
val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, 0L)
builder.appendWithOffset(0, timestamp, null, "hello".getBytes)
builder.appendWithOffset(1, timestamp, null, "there".getBytes)
builder.appendWithOffset(2, timestamp, null, "beautiful".getBytes)
builder.build()
}
/* check that offsets are assigned consecutively from the given base offset */
def checkOffsets(records: MemoryRecords, baseOffset: Long) {
assertTrue("Message set should not be empty", records.records.asScala.nonEmpty)
var offset = baseOffset
for (entry <- records.records.asScala) {
assertEquals("Unexpected offset in message set iterator", offset, entry.offset)
offset += 1
}
}
private def recordsWithInvalidInnerMagic(initialOffset: Long): MemoryRecords = {
val records = (0 until 20).map(id =>
LegacyRecord.create(RecordBatch.MAGIC_VALUE_V0,
RecordBatch.NO_TIMESTAMP,
id.toString.getBytes,
id.toString.getBytes))
val buffer = ByteBuffer.allocate(math.min(math.max(records.map(_.sizeInBytes()).sum / 2, 1024), 1 << 16))
val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, CompressionType.GZIP,
TimestampType.CREATE_TIME, 0L)
var offset = initialOffset
records.foreach { record =>
builder.appendUncheckedWithOffset(offset, record)
offset += 1
}
builder.build()
}
def validateLogAppendTime(now: Long, batch: RecordBatch) {
assertTrue(batch.isValid)
assertTrue(batch.timestampType() == TimestampType.LOG_APPEND_TIME)
assertEquals(s"Timestamp of message $batch should be $now", now, batch.maxTimestamp)
for (record <- batch.asScala) {
assertTrue(record.isValid)
assertEquals(s"Timestamp of message $record should be $now", now, record.timestamp)
}
}
}
| rhauch/kafka | core/src/test/scala/unit/kafka/log/LogValidatorTest.scala | Scala | apache-2.0 | 36,350 |
package epic.parser.kbest
import epic.parser.{ViterbiDecoder, ParserTestHarness}
import org.scalatest.FunSuite
/**
*
* @author dlwh
*/
class TopDownKBestAStarTest extends FunSuite with ParserTestHarness {
test("KBest recovers viterbi tree") {
val parser = ParserTestHarness.viterbiParser
val kbestParser = new AStarKBestParser(parser)
val trees = getTestTrees()
trees.foreach { ti =>
val vit = parser.bestBinarizedTree(ti.words)
val kbest = kbestParser.bestKParses(ti.words, 5)
assert(kbest.head._1 === vit, kbest)
assert(kbest.sliding(2).forall(seq => seq.head._2 >= seq.last._2))
}
}
}
| maxim-rabinovich/epic | src/test/scala/epic/parser/kbest/TopDownKBestAStarTest.scala | Scala | apache-2.0 | 643 |
package io.corbel.resources.rem.utils
import io.corbel.resources.rem.plugin.RemPlugin
import org.springframework.context.ApplicationContext
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.springframework.context.support.GenericApplicationContext
import scala.reflect.{ClassTag, classTag}
/**
* @author Alexander De Leon (alex.deleon@devialab.com)
*/
trait InjectableServices extends ScalaRemPlugin {
def bootstrapApplicationContext[IoC: ClassTag]: ApplicationContext = {
val parent: GenericApplicationContext = new GenericApplicationContext
parent.getBeanFactory.registerSingleton("remService", implicitRemService)
parent.getBeanFactory.registerSingleton("serviceLocator", implicitServiceLocator)
parent.refresh
val context: AnnotationConfigApplicationContext = new AnnotationConfigApplicationContext
context.register(classTag[IoC].runtimeClass)
context.setParent(parent)
context.refresh
return context
}
}
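// Illustrative usage sketch (not part of the original source): a plugin mixes in
// InjectableServices and bootstraps a Spring configuration class of its own
// (`MyPluginIoc` below is a hypothetical name).
//
//   class MyRemPlugin extends RemPlugin with InjectableServices {
//     lazy val pluginContext: ApplicationContext = bootstrapApplicationContext[MyPluginIoc]
//   }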
/**
* @deprecated use InjectableServices
*/
trait InjectableRemService extends InjectableServices | devialab/corbel-rem-utils | src/main/scala/io/corbel/resources/rem/utils/InjectableServices.scala | Scala | mit | 1,102 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.dsl.transformations
import java.sql.DriverManager
import java.util.Properties
import _root_.test.views._
import org.apache.commons.net.ftp.FTPClient
import org.apache.curator.test.TestingServer
import org.codehaus.jackson.map.ObjectMapper
import org.codehaus.jackson.map.`type`.TypeFactory
import org.rarefiedredis.redis.adapter.jedis.JedisAdapter
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import org.schedoscope.Schedoscope
import org.schedoscope.dsl.Field.v
import org.schedoscope.dsl.Parameter.p
import org.schedoscope.export.testsupport.{EmbeddedFtpSftpServer, EmbeddedKafkaCluster, SimpleTestKafkaConsumer}
import org.schedoscope.export.utils.BigQueryUtils.{bigQueryService, dropDataset, existsDataset}
import org.schedoscope.export.utils.CloudStorageUtils.{createBucket, deleteBucket, storageService}
import org.schedoscope.export.utils.RedisMRJedisFactory
import org.schedoscope.test.{rows, test}
import scala.collection.JavaConversions.iterableAsScalaIterable
import scala.collection.JavaConverters._
class ExportTest extends FlatSpec with Matchers with BeforeAndAfter {
private val CALL_BIG_QUERY = false
private val CLEAN_UP_BIG_QUERY = true
before {
if (CALL_BIG_QUERY) {
val bigQuery = bigQueryService
val storage = storageService
if (existsDataset(bigQuery, null, "default"))
dropDataset(bigQuery, null, "default")
createBucket(storage, "schedoscope_export_big_query_full_test", "europe-west3")
}
}
after {
if (CALL_BIG_QUERY && CLEAN_UP_BIG_QUERY) {
val bigQuery = bigQueryService
val storage = storageService
if (existsDataset(bigQuery, null, "default"))
dropDataset(bigQuery, null, "default")
deleteBucket(storage, "schedoscope_export_big_query_full_test")
}
}
Class.forName("org.apache.derby.jdbc.EmbeddedDriver")
val dbConnection = DriverManager.getConnection("jdbc:derby:memory:TestingDB;create=true")
val jedisAdapter = new JedisAdapter()
RedisMRJedisFactory.setJedisMock(jedisAdapter)
val ec0101Clicks = new Click(p("EC0101"), p("2014"), p("01"), p("01")) with rows {
set(
v(id, "event01"),
v(url, "http://ec0101.com/url1"))
set(
v(id, "event02"),
v(url, "http://ec0101.com/url2"))
set(
v(id, "event03"),
v(url, "http://ec0101.com/url3"))
}
val ec0106Clicks = new Click(p("EC0106"), p("2014"), p("01"), p("01")) with rows {
set(
v(id, "event04"),
v(url, "http://ec0106.com/url1"))
set(
v(id, "event05"),
v(url, "http://ec0106.com/url2"))
set(
v(id, "event06"),
v(url, "http://ec0106.com/url3"))
}
"The test framework" should "execute hive transformations and perform JDBC export" in {
new ClickOfEC0101WithJdbcExport(p("2014"), p("01"), p("01")) with test {
basedOn(ec0101Clicks, ec0106Clicks)
`then`()
numRows shouldBe 3
row(
v(id) shouldBe "event01",
v(url) shouldBe "http://ec0101.com/url1")
row(
v(id) shouldBe "event02",
v(url) shouldBe "http://ec0101.com/url2")
row(
v(id) shouldBe "event03",
v(url) shouldBe "http://ec0101.com/url3")
}
val statement = dbConnection.createStatement()
val resultSet = statement.executeQuery("SELECT COUNT(*) FROM DEV_TEST_VIEWS_CLICK_OF_E_C0101_WITH_JDBC_EXPORT")
resultSet.next()
resultSet.getInt(1) shouldBe 3
resultSet.close()
statement.close()
}
it should "execute hive transformations and perform BigQuery export" in {
if (CALL_BIG_QUERY)
new ClickOfEC0101WithBigQueryExport(p("2014"), p("01"), p("01")) with test {
basedOn(ec0101Clicks, ec0106Clicks)
`then`()
numRows shouldBe 3
row(
v(id) shouldBe "event01",
v(url) shouldBe "http://ec0101.com/url1")
row(
v(id) shouldBe "event02",
v(url) shouldBe "http://ec0101.com/url2")
row(
v(id) shouldBe "event03",
v(url) shouldBe "http://ec0101.com/url3")
}
}
it should "execute hive transformations and perform Redis export" in {
new ClickOfEC0101WithRedisExport(p("2014"), p("01"), p("01")) with test {
basedOn(ec0101Clicks, ec0106Clicks)
`then`()
numRows shouldBe 3
row(
v(id) shouldBe "event01",
v(url) shouldBe "http://ec0101.com/url1")
row(
v(id) shouldBe "event02",
v(url) shouldBe "http://ec0101.com/url2")
row(
v(id) shouldBe "event03",
v(url) shouldBe "http://ec0101.com/url3")
}
jedisAdapter.hget("event01", "url") shouldBe "http://ec0101.com/url1"
jedisAdapter.hget("event02", "url") shouldBe "http://ec0101.com/url2"
jedisAdapter.hget("event03", "url") shouldBe "http://ec0101.com/url3"
}
it should "execute hive transformations and perform Kafka export" in {
val zkServer = new TestingServer(2182);
zkServer.start()
Thread.sleep(500)
val kafkaServer = new EmbeddedKafkaCluster(zkServer.getConnectString, new Properties(), List(new Integer(9092)).asJava)
kafkaServer.startup()
val v = new ClickOfEC01WithKafkaExport(p("2014"), p("01"), p("01")) with test {
basedOn(ec0101Clicks, ec0106Clicks)
`then`()
numRows shouldBe 3
row(
v(id) shouldBe "event01",
v(url) shouldBe "http://ec0101.com/url1")
row(
v(id) shouldBe "event02",
v(url) shouldBe "http://ec0101.com/url2")
row(
v(id) shouldBe "event03",
v(url) shouldBe "http://ec0101.com/url3")
}
val consumer = new SimpleTestKafkaConsumer(v.dbName + "_" + v.n, zkServer.getConnectString, 3)
for (r <- consumer) {
val record: java.util.HashMap[String, _] = new ObjectMapper().readValue(r, TypeFactory.mapType(classOf[java.util.HashMap[_, _]], classOf[String], classOf[Any]))
record.get("date_id") shouldBe "20140101"
}
kafkaServer.shutdown()
zkServer.stop()
}
it should "execute hive transformations and perform Ftp export" in {
val ftpServer = new EmbeddedFtpSftpServer()
ftpServer.startEmbeddedFtpServer()
val v = new ClickOfEC0101WithFtpExport(p("2014"), p("01"), p("01")) with test {
basedOn(ec0101Clicks, ec0106Clicks)
`then`()
numRows shouldBe 3
row(
v(id) shouldBe "event01",
v(url) shouldBe "http://ec0101.com/url1")
row(
v(id) shouldBe "event02",
v(url) shouldBe "http://ec0101.com/url2")
row(
v(id) shouldBe "event03",
v(url) shouldBe "http://ec0101.com/url3")
}
val ftp = new FTPClient();
ftp.connect("localhost", 2221);
ftp.login(EmbeddedFtpSftpServer.FTP_USER_FOR_TESTING, EmbeddedFtpSftpServer.FTP_PASS_FOR_TESTING);
val files = ftp.listFiles();
files.filter {
_.getName().contains(v.filePrefix)
}.length shouldBe Schedoscope.settings.ftpExportNumReducers
ftpServer.stopEmbeddedFtpServer()
}
} | ottogroup/schedoscope | schedoscope-core/src/test/scala/org/schedoscope/dsl/transformations/ExportTest.scala | Scala | apache-2.0 | 7,632 |
package com.cloudray.scalapress.plugin.ecommerce.tag
import org.scalatest.{FlatSpec, OneInstancePerTest}
import org.scalatest.mock.MockitoSugar
import javax.servlet.http.HttpServletRequest
import com.cloudray.scalapress.item.Item
import com.cloudray.scalapress.plugin.ecommerce.tags.AddToBasketTag
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
class AddToBasketTagTest extends FlatSpec with MockitoSugar with OneInstancePerTest {
val obj = new Item
obj.id = 41
obj.backorders = true
val tag = new AddToBasketTag()
val req = mock[HttpServletRequest]
val context = mock[ScalapressContext]
val sreq = new ScalapressRequest(req, context).withItem(obj)
"a AddToBasketTag" should "render the correct basket url" in {
val render = tag.render(sreq)
assert(render.get.contains("/basket/add/41"))
}
}
| vidyacraghav/scalapress | src/test/scala/com/cloudray/scalapress/plugin/ecommerce/tag/AddToBasketTagTest.scala | Scala | apache-2.0 | 887 |
trait First {type Out}
given First with {type Out = 123}
trait Second {type Out}
transparent inline given (using f: First): Second = new Second {type Out = f.Out}
val s = summon[Second]
val x = summon[s.Out =:= 123] | dotty-staging/dotty | tests/pos/i13503.scala | Scala | apache-2.0 | 217 |
package aiouniya.spark.common
import java.text.SimpleDateFormat
import java.util.Date
import aiouniya.spark.Loaders._
import aiouniya.spark.MyRDDFunctions._
import aiouniya.spark.util.MD5Util
import magneto.analysis.util.ImeiUtil
import org.apache.commons.codec.digest.DigestUtils
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.spark.SparkContext
import org.apache.spark.storage.StorageLevel
import org.json4s.JsonAST.JString
import org.json4s._
import org.json4s.jackson.JsonMethods._
/**
* Created by wangqy on 2017/5/19.
*/
object MacMD5MapComm_2 {
  def execute(sc: SparkContext, lastday: String, hbaseTable: String) {
val requestFiles: String = s"/drp/tyv2/*/AdRequestLog/$lastday"
val requestRDD = sc.forPath(requestFiles).combineWithPath
val finalPartNum = requestRDD.partitions.size / 12
val macRDD = requestRDD.filter(_._2.contains("{")).map { case (pathStr, line) =>
val ssp = new Path(pathStr).getParent.getParent.getParent.getName
try {
val json = parse(line.substring(line.indexOf("{")))
val data = ssp match {
case "adview" =>
val devJ = json \\ "device"
devJ \\ "ext" match {
case j: JObject =>
(getUserid(optString(j, "uuid"), optString(j, "idfa")).toUpperCase, optString(j, "mac"))
case _ => ("", "")
}
case _ =>
(getUserid(optString(json, "imei"), optString(json, "idfa")).toUpperCase, optString(json, "mac"))
}
val userid = data._1.trim
val mac = data._2.trim.toUpperCase()
val u = userid match {
case s if s.length == 32 || s.length == 40 => ""
case s => s
}
((ssp, MD5Util.toMD516(userid)), (u, mac))
} catch {
case parse: com.fasterxml.jackson.core.JsonParseException =>
println(parse.getMessage)
((null, null), (null, ""))
}
}.filter { case ((ssp, md5), (u, mac)) =>
md5 != null && mac.length > 0 &&
!mac.equals("02:00:00:00:00:00") && !mac.equals("FF:FF:FF:FF:FF:FF")
}.reduceByKey((v1, v2) =>
(getNonEmpty(v1._1, v2._1), getNonEmpty(v1._2, v2._2))
).map { r =>
val macdata = getMac(r._2._2)
(r._1, (r._2._1, macdata._1, macdata._2, macdata._3))
}.filter(r =>
r._2._3.length + r._2._4.length > 0
)
macRDD.persist(StorageLevel.MEMORY_ONLY_SER)
val outputDir = s"/drp/tyv2/data/mac_data_3/$lastday"
macRDD.coalesce(finalPartNum).map { r =>
val u = r._2._1
val standard = u.length match {
case 0 | 32 | 36 | 40 => ""
case _ if ImeiUtil.is(u) => "1"
case _ => "0"
}
r._1.productIterator.mkString("\\t") + "\\t" + standard + "\\t" + r._2.productIterator.mkString("\\t")
}.saveAsTextFileWithCheck(outputDir, classOf[GzipCodec])
// add hive partition
// val hiveContext = new HiveContext(sc)
// hiveContext.sql(s"alter table mac_data add if not exists partition (d=$lastday) location '$outputDir'")
// write to HBase
val hbaseConf = HBaseConfiguration.create
hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
// set "zookeeper.znode.parent" in hbase-site.xml
hbaseConf.set("zookeeper.znode.parent", "/hbase-unsecure")
hbaseConf.set("hbase.zookeeper.quorum", "master.hdp,slave01.hdp,slave02.hdp,slave25.hdp,slave26.hdp")
val conn = ConnectionFactory.createConnection(hbaseConf)
val tableName = TableName.valueOf(hbaseTable)
val table = conn.getTable(tableName)
val regionLocator = conn.getRegionLocator(tableName)
val startKeys = regionLocator.getStartKeys
val job = Job.getInstance(hbaseConf)
job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
job.setMapOutputValueClass(classOf[Put])
job.getConfiguration.set(FileOutputFormat.OUTDIR, "/drp/tmp/mac_data_3")
HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)
// ((ssp,md5),(userid,mac,macmd5,macsha1))
val dtFormat = new SimpleDateFormat("yyyy-MM-dd 00:00:00")
val tsStr = dtFormat.format(new Date)
val ts = dtFormat.parse(tsStr).getTime
val macPartitioner = new MacPartitioner(startKeys)
val fRDD = macRDD.flatMap { case (((ssp, md5), (userid, mac, macmd5, macsha1))) =>
List((macmd5, (md5, userid, mac)), (macsha1, (md5, userid, mac)))
}.filter(_._1.length > 0)
.map(r =>
convert(r, ts)).repartitionAndSortWithinPartitions(
macPartitioner).saveAsNewAPIHadoopFile(
"/drp/tmp/mac_data_3", classOf[ImmutableBytesWritable],
classOf[Put], classOf[HFileOutputFormat2], job.getConfiguration)
// .saveAsNewAPIHadoopDataset(job.getConfiguration)
macRDD.unpersist()
}
def convert(r: (String, (String, String, String)), timestamp: Long) = {
val rowKey = Bytes.toBytes(r._1)
val p = new Put(rowKey)
p.addColumn(Bytes.toBytes("0"), Bytes.toBytes("m"), Bytes.toBytes(r._2._1))
val userid = r._2._2
val len = userid.length
if (len != 0 && len != 32 && len != 36 && len != 40 && ImeiUtil.is(userid))
p.addColumn(Bytes.toBytes("0"), Bytes.toBytes("i"), timestamp, Bytes.toBytes(userid))
val plainMac = r._2._3
if (plainMac.length > 0) {
p.addColumn(Bytes.toBytes("0"), Bytes.toBytes("p"), timestamp, Bytes.toBytes(plainMac))
}
(new ImmutableBytesWritable(rowKey), p)
}
def optString(json: JValue, key: String): String = {
json \\ key match {
case JString(x: String) => x
case _ => ""
}
}
def getUserid(imei: String, idfa: String): String = {
(imei.trim, idfa.trim) match {
case (m, a) if m.length > 4 => m
case (m, a) if a.length > 4 => a
case _ => ""
}
}
  def getNonEmpty(left: String, right: String): String = {
    (left, right) match {
      case ("", s2) => s2
      case (s1, _) => s1
    }
  }
def getMac(mac: String): (String, String, String) = {
mac match {
case m if m.length == 32 && m.matches("[0-9A-F]{32}") => ("", m.toLowerCase, "")
case m if m.length == 40 && m.matches("[0-9A-F]{40}") => ("", "", m.toLowerCase)
case m if m.matches("([0-9A-F]{2}:){5}[0-9A-F]{2}") =>
(m, DigestUtils.md5Hex(m), DigestUtils.shaHex(m))
case m if m.matches("[0-9A-F]{12}") =>
val sb = new StringBuilder(m)
var i = 0
for (i <- 1 to 5) {
sb.insert(3 * i - 1, ':')
}
val newm = sb.toString
(newm, DigestUtils.md5Hex(newm), DigestUtils.shaHex(newm))
case _ => ("", "", "")
}
}
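  // Worked example (illustrative, not part of the original source):
  //   getMac("0123456789AB") inserts colons to give "01:23:45:67:89:AB" and returns it together
  //   with its MD5 and SHA-1 hex digests; a colon-separated MAC is hashed as-is; a 32- or
  //   40-character hex string is assumed to already be an MD5/SHA-1 digest and is lower-cased;
  //   anything else yields ("", "", "").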
}
| 7u/spark-learning | spark.learning/src/main/scala/aiouniya/spark/common/MacMD5MapComm_2.scala | Scala | apache-2.0 | 7,214 |
//
// $Id$
//
// Wiggle - a 2D game development library - http://code.google.com/p/wiggle/
// Copyright 2008-2010 Michael Bayne
// Distributed under the "Simplified BSD License" in LICENSE.txt
package wiggle.gfx
import scala.collection.mutable.ArrayBuffer
/**
* Convenience methods for creating groups of elements.
*/
object Group
{
def of (elems :Element*) = {
val group :Group = new Group
for (elem <- elems) group.add(elem)
group
}
}
/**
* A group of elements. The elements are rendered relative to the transform of the group.
*/
class Group extends Element
{
/** Returns a view of this element's children. */
def children :Seq[Element] = _children
/** Adds the specified element as a child of this element. */
def add (elem :Element) {
elem.setParent(Some(this))
_children += elem
}
/** Removes the specified child element. */
def remove (elem :Element) = _children.indexOf(elem) match {
case -1 => false
case idx => {
_children.remove(idx, 1)
elem.setParent(None)
true
}
}
override protected def renderElement (rend :Renderer, time :Float) {
// this is the only way to iterate without creating garbage, sigh
var idx = 0; val len = _children.length; while (idx < len) {
_children(idx).render(rend, time)
idx = idx+1
}
}
private[this] val _children :ArrayBuffer[Element] = new ArrayBuffer()
}
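// Illustrative usage sketch (not part of the original source): assumes some concrete
// Element instances (the `sprite*` values below are hypothetical).
//
//   val group = Group.of(sprite1, sprite2) // children render relative to the group's transform
//   group.add(sprite3)
//   group.remove(sprite1)                  // returns false if the element is not a child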
| zdevzee/wiggle | src/main/scala/wiggle/gfx/Group.scala | Scala | bsd-3-clause | 1,408 |
package org.kapunga.tm
import akka.actor.{Props, Actor}
import akka.event.Logging
import org.kapunga.tm.command.{CommandExecutorService}
import org.kapunga.tm.soul.{Spirit, SpiritAgent, Agent, Soul}
/**
* This actor is responsible for handling sanitized input from a player once that player
 * has been logged in, and for delivering in-game notifications.
*
* @param soul The soul of the logged in player.
*/
// TODO Support dynamic prompts definable by the player.
// TODO Support quit messages so a user can be notified of why they may have been forced to quit.
class SoulHandler(soul: Soul) extends Actor {
import context.parent
val log = Logging(context.system, this)
val agent: Agent = new SpiritAgent(soul, Spirit.getSpirit(soul), self)
agent.spawn()
def receive = {
/*
* This message is received once the ConnectionHandler has cleaned up some input.
* It is logged and then passed off to the CommandExecutorService.
*/
case Input(content) =>
log.info(s"Received input: '$content'")
CommandExecutorService.command(content, agent.context)
/*
* This message is received when a quit action is received either as a result of the
* player's input or something like a reboot or a Kick
*/
case Quit =>
parent ! CloseConnection("Bye Bye!\n")
/*
     * This message is received when a player inputs a tab. It dispatches the partial command to
     * the CommandExecutorService for tab completion.
*/
case TabComplete(partialCommand) =>
CommandExecutorService.tabComplete(partialCommand, agent.context) match {
case EmptyTab =>
parent ! Output("\n")
prompt()
case TabResult(output, options) =>
parent ! TabResult(output, options)
}
/*
* This message is received when an in-game event happens and the player needs to be notified.
* This can generally come from just about anywhere.
*/
case Notify(item) =>
parent ! Output(item)
/*
* This message is received when a player needs to receive a prompt. It should generally be
* sent after a single or related bunch of Notify messages has been sent.
*/
case Prompt =>
prompt()
}
override def postStop(): Unit = {
agent.deSpawn()
super.postStop()
}
/**
* Issues a prompt to the player with a default of the soul name. The ConnectionHandler
* will append the current command buffer to the output.
*
* @param content What we wish the prompt to be. Defaults to the soul name.
*/
def prompt(content: String = soul.name + " > ") = parent ! ShowPrompt("\n" + content)
}
/**
* A companion Object to the soul handler. Provides Props for constructing a SoulHandler actor.
*/
object SoulHandler {
def connectionProps(soul: Soul): Props = Props(new SoulHandler(soul))
}
/**
* Sent by in game processes when a line of text needs to be sent to the player.
*
* @param item The string to be displayed to the player.
*/
case class Notify(item: String)
/**
* This message is sent by in-game processes when a chunk of output has finished being sent and the player
* needs to be given a prompt.
*/
case object Prompt
/**
* This message is sent by the CommandExecutorService when the player has entered a command to quit the game.
*/
case object Quit
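// Illustrative usage sketch (not part of the original source): how an in-game process might
// drive a player's SoulHandler actor (`soulHandlerRef` is an assumed ActorRef obtained elsewhere).
//
//   soulHandlerRef ! Notify("A goblin enters the room.")
//   soulHandlerRef ! Prompt
//   soulHandlerRef ! Quit // closes the player's connection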
| kapunga/thor-mud | src/main/scala/org/kapunga/tm/SoulHandler.scala | Scala | mit | 3,346 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Stephane Micheloud
*/
package scala.man1
/**
* @author Lex Spoon
*/
object fsc extends Command {
import _root_.scala.tools.docutil.ManPage._
protected def cn = new Error().getStackTrace()(0).getClassName()
val name = Section("NAME",
MBold(command) & " " & NDash & " Fast offline compiler for the " &
Link("Scala 2", "http://scala-lang.org/") & " language")
val synopsis = Section("SYNOPSIS",
CmdLine(" [ " & Argument("options") & " ] " &
Argument("source files")))
val parameters = scalac.parameters
val description = Section("DESCRIPTION",
"The "&MBold("fsc")&" tool submits Scala compilation jobs to " &
"a compilation daemon. "&
"The first time it is executed, the daemon is started automatically. "&
"On subsequent "&
"runs, the same daemon can be reused, thus resulting in a faster compilation. "&
"The tool is especially effective when repeatedly compiling with the same "&
"class paths, because the compilation daemon can reuse a compiler instance.",
"The compilation daemon is smart enough to flush its cached compiler "&
"when the class path changes. However, if the contents of the class path "&
"change, for example due to upgrading a library, then the daemon "&
"should be explicitly shut down with " & MBold("-shutdown") & ".",
"Note that the " & Link(MBold("scala"), "scala.html") & " script runner " &
"will also use " &
"the offline compiler by default, with the same advantages and caveats.")
val options = Section("OPTIONS",
"The offline compiler supports " &
Link("all options of " & MBold("scalac"), "scalac.html#options") &
" plus the following:",
DefinitionList(
Definition(
CmdOption("reset"),
"Reset compile server caches."),
Definition(
CmdOption("shutdown"),
"Shut down the compilation daemon. The daemon attempts to restart "&
"itself as necessary, but sometimes an explicit shutdown is required. "&
"A common example is if jars on the class path have changed."),
Definition(
CmdOption("server", Argument("hostname:portnumber")),
"Specify compile server host at port number. Usually this option " &
"is not needed. Note that the hostname must be for a host that shares " &
"the same filesystem."),
Definition(
CmdOptionBound("J", Argument("flag")),
"Pass " & Mono(Argument("flag")) & " directly to the Java VM for the compilation daemon.")
))
val example = Section("EXAMPLE",
"The following session shows a typical speed up due to using the "&
"offline compiler.",
CodeSample(
"""> fsc -verbose -d /tmp test.scala
|\&...
|[Port number: 32834]
|[Starting new Scala compile server instance]
|[Classpath = ...]
|[loaded directory path ... in 692ms]
|\&...
|[parsing test.scala]
|\&...
|[total in 943ms]
|
|> fsc -verbose -d /tmp test.scala
|\&...
|[Port number: 32834]
|[parsing test.scala]
|\&...
|[total in 60ms]
|
|> fsc -verbose -d /tmp test.scala
|\&...
|[Port number: 32834]
|[parsing test.scala]
|\&...
|[total in 42ms]
|
|> fsc -verbose -shutdown
|[Scala compile server exited]
|""".stripMargin))
val environment = Section("ENVIRONMENT",
DefinitionList(
Definition(
MBold("JAVACMD"),
"Specify the " & MBold("java") & " command to be used " &
"for running the Scala code. Arguments may be specified " &
"as part of the environment variable; spaces, quotation marks, " &
"etc., will be passed directly to the shell for expansion."),
Definition(
MBold("JAVA_HOME"),
"Specify JDK/JRE home directory. This directory is used to locate " &
"the " & MBold("java") & " command unless " & MBold("JAVACMD") & " variable set."),
Definition(
MBold("JAVA_OPTS"),
SeqPara(
"Specify the options to be passed to the " & MBold("java") &
" command defined by " & MBold("JAVACMD") & ".",
"With Java 1.5 (or newer) one may for example configure the " &
"memory usage of the JVM as follows: " &
Mono("JAVA_OPTS=\"-Xmx512M -Xms16M -Xss16M\"")
))))
val exitStatus = Section("EXIT STATUS",
MBold(command) & " returns a zero exit status if it succeeds to " &
"compile the specified input files. Non zero is returned in case " &
"of failure.")
val seeAlso = Section("SEE ALSO",
Link(Bold("scala") & "(1)", "scala.html") & ", " &
Link(Bold("scalac") & "(1)", "scalac.html") & ", " &
Link(Bold("scaladoc") & "(1)", "scaladoc.html") & ", " &
Link(Bold("scalap") & "(1)", "scalap.html"))
def manpage = new Document {
title = command
date = "March 2012"
author = "Lex Spoon"
version = "0.5"
sections = List(
name,
synopsis,
parameters,
options,
description,
example,
environment,
exitStatus,
authors,
bugs,
copyright,
seeAlso)
}
}
| martijnhoekstra/scala | src/manual/scala/man1/fsc.scala | Scala | apache-2.0 | 5,345 |
package com.softwaremill.macwire
/**
* - object graph
* - case class -> toString()
* - manual DI
*/
object Step1Complete extends App with Macwire {
case class Field()
case class Digger()
case class PotatoFarm(field: Field, digger: Digger)
case class CowPasture()
case class Meatery(cowPasture: CowPasture)
case class Restaurant(potatoFarm: PotatoFarm, meatery: Meatery) {
def orderSteakWithPotatoes() = {
println(s"Welcome to $this. Here's your order.")
}
}
val field = new Field()
val digger = new Digger()
val potatoFarm = new PotatoFarm(field, digger)
val cowPasture = new CowPasture()
val meatery = new Meatery(cowPasture)
val restaurant = new Restaurant(potatoFarm, meatery)
restaurant.orderSteakWithPotatoes()
}
| adamw/nodi-macwire-pres | core/src/main/scala/com/softwaremill/macwire/Step1Complete.scala | Scala | apache-2.0 | 771 |
package com.countrygamer.arcanacraft.client.gui
import com.countrygamer.arcanacraft.common.ArcanaCraft
import com.countrygamer.arcanacraft.common.extended.ArcanePlayer
import com.countrygamer.arcanacraft.common.quom.Quom
import com.countrygamer.cgo.wrapper.common.extended.ExtendedEntityHandler
import cpw.mods.fml.common.eventhandler.{EventPriority, SubscribeEvent}
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.client.Minecraft
import net.minecraft.client.gui.Gui
import net.minecraft.util.ResourceLocation
import net.minecraftforge.client.event.RenderGameOverlayEvent
import net.minecraftforge.client.event.RenderGameOverlayEvent.ElementType
import org.lwjgl.opengl.GL11
/**
*
*
* @author CountryGamer
*/
@SideOnly(Side.CLIENT)
object ArcaneOverlay extends Gui {
val mc: Minecraft = Minecraft.getMinecraft
@SubscribeEvent(priority = EventPriority.NORMAL)
def renderGameOverlay(event: RenderGameOverlayEvent.Post): Unit = {
if (event.isCanceled) return
val arcanePlayer: ArcanePlayer = ExtendedEntityHandler
.getExtended(this.mc.thePlayer, classOf[ArcanePlayer]).asInstanceOf[ArcanePlayer]
if (arcanePlayer.isArcaic() && arcanePlayer.isHoldingValidCaster()) {
val width: Int = event.resolution.getScaledWidth
val height: Int = event.resolution.getScaledHeight
if (event.`type` == ElementType.HOTBAR) {
GL11.glPushMatrix()
GL11.glColor4f(1.0F, 1.0F, 1.0F, 1.0F)
GL11.glEnable(GL11.GL_BLEND)
GL11.glDisable(GL11.GL_LIGHTING)
val quomX: Int = width / 2 - 9
val quomY: Int = height - 55
this.mc.getTextureManager.bindTexture(
new ResourceLocation(ArcanaCraft.pluginID,
"textures/gui/GuiIcons.png"))
this.drawTexturedModalRect(quomX, quomY, 0, 20, 18, 18)
val quom: Quom = arcanePlayer.getCurrentQuom()
if (quom != null) quom.draw(this, quomX + 1, quomY + 1)
GL11.glDisable(GL11.GL_BLEND)
GL11.glPopMatrix()
}
}
}
}
| TheTemportalist/ArcanaCraft | src/main/scala/com/countrygamer/arcanacraft/client/gui/ArcaneOverlay.scala | Scala | apache-2.0 | 1,940 |
package dpla.ingestion3.messages
import scala.collection.mutable.ListBuffer
class MessageCollector[IngestMessage] {
protected val messages: ListBuffer[IngestMessage] = ListBuffer[IngestMessage]()
def add(msg: IngestMessage) = messages += msg
def getAll() = messages
def deleteAll() = messages.remove(0, messages.size)
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/messages/MessageCollector.scala | Scala | mit | 334 |
package io.finch
import java.util.UUID
import scala.reflect.ClassTag
import cats.data.NonEmptyList
import cats.laws.discipline.AlternativeTests
import com.twitter.conversions.time._
import com.twitter.finagle.http.{Cookie, Method, Request}
import com.twitter.io.Buf
import com.twitter.util.{Future, Throw, Try}
import io.finch.data.Foo
import io.finch.items.BodyItem
class EndpointSpec extends FinchSpec {
checkAll("Endpoint[String]", AlternativeTests[Endpoint].applicative[String, String, String])
behavior of "Endpoint"
private[this] val emptyRequest = Request()
it should "extract one path segment" in {
def extractOne[A](e: Endpoint[A], f: String => A): Input => Boolean = { i: Input =>
val o = e(i)
val v = i.headOption.flatMap(s => Try(f(s)).toOption)
o.awaitValueUnsafe() === v && (v.isEmpty || o.remainder === Some(i.drop(1)))
}
check(extractOne(string, identity))
check(extractOne(int, _.toInt))
check(extractOne(boolean, _.toBoolean))
check(extractOne(long, _.toLong))
check(extractOne(uuid, UUID.fromString))
}
it should "extract tail of the path" in {
def extractTail[A](e: Endpoint[Seq[A]]): Seq[A] => Boolean = { s: Seq[A] =>
val i = Input(emptyRequest, s.map(_.toString))
e(i).remainder === Some(i.copy(path = Nil))
}
check(extractTail(strings))
check(extractTail(ints))
check(extractTail(booleans))
check(extractTail(longs))
check(extractTail(uuids))
}
it should "support very basic map" in {
check { i: Input =>
string.map(_ * 2)(i).awaitValueUnsafe() === i.headOption.map(_ * 2)
}
}
it should "support transform" in {
check { i: Input =>
val fn = (fs: Future[Output[String]]) => fs.map(_.map(_ * 2))
string.transform(fn)(i).awaitValueUnsafe() === i.headOption.map(_ * 2)
}
}
it should "propagate the default (Ok) output" in {
check { i: Input =>
string(i).awaitOutputUnsafe() === i.headOption.map(s => Ok(s))
}
}
it should "propagate the default (Ok) output through its map'd/mapAsync'd version" in {
check { i: Input =>
val expected = i.headOption.map(s => Ok(s.length))
string.map(s => s.length)(i).awaitOutputUnsafe() === expected &&
string.mapAsync(s => Future.value(s.length))(i).awaitOutputUnsafe() === expected
}
}
it should "propagate the output through mapOutputAsync and /" in {
def expected(i: Int): Output[Int] =
Created(i)
.withHeader("A" -> "B")
.withCookie(new Cookie("C", "D"))
check { i: Input =>
string.mapOutputAsync(s => Future.value(expected(s.length)))(i).awaitOutputUnsafe() ===
i.headOption.map(s => expected(s.length))
}
check { i: Input =>
val e = i.path.dropRight(1)
.map(s => s: Endpoint0)
.foldLeft[Endpoint0](/)((acc, ee) => acc :: ee)
val v = (e :: string).mapOutputAsync(s => Future.value(expected(s.length)))(i)
v.awaitOutputUnsafe() === i.path.lastOption.map(s => expected(s.length))
}
}
  it should "match one path segment" in {
def matchOne[A](f: String => A)(implicit ev: A => Endpoint0): Input => Boolean = { i: Input =>
val v = i.path.headOption
.flatMap(s => Try(f(s)).toOption)
.map(ev)
.flatMap(e => e(i).remainder)
      v.isEmpty || v === Some(i.drop(1))
}
check(matchOne(identity))
check(matchOne(_.toInt))
check(matchOne(_.toBoolean))
}
it should "always match the entire input with *" in {
check { i: Input =>
*(i).remainder === Some(i.copy(path = Nil))
}
}
it should "match the HTTP method" in {
def matchMethod(m: Method, f: Endpoint0 => Endpoint0): Input => Boolean = { i: Input =>
val v = f(/)(i)
(i.request.method === m && v.remainder === Some(i)) ||
(i.request.method != m && v.remainder === None)
}
check(matchMethod(Method.Get, get))
check(matchMethod(Method.Post, post))
check(matchMethod(Method.Trace, trace))
check(matchMethod(Method.Put, put))
check(matchMethod(Method.Patch, patch))
check(matchMethod(Method.Head, head))
check(matchMethod(Method.Options, options))
check(matchMethod(Method.Connect, connect))
check(matchMethod(Method.Delete, delete))
}
it should "always match the identity instance" in {
check { i: Input =>
/(i).remainder === Some(i)
}
}
it should "match the entire input" in {
check { i: Input =>
val e = i.path.map(s => s: Endpoint0).foldLeft[Endpoint0](/)((acc, e) => acc :: e)
e(i).remainder === Some(i.copy(path = Nil))
}
}
  it should "not match the entire input if one of the underlying endpoints has failed" in {
check { (i: Input, s: String) =>
(* :: s).apply(i).remainder === None
}
}
it should "match the input if one of the endpoints succeed" in {
def matchOneOfTwo(f: String => Endpoint0): Input => Boolean = { i: Input =>
val v = i.path.headOption.map(f).flatMap(e => e(i).remainder)
v.isEmpty || v === Some(i.drop(1))
}
check(matchOneOfTwo(s => (s: Endpoint0) | (s.reverse: Endpoint0)))
check(matchOneOfTwo(s => (s.reverse: Endpoint0) | (s: Endpoint0)))
}
it should "have the correct string representation" in {
def standaloneMatcher[A](implicit f: A => Endpoint0): A => Boolean = { a: A =>
f(a).toString == a.toString
}
check(standaloneMatcher[String])
check(standaloneMatcher[Int])
check(standaloneMatcher[Boolean])
def methodMatcher(m: Method, f: Endpoint0 => Endpoint0): String => Boolean = { s: String =>
f(s).toString === m.toString().toUpperCase + " /" + s
}
check(methodMatcher(Method.Get, get))
check(methodMatcher(Method.Post, post))
check(methodMatcher(Method.Trace, trace))
check(methodMatcher(Method.Put, put))
check(methodMatcher(Method.Patch, patch))
check(methodMatcher(Method.Head, head))
check(methodMatcher(Method.Options, options))
check(methodMatcher(Method.Connect, connect))
check(methodMatcher(Method.Delete, delete))
check { (s: String, i: Int) => (s: Endpoint0).map(_ => i).toString === s }
check { (s: String, t: String) => ((s: Endpoint0) | (t: Endpoint0)).toString === s"($s|$t)" }
check { (s: String, t: String) => ((s: Endpoint0) :: (t: Endpoint0)).toString === s"$s :: $t" }
check { s: String => (s: Endpoint0).product[String](*.map(_ => "foo")).toString === s }
check { (s: String, t: String) => (s: Endpoint0).mapAsync(_ => Future.value(t)).toString === s }
*.toString shouldBe "*"
/.toString shouldBe ""
int.toString shouldBe ":int"
string.toString shouldBe ":string"
long.toString shouldBe ":long"
uuid.toString shouldBe ":uuid"
boolean.toString shouldBe ":boolean"
ints.toString shouldBe ":int*"
strings.toString shouldBe ":string*"
longs.toString shouldBe ":long*"
uuids.toString shouldBe ":uuid*"
booleans.toString shouldBe ":boolean*"
(int :: string).toString shouldBe ":int :: :string"
(boolean :+: long).toString shouldBe "(:boolean|:long)"
}
it should "always respond with the same output if it's a constant Endpoint" in {
check { s: String =>
Endpoint.const(s)(Input.get("/")).awaitValueUnsafe() === Some(s) &&
Endpoint.lift(s)(Input.get("/")).awaitValueUnsafe() === Some(s) &&
Endpoint.liftFuture(Future.value(s))(Input.get("/")).awaitValueUnsafe() === Some(s)
}
check { o: Output[String] =>
Endpoint.liftOutput(o)(Input.get("/")).awaitOutputUnsafe() === Some(o) &&
Endpoint.liftFutureOutput(Future.value(o))(Input.get("/")).awaitOutputUnsafe() === Some(o)
}
}
it should "support the as[A] method" in {
case class Foo(s: String, i: Int, b: Boolean)
val foo = (string :: int :: boolean).as[Foo]
check { (s: String, i: Int, b: Boolean) =>
foo(Input(emptyRequest, Seq(s, i.toString, b.toString))).awaitValueUnsafe() ===
Some(Foo(s, i, b))
}
}
it should "throw Error.NotParsed if as[A] method fails" in {
val cause = new Exception("can't parse this")
implicit val failingDecodeEntity: DecodeEntity[Foo] =
DecodeEntity.instance(_ => Throw(cause))
val foo = stringBody.as[Foo]
val fooOption = stringBodyOption.as[Foo]
val i = (s: String) => Input.post("/").withBody[Text.Plain](Buf.Utf8(s))
check { (s: String) =>
foo(i(s)).awaitValue() === Some(Throw(
Error.NotParsed(BodyItem, implicitly[ClassTag[Foo]], cause)
))
}
check { (s: String) =>
fooOption(i(s)).awaitValue() === Some(Throw(
Error.NotParsed(BodyItem, implicitly[ClassTag[Foo]], cause)
))
}
}
  it should "rescue an exception that occurred in it" in {
check { (i: Input, s: String, e: Exception) =>
Endpoint.liftFuture[Unit](Future.exception(e))
.handle({ case _ => Created(s) })(i)
.awaitOutputUnsafe() === Some(Created(s))
}
}
it should "not split comma separated param values" in {
val i = Input.get("/index", "foo" -> "a,b")
val e = params("foo")
e(i).awaitValueUnsafe() shouldBe Some(Seq("a,b"))
}
it should "throw NotPresent if an item is not found" in {
val i = Input.get("/")
Seq(
param("foo"), header("foo"), cookie("foo").map(_.value),
fileUpload("foo").map(_.fileName), paramsNel("foo").map(_.toList.mkString),
paramsNel("foor").map(_.toList.mkString), binaryBody.map(new String(_)), stringBody
).foreach { ii => ii(i).awaitValue() shouldBe Some(Throw(Error.NotPresent(ii.item))) }
}
  it should "map lazily to values" in {
val i = Input(emptyRequest, Seq.empty)
var c = 0
val e = * { c = c + 1; Ok(c) }
e(i).awaitValueUnsafe() shouldBe Some(1)
e(i).awaitValueUnsafe() shouldBe Some(2)
}
it should "not evaluate Futures until matched" in {
val i = Input(emptyRequest, Seq("a", "10"))
var flag = false
val endpointWithFailedFuture = "a".mapAsync { nil =>
Future { flag = true; nil }
}
val e = ("a" :: 10) | endpointWithFailedFuture
e(i).isMatched shouldBe true
flag shouldBe false
}
  it should "be greedy in terms of the | combinator" in {
val a = Input(emptyRequest, Seq("a", "10"))
val b = Input(emptyRequest, Seq("a"))
val e1: Endpoint0 = "a" | "b" | ("a" :: 10)
val e2: Endpoint0 = ("a" :: 10) | "b" | "a"
e1(a).remainder shouldBe Some(a.drop(2))
e1(b).remainder shouldBe Some(b.drop(2))
e2(a).remainder shouldBe Some(a.drop(2))
e2(b).remainder shouldBe Some(b.drop(2))
}
it should "accumulate errors on its product" in {
check { (a: Either[Error, Errors], b: Either[Error, Errors]) =>
val aa = a.fold[Exception](identity, identity)
val bb = b.fold[Exception](identity, identity)
val left = Endpoint.liftFuture[Unit](Future.exception(aa))
val right = Endpoint.liftFuture[Unit](Future.exception(bb))
val lr = left.product(right)
val rl = right.product(left)
val all =
a.fold[Set[Error]](e => Set(e), es => es.errors.toList.toSet) ++
b.fold[Set[Error]](e => Set(e), es => es.errors.toList.toSet)
val Some(Throw(first)) = lr(Input.get("/")).awaitValue()
val Some(Throw(second)) = rl(Input.get("/")).awaitValue()
first.asInstanceOf[Errors].errors.toList.toSet === all &&
second.asInstanceOf[Errors].errors.toList.toSet === all
}
}
it should "fail-fast with the first non-error observed" in {
check { (a: Error, b: Errors, e: Exception) =>
val aa = Endpoint.liftFuture[Unit](Future.exception(a))
val bb = Endpoint.liftFuture[Unit](Future.exception(b))
val ee = Endpoint.liftFuture[Unit](Future.exception(e))
val aaee = aa.product(ee)
val eeaa = ee.product(aa)
val bbee = bb.product(ee)
val eebb = ee.product(bb)
aaee(Input.get("/")).awaitValue() === Some(Throw(e)) &&
eeaa(Input.get("/")).awaitValue() === Some(Throw(e)) &&
bbee(Input.get("/")).awaitValue() === Some(Throw(e)) &&
eebb(Input.get("/")).awaitValue() === Some(Throw(e))
}
}
it should "support the as[A] method on Endpoint[Seq[String]]" in {
val foos = params("testEndpoint").as[Foo]
foos(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe() shouldBe Some(Seq(Foo("a")))
}
it should "liftToTry" in {
check { e: Endpoint[Unit] =>
val i = Input.get("/")
e(i).awaitValue() === e.liftToTry(i).awaitValueUnsafe()
}
}
it should "collect errors on Endpoint[Seq[String]] failure" in {
val endpoint: Endpoint[Seq[UUID]] = params("testEndpoint").as[UUID]
an[Errors] shouldBe thrownBy (
endpoint(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe()
)
}
it should "support the as[A] method on Endpoint[NonEmptyList[A]]" in {
val foos = paramsNel("testEndpoint").as[Foo]
foos(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe() shouldBe
Some(NonEmptyList.of(Foo("a")))
}
it should "collect errors on Endpoint[NonEmptyList[String]] failure" in {
val endpoint: Endpoint[NonEmptyList[UUID]] = paramsNel("testEndpoint").as[UUID]
an[Errors] shouldBe thrownBy (
endpoint(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe(10.seconds)
)
}
}
| yanana/finch | core/src/test/scala/io/finch/EndpointSpec.scala | Scala | apache-2.0 | 13,293 |
package com.ybrikman.ping.scalaapi.bigpipe
import play.api.libs.iteratee.{Enumeratee, Enumerator}
import play.twirl.api.{Appendable, HtmlFormat, Format, Html}
import play.api.mvc.{Codec, Result}
import scala.language.implicitConversions
import scala.concurrent.{ExecutionContext, Future}
/**
* A custom Appendable that lets you create .scala.stream templates instead of .scala.html. These templates can mix Html
* markup with Enumerators that contain Html markup so that as soon as the content is available, Play can stream it
* back to the client. You need to add this class as a custom template type in build.sbt.
*
 * @param enumerator the underlying enumerator of Html chunks to stream
*/
class HtmlStream(val enumerator: Enumerator[Html]) extends Appendable[HtmlStream] {
def andThen(other: HtmlStream): HtmlStream = HtmlStream.fromHtmlEnumerator(enumerator.andThen(other.enumerator))
}
/**
* Companion object for HtmlStream that contains convenient factory and composition methods.
*/
object HtmlStream {
/**
* Create an empty HtmlStream
*
* @return
*/
def empty: HtmlStream = {
fromString("")
}
/**
* Create an HtmlStream from a String
*
* @param text
* @return
*/
def fromString(text: String): HtmlStream = {
fromHtml(Html(text))
}
/**
* Create an HtmlStream from a Future that will eventually contain a String
*
* @param eventuallyString
* @return
*/
def fromStringFuture(eventuallyString: Future[String])(implicit ec: ExecutionContext): HtmlStream = {
fromHtmlFuture(eventuallyString.map(Html.apply))
}
/**
* Create an HtmlStream from Html
*
* @param html
* @return
*/
def fromHtml(html: Html): HtmlStream = {
fromHtmlEnumerator(Enumerator(html))
}
/**
* Create an HtmlStream from an Enumerator of Html
*
* @param enumerator
* @return
*/
def fromHtmlEnumerator(enumerator: Enumerator[Html]): HtmlStream = {
new HtmlStream(enumerator)
}
/**
* Create an HtmlStream from a Future that will eventually contain Html
*
* @param eventuallyHtml
* @return
*/
def fromHtmlFuture(eventuallyHtml: Future[Html])(implicit ec: ExecutionContext): HtmlStream = {
flatten(eventuallyHtml.map(fromHtml))
}
/**
* Create an HtmlStream from the body of the Result.
*
* @param result
* @return
*/
def fromResult(result: Result)(implicit ec: ExecutionContext, codec: Codec): HtmlStream = {
HtmlStream.fromHtmlEnumerator(result.body.map(bytes => Html(codec.decode(bytes))))
}
/**
* Create an HtmlStream from a the body of a Future[Result].
*
* @param result
* @return
*/
def fromResultFuture(result: Future[Result])(implicit ec: ExecutionContext): HtmlStream = {
flatten(result.map(fromResult))
}
/**
   * Interleave multiple HtmlStreams together. Interleaving is done based on whichever HtmlStream next has input
   * ready; if multiple have input ready, the order is undefined.
*
* @param streams
* @return
*/
def interleave(streams: HtmlStream*): HtmlStream = {
fromHtmlEnumerator(Enumerator.interleave(streams.map(_.enumerator)))
}
/**
* Create an HtmlStream from a Future that will eventually contain an HtmlStream.
*
* @param eventuallyStream
* @return
*/
def flatten(eventuallyStream: Future[HtmlStream])(implicit ec: ExecutionContext): HtmlStream = {
fromHtmlEnumerator(Enumerator.flatten(eventuallyStream.map(_.enumerator)))
}
}
/**
* A custom Appendable that lets you create .scala.stream templates instead of .scala.html. These templates can mix Html
* markup with Enumerators that contain Html markup so that as soon as the content is available, Play can stream it
* back to the client.
*/
object HtmlStreamFormat extends Format[HtmlStream] {
def raw(text: String): HtmlStream = {
HtmlStream.fromString(text)
}
def escape(text: String): HtmlStream = {
raw(HtmlFormat.escape(text).body)
}
def empty: HtmlStream = {
raw("")
}
def fill(elements: scala.collection.immutable.Seq[HtmlStream]): HtmlStream = {
elements.reduce((agg, curr) => agg.andThen(curr))
}
}
/**
* Useful implicits when working with HtmlStreams
*/
object HtmlStreamImplicits {
/**
* Implicit conversion so HtmlStream can be passed directly to Ok.feed and Ok.chunked
*
* @param stream
* @param ec
* @return
*/
implicit def toEnumerator(stream: HtmlStream)(implicit ec: ExecutionContext): Enumerator[Html] = {
// Skip empty chunks, as these mean EOF in chunked encoding
stream.enumerator.through(Enumeratee.filter(!_.body.isEmpty))
}
}
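// Illustrative usage sketch (not part of the original source): streaming a page from a Play
// controller. `views.stream.index` is a hypothetical .scala.stream template and an implicit
// ExecutionContext is assumed to be in scope.
//
//   import HtmlStreamImplicits._
//
//   def index = Action {
//     val header = HtmlStream.fromString("<h1>Loaded immediately</h1>")
//     val slow   = HtmlStream.fromHtmlFuture(slowBackendCall()) // Future[Html]
//     Ok.chunked(views.stream.index(header.andThen(slow)))
//   }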
| carlosFattor/ping-play | big-pipe/src/main/scala/com/ybrikman/ping/scalaapi/bigpipe/HtmlStream.scala | Scala | mit | 4,588 |
// scalac: -Xfatal-warnings
sealed abstract class X
sealed case class A(x: Boolean) extends X
case object B extends X
object Test {
def test(x: X) = x match {
case A(true) =>
case A(false) | B =>
}
}
| scala/scala | test/files/pos/exhaust_alternatives.scala | Scala | apache-2.0 | 213 |
package pl.pholda.malpompaaligxilo.dsl
object MathExprParserJVMTest extends MathExprParserTest {
override def testForm: TestForm = TestFormJVM
}
| pholda/MalpompaAligxilo | dsl/jvm/src/test/scala/pl/pholda/malpompaaligxilo/dsl/MathExprParserJVMTest.scala | Scala | gpl-3.0 | 148 |
package eu.daxiongmao.training.scala.chp6
/**
* This represents a rational number (x) where x = n/d <br>
* n = numerator<br>
* d = denominator<br>
* <br>
* Note that all values are immutable (~ Java: private final)
*/
class Rational(numer: Int, denom: Int) {
  // Constructor precondition: the denominator must not be zero
  require(denom != 0)
  // Greatest common divisor used to normalize the fraction
val greatestCommonDivisor = findGreatestCommonDivisor(numer.abs, denom.abs)
// Associate constructor values to local attributes
val numerator: Int = numer / greatestCommonDivisor
val denominator: Int = denom / greatestCommonDivisor
// Auxiliary constructor
def this(realNumber: Int) = this(realNumber, 1)
override def toString = numerator + "/" + denominator
// Example of math operator + polymorphism
def add(other: Rational): Rational = {
new Rational(numerator * other.denominator + other.numerator * denominator, denominator * other.denominator)
}
def + (other: Rational): Rational = add(other)
def + (other: Int): Rational = add(new Rational(other, 1))
def multiple(other: Rational): Rational = {
new Rational(numerator * other.numerator, denominator * other.denominator)
}
def * (other: Rational): Rational = multiple(other)
def * (other: Int): Rational = multiple(new Rational(other, 1))
def / (other: Rational): Rational = {
new Rational(numerator * other.denominator, denominator * other.numerator)
}
def / (other: Int): Rational = /(new Rational(other, 1))
  // Note: '-' is not a keyword, but a bare -(new Rational(...)) call would be parsed as unary minus, so the method is invoked explicitly via this.-(...)
def - (other: Rational): Rational = {
new Rational(numerator * other.denominator - other.numerator * denominator, denominator * other.denominator)
}
def - (other: Int): Rational = this.-(new Rational(other, 1))
def lessThan(other: Rational): Boolean = numerator * other.denominator < other.numerator * denominator
def getMax(other: Rational): Rational = {
if (this.lessThan(other)) {
return other
} else {
return this
}
}
private def findGreatestCommonDivisor(a: Int, b: Int): Int = {
if (b == 0) {
return a
} else {
return findGreatestCommonDivisor(b, a % b)
}
}
}
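// Illustrative usage (not part of the original source): exercises the operators defined above.
// Fractions are normalized by the greatest common divisor on construction.
object RationalExample extends App {
  val half = new Rational(1, 2)
  val twoThirds = new Rational(2, 3)
  println(half + twoThirds)       // 7/6
  println(half * 2)               // 1/1 (2/2 reduced)
  println(half - 1)               // -1/2
  println(half.getMax(twoThirds)) // 2/3
}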
| guihome-diaz/training | progInScala/src/main/scala/eu/daxiongmao/training/scala/chp6/Rational.scala | Scala | gpl-2.0 | 2,189 |
package twatcher.actors
import twatcher.globals.twitter
import twatcher.logics.TwitterLogic
import twatcher.models.Account
import play.api.Logger
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent.Future
import akka.actor._
class TwitterActor extends Actor {
def receive = {
case AccountMessage(account) => executeTwitterAction(account)
case Exit() => finish()
case _ => // Do nothing
}
private[this] def executeTwitterAction(account: Account) = {
// Execute action asynchronously
if(account.goodbyeFlag) {
Logger.info(s"@${account.screenName} says goodbye")
createExecuter() ! Goodbye(account)
}
if(account.tweetDeleteFlag) {
Logger.info(s"@${account.screenName} deletes tweets")
createExecuter() ! TweetDelete(account)
}
if(account.favoriteDeleteFlag) {
Logger.info(s"@${account.screenName} deletes favorite")
createExecuter() ! FavoriteDelete(account)
}
if(account.updateProfile.isDefined) {
Logger.info(s"@${account.screenName} changes profile")
createExecuter() ! UpdateProfile(account)
}
// Finish executing if no action registered
self ! Exit()
}
/**
* Create child TwitterExecuteActor
*/
private[this] def createExecuter(): ActorRef = {
Logger.info("Twitter Execute Actor created.")
context.actorOf(Props(classOf[TwitterExecuteActor]))
}
/**
   * Count the children; if there are none, all executing has finished:
   * tell the parent BatchActor that this actor is done and kill itself
*/
private[this] def finish() {
if (context.children.size == 0) {
Logger.info("Twitter Actor finish")
context.parent ! Exit()
self ! PoisonPill
} else {
Logger.info(s"Twitter Actor has ${context.children.size} task(s).")
}
}
}
class TwitterExecuteActor extends Actor {
def receive = {
case Goodbye(account) => execute(TwitterLogic.goodbye(twitter, account))
case TweetDelete(account) => execute(TwitterLogic.deleteTweets(twitter, account))
case FavoriteDelete(account) => execute(TwitterLogic.unfavorite(twitter, account))
case UpdateProfile(account) => execute(TwitterLogic.updateTwitterProfile(twitter, account))
    case Exit() => // Do nothing: a TwitterExecuteActor kills itself when its action finishes
case _ => // Do nothing
}
private[this] def execute(f: Future[Unit]) = {
f onSuccess {
case _ => finish()
}
f onFailure {
case e: Throwable =>
Logger.error("TwitterExecuteActor gets error", e)
finish()
}
}
/**
   * Tell the parent TwitterActor that executing has finished and kill itself
*/
private[this] def finish() {
Logger.info("Twitter Execute Actor finish")
context.parent ! Exit()
self ! PoisonPill
}
}
| srd7/twatcher | app/actors/TwitterActor.scala | Scala | mit | 2,895 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.models
import play.api.libs.json.{ Format, JsError, JsString, JsSuccess, Reads, Writes }
case class FormId(value: String) extends AnyVal {
override def toString = value
}
object FormId {
implicit val format: Format[FormId] = ValueClassFormat.format(FormId.apply)(_.value)
}
| VlachJosef/bforms-frontend | app/uk/gov/hmrc/bforms/models/FormId.scala | Scala | apache-2.0 | 913 |
package modules.time
import java.io.{BufferedReader, File}
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import us.feliscat.ir.fulltext.indri.IndriResult
import us.feliscat.m17n.MultiLingual
import us.feliscat.text.{StringNone, StringOption, StringSome}
import us.feliscat.time.{TimeExtractor, TimeTmp}
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
* <pre>
* Created on 2017/02/08.
* </pre>
*
* @author K.Sakamoto
*/
trait MultiLingualTimeExtractorFromPreviousParagraphInTextbook extends TimeExtractor with MultiLingual {
protected val trecTextFormatData: Seq[String]
private lazy val docnoTimeTmpListMap: Map[String, Seq[TimeTmp]] = initialize
protected def extractForWorldHistory(sentenceOpt: StringOption): Seq[TimeTmp]
protected def toIndriResultMap(lines: Iterator[String],
keywordOriginalTextOpt: StringOption,
expansionOnlyList: Seq[String],
indriResultMap: mutable.Map[String, IndriResult]): Map[String, IndriResult]
def initialize: Map[String, Seq[TimeTmp]] = {
val map = mutable.Map.empty[String, Seq[TimeTmp]]
val buffer = ListBuffer.empty[String]
trecTextFormatData foreach {
dir: String =>
new File(dir).listFiles foreach {
case file: File if file.canRead && file.isFile && file.getName.endsWith(".xml") =>
val reader: BufferedReader = Files.newBufferedReader(file.toPath, StandardCharsets.UTF_8)
val iterator: java.util.Iterator[String] = reader.lines.iterator
while (iterator.hasNext) {
val line: String = iterator.next
buffer += line
}
reader.close()
case _ =>
// Do nothing
}
}
val iterator: Iterator[IndriResult] = toIndriResultMap(
buffer.result.iterator,
StringNone,
Nil,
mutable.Map.empty[String, IndriResult]).valuesIterator
while (iterator.hasNext) {
val result: IndriResult = iterator.next
result.docno match {
case StringSome(docno) if !map.contains(docno) =>
map(docno) = extractForWorldHistory(result.text)
case _ =>
// Do nothing
}
}
map.toMap
}
override def extract(docnoOpt: StringOption): Seq[TimeTmp] = {
docnoOpt match {
case StringSome(docno) if isOk(docno) =>
docnoTimeTmpListMap(docno)
case _ =>
Nil
}
}
private def isOk(docno: String): Boolean = {
docno.matches("""^(?:T-WH-[ABS]|Y-JH)-.+$""") &&
!isFirstDoc(docno) &&
!docno.startsWith("YamakawaWorldHistoryGlossary") && // just in case
docnoTimeTmpListMap.contains(docno)
}
private def isFirstDoc(docno: String): Boolean = {
Seq[String](
"T-WH-A-1-0-0-0",
"T-WH-B-1-0-0-0",
"T-WH-S-1-0-0-0",
"Y-JH-00-00-0"
) contains docno
}
}
| ktr-skmt/FelisCatusZero-multilingual | src/main/scala/modules/time/MultiLingualTimeExtractorFromPreviousParagraphInTextbook.scala | Scala | apache-2.0 | 2,958 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
object Monotonic {
def composeMonotonic(f: BigInt => BigInt, g: BigInt => BigInt): BigInt => BigInt = {
require(forall((a: BigInt, b: BigInt) => (a > b ==> f(a) > f(b)) && (a > b ==> g(a) > g(b))))
(x: BigInt) => f(g(x))
} ensuring { res => forall((a: BigInt, b: BigInt) => a > b ==> res(a) > res(b)) }
}
// vim: set ts=4 sw=4 et:
| epfl-lara/leon | src/test/resources/regression/verification/purescala/valid/Monotonic.scala | Scala | gpl-3.0 | 406 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import akka.util.Timeout._
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.actor.cluster.{KafkaManagedOffsetCacheConfig, KafkaStateActor, KafkaStateActorConfig}
import kafka.manager.base.LongRunningPoolConfig
import kafka.manager.features.ClusterFeatures
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try
/**
* @author hiral
*/
class TestKafkaStateActor extends KafkaServerInTest with BaseTest {
private[this] val akkaConfig: Properties = new Properties()
akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
private[this] val system = ActorSystem("test-kafka-state-actor",config)
private[this] val broker = new SeededBroker("ks-test",4)
override val kafkaServerZkPath = broker.getZookeeperConnectionString
private[this] var kafkaStateActor : Option[ActorRef] = None
private[this] implicit val timeout: Timeout = 10.seconds
private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT")
private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)
override protected def beforeAll(): Unit = {
super.beforeAll()
val ksConfig = KafkaStateActorConfig(
sharedCurator
, "pinned-dispatcher"
, defaultClusterContext
, LongRunningPoolConfig(2,100)
, LongRunningPoolConfig(2,100)
, 5
, 10000
, None
, KafkaManagedOffsetCacheConfig()
)
val props = Props(classOf[KafkaStateActor],ksConfig)
kafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"ksa"))
}
override protected def afterAll(): Unit = {
kafkaStateActor.foreach( _ ! Kill )
system.shutdown()
Try(broker.shutdown())
super.afterAll()
}
private[this] def withKafkaStateActor[Input,Output,FOutput](msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
require(kafkaStateActor.isDefined, "kafkaStateActor undefined!")
val future = ask(kafkaStateActor.get, msg).mapTo[Output]
val result = Await.result(future,10.seconds)
fn(result)
}
test("get topic list") {
withKafkaStateActor(KSGetTopics) { result: TopicList =>
result.list foreach println
}
}
test("get consumer list") {
withKafkaStateActor(KSGetConsumers) { result: ConsumerList =>
result.list foreach println
}
}
test("get topic config") {
withKafkaStateActor(KSGetTopics) { result: TopicList =>
val configs = result.list map { topic =>
withKafkaStateActor(KSGetTopicConfig(topic)) { topicConfig: TopicConfig => topicConfig }
}
configs foreach println
}
}
test("get broker list") {
withKafkaStateActor(KSGetBrokers) { result: BrokerList =>
result.list foreach println
val brokerIdentityList : IndexedSeq[BrokerIdentity] = result.list
brokerIdentityList foreach println
}
}
test("get topic description") {
withKafkaStateActor(KSGetTopics) { result: TopicList =>
val descriptions = result.list map { topic =>
withKafkaStateActor(KSGetTopicDescription(topic)) { optionalDesc: Option[TopicDescription] => optionalDesc }
}
descriptions foreach println
withKafkaStateActor(KSGetBrokers) { brokerList: BrokerList =>
val topicIdentityList : IndexedSeq[TopicIdentity] = descriptions.flatten.map(td => TopicIdentity.from(brokerList, td, None, None, brokerList.clusterContext, None))
topicIdentityList foreach println
}
}
}
test("get consumer description") {
withKafkaStateActor(KSGetConsumers) { result: ConsumerList =>
val descriptions = result.list map { consumer =>
withKafkaStateActor(KSGetConsumerDescription(consumer.name, consumer.consumerType)) { optionalDesc: Option[ConsumerDescription] => optionalDesc }
}
descriptions foreach println
}
}
test("get all topic descriptions") {
withKafkaStateActor(KSGetAllTopicDescriptions()) { td: TopicDescriptions =>
td.descriptions foreach println
}
}
test("get all consumer descriptions") {
withKafkaStateActor(KSGetAllConsumerDescriptions()) { cd: ConsumerDescriptions =>
cd.descriptions foreach println
}
}
}
| krux/kafka-manager | test/kafka/manager/TestKafkaStateActor.scala | Scala | apache-2.0 | 5,059 |
/**
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.resolver.filters
import eu.timepit.refined.api.{Refined, Validate}
import org.genivi.sota.data.Namespace
import org.genivi.sota.resolver.filters.FilterAST._
case class Filter(
namespace: Namespace,
name: Filter.Name,
expression: Filter.Expression
) {
def samePK(that: Filter): Boolean = { (namespace == that.namespace) && (name == that.name) }
override def toString(): String = { s"Filter(${name.get}, ${expression.get})" }
}
object Filter {
case class ValidName()
case class ValidExpression()
type Name = Refined[String, ValidName]
type Expression = Refined[String, ValidExpression]
case class ExpressionWrapper (
expression: Filter.Expression
)
implicit val validFilterName: Validate.Plain[String, ValidName] =
Validate.fromPredicate(
name => name.length > 1
&& name.length <= 100
&& name.forall(_.isLetterOrDigit),
name => s"($name should be between two and a hundred alphanumeric characters long.)",
ValidName()
)
implicit val validFilterExpression: Validate.Plain[String, ValidExpression] =
Validate.fromPredicate(
expr => parseFilter(expr).isRight,
expr => parseFilter(expr) match {
case Left(e) => s"($expr failed to parse: $e.)"
case Right(_) => "IMPOSSIBLE"
},
ValidExpression()
)
}
| PDXostc/rvi_sota_server | external-resolver/src/main/scala/org/genivi/sota/resolver/filters/Filter.scala | Scala | mpl-2.0 | 1,457 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.{BigDecimal => JBigDecimal}
import java.sql.{Connection, Date, Timestamp}
import java.text.SimpleDateFormat
import java.util.Properties
import org.apache.spark.sql.Column
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.types.{ArrayType, DecimalType, FloatType, ShortType}
import org.apache.spark.tags.DockerTest
/**
* To run this test suite for a specific version (e.g., postgres:14.0):
* {{{
* ENABLE_DOCKER_INTEGRATION_TESTS=1 POSTGRES_DOCKER_IMAGE_NAME=postgres:14.0
* ./build/sbt -Pdocker-integration-tests
* "testOnly org.apache.spark.sql.jdbc.PostgresIntegrationSuite"
* }}}
*/
@DockerTest
class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite {
override val db = new DatabaseOnDocker {
override val imageName = sys.env.getOrElse("POSTGRES_DOCKER_IMAGE_NAME", "postgres:14.0-alpine")
override val env = Map(
"POSTGRES_PASSWORD" -> "rootpass"
)
override val usesIpc = false
override val jdbcPort = 5432
override def getJdbcUrl(ip: String, port: Int): String =
s"jdbc:postgresql://$ip:$port/postgres?user=postgres&password=rootpass"
}
override def dataPreparation(conn: Connection): Unit = {
conn.prepareStatement("CREATE DATABASE foo").executeUpdate()
conn.setCatalog("foo")
conn.prepareStatement("CREATE TYPE enum_type AS ENUM ('d1', 'd2')").executeUpdate()
conn.prepareStatement("CREATE TABLE bar (c0 text, c1 integer, c2 double precision, c3 bigint, "
+ "c4 bit(1), c5 bit(10), c6 bytea, c7 boolean, c8 inet, c9 cidr, "
+ "c10 integer[], c11 text[], c12 real[], c13 numeric(2,2)[], c14 enum_type, "
+ "c15 float4, c16 smallint, c17 numeric[], c18 bit varying(6), c19 point, c20 line, "
+ "c21 lseg, c22 box, c23 path, c24 polygon, c25 circle, c26 pg_lsn, "
+ "c27 character(2), c28 character varying(3), c29 date, c30 interval, "
+ "c31 macaddr, c32 macaddr8, c33 numeric(6,4), c34 pg_snapshot, "
+ "c35 real, c36 time, c37 timestamp, c38 tsquery, c39 tsvector, c40 txid_snapshot, "
+ "c41 xml)").executeUpdate()
conn.prepareStatement("INSERT INTO bar VALUES ('hello', 42, 1.25, 123456789012345, B'0', "
+ "B'1000100101', E'\\\\\\\\xDEADBEEF', true, '172.16.0.42', '192.168.0.0/16', "
+ """'{1, 2}', '{"a", null, "b"}', '{0.11, 0.22}', '{0.11, 0.22}', 'd1', 1.01, 1, """
+ "'{111.2222, 333.4444}', B'101010', '(800, 600)', '(23.8, 56.2), (16.23, 89.2)', "
+ "'[(80.12, 131.24), (201.5, 503.33)]', '(19.84, 11.23), (20.21, 2.1)', "
+ "'(10.2, 30.4), (50.6, 70.8), (90.1, 11.3)', "
+ "'((100.3, 40.2), (20.198, 83.1), (500.821, 311.38))', '<500, 200, 100>', '16/B374D848', "
+ "'ab', 'efg', '2021-02-02', '1 minute', '00:11:22:33:44:55', "
+ "'00:11:22:33:44:55:66:77', 12.3456, '10:20:10,14,15', 1E+37, "
+ "'17:22:31', '2016-08-12 10:22:31.949271', 'cat:AB & dog:CD', "
+ "'dog and cat and fox', '10:20:10,14,15', '<key>id</key><value>10</value>')"
).executeUpdate()
conn.prepareStatement("INSERT INTO bar VALUES (null, null, null, null, null, "
+ "null, null, null, null, null, null, null, null, null, null, null, null, "
+ "null, null, null, null, null, null, null, null, null, null, null, null, "
+ "null, null, null, null, null, null, null, null, null, null, null, null, null)"
).executeUpdate()
conn.prepareStatement("CREATE TABLE ts_with_timezone " +
"(id integer, tstz TIMESTAMP WITH TIME ZONE, ttz TIME WITH TIME ZONE)")
.executeUpdate()
conn.prepareStatement("INSERT INTO ts_with_timezone VALUES " +
"(1, TIMESTAMP WITH TIME ZONE '2016-08-12 10:22:31.949271-07', " +
"TIME WITH TIME ZONE '17:22:31.949271+00')")
.executeUpdate()
conn.prepareStatement("CREATE TABLE st_with_array (c0 uuid, c1 inet, c2 cidr," +
"c3 json, c4 jsonb, c5 uuid[], c6 inet[], c7 cidr[], c8 json[], c9 jsonb[], c10 xml[], " +
"c11 tsvector[], c12 tsquery[], c13 macaddr[], c14 txid_snapshot[], c15 point[], " +
"c16 line[], c17 lseg[], c18 box[], c19 path[], c20 polygon[], c21 circle[], c22 pg_lsn[], " +
"c23 bit varying(6)[], c24 interval[], c25 macaddr8[], c26 pg_snapshot[])")
.executeUpdate()
conn.prepareStatement("INSERT INTO st_with_array VALUES ( " +
"'0a532531-cdf1-45e3-963d-5de90b6a30f1', '172.168.22.1', '192.168.100.128/25', " +
"""'{"a": "foo", "b": "bar"}', '{"a": 1, "b": 2}', """ +
"ARRAY['7be8aaf8-650e-4dbb-8186-0a749840ecf2'," +
"'205f9bfc-018c-4452-a605-609c0cfad228']::uuid[], ARRAY['172.16.0.41', " +
"'172.16.0.42']::inet[], ARRAY['192.168.0.0/24', '10.1.0.0/16']::cidr[], " +
"""ARRAY['{"a": "foo", "b": "bar"}', '{"a": 1, "b": 2}']::json[], """ +
"""ARRAY['{"a": 1, "b": 2, "c": 3}']::jsonb[], """ +
"""ARRAY['<key>id</key><value>10</value>']::xml[], ARRAY['The dog laying on the grass', """ +
"""'the:1 cat:2 is:3 on:4 the:5 table:6']::tsvector[], """ +
"""ARRAY['programming & language & ! interpreter', 'cat:AB & dog:CD']::tsquery[], """ +
"""ARRAY['12:34:56:78:90:ab', 'cd-ef-12-34-56-78']::macaddr[], """ +
"""ARRAY['10:20:10,14,15']::txid_snapshot[], """ +
"""ARRAY['(800, 600)', '83.24, 5.10']::point[], """ +
"""ARRAY['(23.8, 56.2), (16.23, 89.2)', '{23.85, 10.87, 5.92}']::line[], """ +
"""ARRAY['[(80.12, 131.24), (201.5, 503.33)]']::lseg[], """ +
"""ARRAY['(19.84, 11.23), (20.21, 2.1)']::box[], """ +
"""ARRAY['(10.2, 30.4), (50.6, 70.8), (90.1, 11.3)']::path[], """ +
"""ARRAY['((100.3, 40.2), (20.198, 83.1), (500.821, 311.38))']::polygon[], """ +
"""ARRAY['<500, 200, 100>']::circle[], """ +
"""ARRAY['16/B374D848']::pg_lsn[], """ +
"""ARRAY[B'101010']::bit varying(6)[], """ +
"""ARRAY['1 day', '2 minutes']::interval[], """ +
"""ARRAY['08:00:2b:01:02:03:04:05']::macaddr8[], """ +
"""ARRAY['10:20:10,14,15']::pg_snapshot[])"""
).executeUpdate()
conn.prepareStatement("CREATE TABLE char_types (" +
"c0 char(4), c1 character(4), c2 character varying(4), c3 varchar(4), c4 bpchar)"
).executeUpdate()
conn.prepareStatement("INSERT INTO char_types VALUES " +
"('abcd', 'efgh', 'ijkl', 'mnop', 'q')").executeUpdate()
conn.prepareStatement("CREATE TABLE char_array_types (" +
"c0 char(4)[], c1 character(4)[], c2 character varying(4)[], c3 varchar(4)[], c4 bpchar[])"
).executeUpdate()
conn.prepareStatement("INSERT INTO char_array_types VALUES " +
"""('{"a", "bcd"}', '{"ef", "gh"}', '{"i", "j", "kl"}', '{"mnop"}', '{"q", "r"}')"""
).executeUpdate()
conn.prepareStatement("CREATE TABLE money_types (" +
"c0 money)").executeUpdate()
conn.prepareStatement("INSERT INTO money_types VALUES " +
"('$1,000.00')").executeUpdate()
}
test("Type mapping for various types") {
val df = sqlContext.read.jdbc(jdbcUrl, "bar", new Properties)
val rows = df.collect().sortBy(_.toString())
assert(rows.length == 2)
// Test the types, and values using the first row.
val types = rows(0).toSeq.map(x => x.getClass)
assert(types.length == 42)
assert(classOf[String].isAssignableFrom(types(0)))
assert(classOf[java.lang.Integer].isAssignableFrom(types(1)))
assert(classOf[java.lang.Double].isAssignableFrom(types(2)))
assert(classOf[java.lang.Long].isAssignableFrom(types(3)))
assert(classOf[java.lang.Boolean].isAssignableFrom(types(4)))
assert(classOf[Array[Byte]].isAssignableFrom(types(5)))
assert(classOf[Array[Byte]].isAssignableFrom(types(6)))
assert(classOf[java.lang.Boolean].isAssignableFrom(types(7)))
assert(classOf[String].isAssignableFrom(types(8)))
assert(classOf[String].isAssignableFrom(types(9)))
assert(classOf[scala.collection.Seq[Int]].isAssignableFrom(types(10)))
assert(classOf[scala.collection.Seq[String]].isAssignableFrom(types(11)))
assert(classOf[scala.collection.Seq[Double]].isAssignableFrom(types(12)))
assert(classOf[scala.collection.Seq[BigDecimal]].isAssignableFrom(types(13)))
assert(classOf[String].isAssignableFrom(types(14)))
assert(classOf[java.lang.Float].isAssignableFrom(types(15)))
assert(classOf[java.lang.Short].isAssignableFrom(types(16)))
assert(classOf[scala.collection.Seq[BigDecimal]].isAssignableFrom(types(17)))
assert(classOf[String].isAssignableFrom(types(18)))
assert(classOf[String].isAssignableFrom(types(19)))
assert(classOf[String].isAssignableFrom(types(20)))
assert(classOf[String].isAssignableFrom(types(21)))
assert(classOf[String].isAssignableFrom(types(22)))
assert(classOf[String].isAssignableFrom(types(23)))
assert(classOf[String].isAssignableFrom(types(24)))
assert(classOf[String].isAssignableFrom(types(25)))
assert(classOf[String].isAssignableFrom(types(26)))
assert(classOf[String].isAssignableFrom(types(27)))
assert(classOf[String].isAssignableFrom(types(28)))
assert(classOf[Date].isAssignableFrom(types(29)))
assert(classOf[String].isAssignableFrom(types(30)))
assert(classOf[String].isAssignableFrom(types(31)))
assert(classOf[String].isAssignableFrom(types(32)))
assert(classOf[JBigDecimal].isAssignableFrom(types(33)))
assert(classOf[String].isAssignableFrom(types(34)))
assert(classOf[java.lang.Float].isAssignableFrom(types(35)))
assert(classOf[java.sql.Timestamp].isAssignableFrom(types(36)))
assert(classOf[java.sql.Timestamp].isAssignableFrom(types(37)))
assert(classOf[String].isAssignableFrom(types(38)))
assert(classOf[String].isAssignableFrom(types(39)))
assert(classOf[String].isAssignableFrom(types(40)))
assert(classOf[String].isAssignableFrom(types(41)))
assert(rows(0).getString(0).equals("hello"))
assert(rows(0).getInt(1) == 42)
assert(rows(0).getDouble(2) == 1.25)
assert(rows(0).getLong(3) == 123456789012345L)
assert(!rows(0).getBoolean(4))
// BIT(10)'s come back as ASCII strings of ten ASCII 0's and 1's...
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](5),
Array[Byte](49, 48, 48, 48, 49, 48, 48, 49, 48, 49)))
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](6),
Array[Byte](0xDE.toByte, 0xAD.toByte, 0xBE.toByte, 0xEF.toByte)))
assert(rows(0).getBoolean(7))
assert(rows(0).getString(8) == "172.16.0.42")
assert(rows(0).getString(9) == "192.168.0.0/16")
assert(rows(0).getSeq(10) == Seq(1, 2))
assert(rows(0).getSeq(11) == Seq("a", null, "b"))
assert(rows(0).getSeq(12).toSeq == Seq(0.11f, 0.22f))
assert(rows(0).getSeq(13) == Seq("0.11", "0.22").map(BigDecimal(_).bigDecimal))
assert(rows(0).getString(14) == "d1")
assert(rows(0).getFloat(15) == 1.01f)
assert(rows(0).getShort(16) == 1)
assert(rows(0).getSeq(17) ==
Seq("111.222200000000000000", "333.444400000000000000").map(BigDecimal(_).bigDecimal))
assert(rows(0).getString(18) == "101010")
assert(rows(0).getString(19) == "(800,600)")
assert(rows(0).getString(20) == "{-4.359313077939234,-1,159.9516512549538}")
assert(rows(0).getString(21) == "[(80.12,131.24),(201.5,503.33)]")
assert(rows(0).getString(22) == "(20.21,11.23),(19.84,2.1)")
assert(rows(0).getString(23) == "((10.2,30.4),(50.6,70.8),(90.1,11.3))")
assert(rows(0).getString(24) == "((100.3,40.2),(20.198,83.1),(500.821,311.38))")
assert(rows(0).getString(25) == "<(500,200),100>")
assert(rows(0).getString(26) == "16/B374D848")
assert(rows(0).getString(27) == "ab")
assert(rows(0).getString(28) == "efg")
assert(rows(0).getDate(29) == new SimpleDateFormat("yyyy-MM-dd").parse("2021-02-02"))
assert(rows(0).getString(30) == "00:01:00")
assert(rows(0).getString(31) == "00:11:22:33:44:55")
assert(rows(0).getString(32) == "00:11:22:33:44:55:66:77")
assert(rows(0).getDecimal(33) == new JBigDecimal("12.3456"))
assert(rows(0).getString(34) == "10:20:10,14,15")
assert(rows(0).getFloat(35) == 1E+37F)
assert(rows(0).getTimestamp(36) == Timestamp.valueOf("1970-01-01 17:22:31.0"))
assert(rows(0).getTimestamp(37) == Timestamp.valueOf("2016-08-12 10:22:31.949271"))
assert(rows(0).getString(38) == "'cat':AB & 'dog':CD")
assert(rows(0).getString(39) == "'and' 'cat' 'dog' 'fox'")
assert(rows(0).getString(40) == "10:20:10,14,15")
assert(rows(0).getString(41) == "<key>id</key><value>10</value>")
// Test reading null values using the second row.
assert(0.until(16).forall(rows(1).isNullAt(_)))
}
test("Basic write test") {
val df = sqlContext.read.jdbc(jdbcUrl, "bar", new Properties)
// Test only that it doesn't crash.
df.write.jdbc(jdbcUrl, "public.barcopy", new Properties)
// Test that written numeric type has same DataType as input
assert(sqlContext.read.jdbc(jdbcUrl, "public.barcopy", new Properties).schema(13).dataType ==
ArrayType(DecimalType(2, 2), true))
// Test write null values.
df.select(df.queryExecution.analyzed.output.map { a =>
Column(Literal.create(null, a.dataType)).as(a.name)
}: _*).write.jdbc(jdbcUrl, "public.barcopy2", new Properties)
}
test("Creating a table with shorts and floats") {
sqlContext.createDataFrame(Seq((1.0f, 1.toShort)))
.write.jdbc(jdbcUrl, "shortfloat", new Properties)
val schema = sqlContext.read.jdbc(jdbcUrl, "shortfloat", new Properties).schema
assert(schema(0).dataType == FloatType)
assert(schema(1).dataType == ShortType)
}
test("SPARK-20557: column type TIMESTAMP with TIME ZONE and TIME with TIME ZONE " +
"should be recognized") {
// When using JDBC to read the columns of TIMESTAMP with TIME ZONE and TIME with TIME ZONE
// the actual types are java.sql.Types.TIMESTAMP and java.sql.Types.TIME
val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
val rows = dfRead.collect()
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types(1).equals("class java.sql.Timestamp"))
assert(types(2).equals("class java.sql.Timestamp"))
}
  test("SPARK-22291: Conversion error when transforming array types of " +
    "uuid, inet and cidr to StringType in PostgreSQL") {
val df = sqlContext.read.jdbc(jdbcUrl, "st_with_array", new Properties)
val rows = df.collect()
assert(rows(0).getString(0) == "0a532531-cdf1-45e3-963d-5de90b6a30f1")
assert(rows(0).getString(1) == "172.168.22.1")
assert(rows(0).getString(2) == "192.168.100.128/25")
assert(rows(0).getString(3) == "{\\"a\\": \\"foo\\", \\"b\\": \\"bar\\"}")
assert(rows(0).getString(4) == "{\\"a\\": 1, \\"b\\": 2}")
assert(rows(0).getSeq(5) == Seq("7be8aaf8-650e-4dbb-8186-0a749840ecf2",
"205f9bfc-018c-4452-a605-609c0cfad228"))
assert(rows(0).getSeq(6) == Seq("172.16.0.41", "172.16.0.42"))
assert(rows(0).getSeq(7) == Seq("192.168.0.0/24", "10.1.0.0/16"))
assert(rows(0).getSeq(8) == Seq("""{"a": "foo", "b": "bar"}""", """{"a": 1, "b": 2}"""))
assert(rows(0).getSeq(9) == Seq("""{"a": 1, "b": 2, "c": 3}"""))
assert(rows(0).getSeq(10) == Seq("""<key>id</key><value>10</value>"""))
assert(rows(0).getSeq(11) == Seq("'The' 'dog' 'grass' 'laying' 'on' 'the'",
"'cat':2 'is':3 'on':4 'table':6 'the':1,5"))
assert(rows(0).getSeq(12) == Seq("'programming' & 'language' & !'interpreter'",
"'cat':AB & 'dog':CD"))
assert(rows(0).getSeq(13) == Seq("12:34:56:78:90:ab", "cd:ef:12:34:56:78"))
assert(rows(0).getSeq(14) == Seq("10:20:10,14,15"))
assert(rows(0).getSeq(15) == Seq("(800.0,600.0)", "(83.24,5.1)"))
assert(rows(0).getSeq(16) == Seq("{-4.359313077939234,-1.0,159.9516512549538}",
"{23.85,10.87,5.92}"))
assert(rows(0).getSeq(17) == Seq("[(80.12,131.24),(201.5,503.33)]"))
assert(rows(0).getSeq(18) == Seq("(20.21,11.23),(19.84,2.1)"))
assert(rows(0).getSeq(19) == Seq("((10.2,30.4),(50.6,70.8),(90.1,11.3))"))
assert(rows(0).getSeq(20) == Seq("((100.3,40.2),(20.198,83.1),(500.821,311.38))"))
assert(rows(0).getSeq(21) == Seq("<(500.0,200.0),100.0>"))
assert(rows(0).getSeq(22) == Seq("16/B374D848"))
assert(rows(0).getSeq(23) == Seq("101010"))
assert(rows(0).getSeq(24) == Seq("0 years 0 mons 1 days 0 hours 0 mins 0.0 secs",
"0 years 0 mons 0 days 0 hours 2 mins 0.0 secs"))
assert(rows(0).getSeq(25) == Seq("08:00:2b:01:02:03:04:05"))
assert(rows(0).getSeq(26) == Seq("10:20:10,14,15"))
}
test("query JDBC option") {
val expectedResult = Set(
(42, 123456789012345L)
).map { case (c1, c3) =>
Row(Integer.valueOf(c1), java.lang.Long.valueOf(c3))
}
val query = "SELECT c1, c3 FROM bar WHERE c1 IS NOT NULL"
// query option to pass on the query string.
val df = spark.read.format("jdbc")
.option("url", jdbcUrl)
.option("query", query)
.load()
assert(df.collect.toSet === expectedResult)
// query option in the create table path.
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW queryOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$jdbcUrl', query '$query')
""".stripMargin.replaceAll("\\n", " "))
assert(sql("select c1, c3 from queryOption").collect.toSet == expectedResult)
}
test("write byte as smallint") {
sqlContext.createDataFrame(Seq((1.toByte, 2.toShort)))
.write.jdbc(jdbcUrl, "byte_to_smallint_test", new Properties)
val df = sqlContext.read.jdbc(jdbcUrl, "byte_to_smallint_test", new Properties)
val schema = df.schema
assert(schema.head.dataType == ShortType)
assert(schema(1).dataType == ShortType)
val rows = df.collect()
assert(rows.length === 1)
assert(rows(0).getShort(0) === 1)
assert(rows(0).getShort(1) === 2)
}
test("character type tests") {
val df = sqlContext.read.jdbc(jdbcUrl, "char_types", new Properties)
val row = df.collect()
assert(row.length == 1)
assert(row(0).length === 5)
assert(row(0).getString(0) === "abcd")
assert(row(0).getString(1) === "efgh")
assert(row(0).getString(2) === "ijkl")
assert(row(0).getString(3) === "mnop")
assert(row(0).getString(4) === "q")
}
test("SPARK-32576: character array type tests") {
val df = sqlContext.read.jdbc(jdbcUrl, "char_array_types", new Properties)
val row = df.collect()
assert(row.length == 1)
assert(row(0).length === 5)
assert(row(0).getSeq[String](0) === Seq("a ", "bcd "))
assert(row(0).getSeq[String](1) === Seq("ef ", "gh "))
assert(row(0).getSeq[String](2) === Seq("i", "j", "kl"))
assert(row(0).getSeq[String](3) === Seq("mnop"))
assert(row(0).getSeq[String](4) === Seq("q", "r"))
}
test("SPARK-34333: money type tests") {
val df = sqlContext.read.jdbc(jdbcUrl, "money_types", new Properties)
val row = df.collect()
assert(row.length === 1)
assert(row(0).length === 1)
assert(row(0).getString(0) === "$1,000.00")
}
}
| ueshin/apache-spark | external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala | Scala | apache-2.0 | 19,773 |
package de.leanovate.swaggercheck.schema.gen
import de.leanovate.swaggercheck.schema.adapter.NodeAdapter
import de.leanovate.swaggercheck.schema.model.{JsonPath, Schema, ValidationResult}
import de.leanovate.swaggercheck.shrinkable.{CheckJsNull, CheckJsValue}
import org.scalacheck.Gen
object GeneratableEmpty extends GeneratableDefinition {
override def generate(schema: GeneratableSchema): Gen[CheckJsValue] = Gen.const(CheckJsNull)
override def validate[T](schema: Schema, path: JsonPath, node: T)
(implicit nodeAdapter: NodeAdapter[T]): ValidationResult = ValidationResult.success
}
| leanovate/swagger-check | json-schema-gen/src/main/scala/de/leanovate/swaggercheck/schema/gen/GeneratableEmpty.scala | Scala | mit | 620 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.components.physics.jbullet
import simplex3d.math.floatx.ConstVec3f
import javax.vecmath.{Matrix4f, Vector3f}
import simx.core.entity.typeconversion.Converter
//Global Types & Local Types
import simx.core.ontology.{types => gt}
import simx.components.physics.jbullet.ontology.{types => lt}
/*
* Created by IntelliJ IDEA.
* User: martin
* Date: 6/7/11
* Time: 10:32 AM
*/
object JBulletConverters {
//Todo:
//This encounters a jbullet bug:
  //When absolute sizes and positions get too small (< 0.1f), jbullet no longer computes correct values.
  //This scale factor is used to magnify all values passed to jbullet,
  //but this has to be solved in a nicer way.
  //
  //!!! Be careful when changing scale to a value other than 1f. !!!
  //!!! This hotfix currently does not support scaling shapes from collada files !!!
  //It is likely that I (martin) forgot to check all locations where scale has to be multiplied
val scale = 1f
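  // Hedged worked example (not part of the original file): with a hypothetical scale = 10f,
  // vectorConverter.revert(ConstVec3f(0.05f, 0f, 0f)) would hand Vector3f(0.5f, 0f, 0f) to jbullet,
  // and vectorConverter.convert divides by the same factor on the way back, so the values seen by
  // the rest of the simulation stay unchanged while jbullet operates above its precision threshold.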
val vectorConverter = new Converter(lt.Vector3, lt.Gravity, lt.Acceleration, lt.Velocity, lt.HalfExtends, lt.Normal, lt.Impulse)(gt.Vector3) {
def revert(from: ConstVec3f): Vector3f =
new Vector3f(from.x*scale, from.y*scale, from.z*scale)
def convert(from: Vector3f): ConstVec3f =
ConstVec3f(from.x, from.y, from.z) * (1f/scale)
}
val transformConverter = new Converter(lt.Transformation)(gt.Transformation) {
def revert(from: simplex3d.math.floatx.ConstMat4f): com.bulletphysics.linearmath.Transform = {
new com.bulletphysics.linearmath.Transform(
new Matrix4f(
from.m00, from.m10, from.m20, from.m30 * scale,
from.m01, from.m11, from.m21, from.m31 * scale,
from.m02, from.m12, from.m22, from.m32 * scale,
from.m03, from.m13, from.m23, from.m33)
)
}
def convert(from: com.bulletphysics.linearmath.Transform): simplex3d.math.floatx.ConstMat4f = {
val matrix = from.getMatrix(new Matrix4f)
simplex3d.math.float.ConstMat4(
matrix.m00, matrix.m10, matrix.m20, matrix.m30,
matrix.m01, matrix.m11, matrix.m21, matrix.m31,
matrix.m02, matrix.m12, matrix.m22, matrix.m32,
matrix.m03 * (1f/scale), matrix.m13 * (1f/scale), matrix.m23 * (1f/scale), matrix.m33)
}
}
def register() {}
} | simulator-x/jbullet-physics | src/simx/components/physics/jbullet/JBulletConverters.scala | Scala | apache-2.0 | 3,143 |
// Generated by <a href="http://scalaxb.org/">scalaxb</a>.
package eveapi.xml.account.char.CharacterInfo
case class Eveapi(currentTime: String,
result: eveapi.xml.account.char.CharacterInfo.Result,
cachedUntil: String,
attributes: Map[String, scalaxb.DataRecord[Any]] = Map()) {
lazy val version = attributes("@version").as[BigInt]
}
case class Result(characterID: BigInt,
name: String,
homeStationID: BigInt,
DoB: String,
race: String,
bloodLine: String,
ancestry: String,
gender: String,
corporationName: String,
corporationID: BigInt,
allianceName: String,
allianceID: BigInt,
factionName: eveapi.xml.account.char.CharacterInfo.FactionName,
factionID: BigInt,
cloneTypeID: BigInt,
cloneName: String,
cloneSkillPoints: BigInt,
freeSkillPoints: BigInt,
freeRespecs: BigInt,
cloneJumpDate: String,
lastRespecDate: String,
lastTimedRespec: String,
remoteStationDate: String,
resultoption: Seq[scalaxb.DataRecord[Any]] = Nil)
trait ResultOption
case class FactionName()
case class AttributesType(
intelligence: BigInt, memory: BigInt, charisma: BigInt, perception: BigInt, willpower: BigInt)
case class Rowset(row: Seq[eveapi.xml.account.char.CharacterInfo.Row] = Nil,
attributes: Map[String, scalaxb.DataRecord[Any]] = Map()) {
lazy val columns = attributes("@columns").as[String]
lazy val key = attributes("@key").as[String]
lazy val name = attributes("@name").as[String]
}
case class Row(attributes: Map[String, scalaxb.DataRecord[Any]] = Map()) {
lazy val cloneName = attributes.get("@cloneName") map { _.as[String] }
lazy val jumpCloneID = attributes.get("@jumpCloneID") map { _.as[BigInt] }
lazy val level = attributes.get("@level") map { _.as[BigInt] }
lazy val locationID = attributes.get("@locationID") map { _.as[BigInt] }
lazy val published = attributes.get("@published") map { _.as[BigInt] }
lazy val roleID = attributes.get("@roleID") map { _.as[BigInt] }
lazy val roleName = attributes.get("@roleName") map { _.as[String] }
lazy val skillpoints = attributes.get("@skillpoints") map { _.as[BigInt] }
lazy val titleID = attributes.get("@titleID") map { _.as[BigInt] }
lazy val titleName = attributes.get("@titleName") map { _.as[String] }
lazy val typeID = attributes.get("@typeID") map { _.as[BigInt] }
lazy val typeName = attributes.get("@typeName") map { _.as[String] }
}
| scala-eveapi/eveapi | xml/src/main/scala/eveapi/xml/char/CharacterInfo/CharacterInfo.scala | Scala | mit | 2,812 |
package doodle
package turtle
import doodle.core._
object Turtle {
final case class State(at: Vec, heading: Angle)
def draw(instructions: List[Instruction], angle: Angle = Angle.zero): Image = {
import Instruction._
import PathElement._
val initialState = State(Vec.zero, angle)
// Note that iterate returns the path in *reversed* order.
def iterate(state: State, instructions: List[Instruction]): (State, List[PathElement]) = {
instructions.foldLeft( (state, List.empty[PathElement]) ){ (accum, elt) =>
val (state, path) = accum
elt match {
case Forward(d) =>
val nowAt = state.at + Vec.polar(d, state.heading)
val element = lineTo(nowAt.toPoint)
(state.copy(at = nowAt), element +: path)
case Turn(a) =>
val nowHeading = state.heading + a
(state.copy(heading = nowHeading), path)
case Branch(i) =>
val (_, branchedPath) = iterate(state, i)
(state, MoveTo(state.at.toPoint) +: (branchedPath ++ path))
case NoOp =>
accum
}
}
}
val (_, path) = iterate(initialState, instructions)
Image.openPath(moveTo(0, 0) :: path.reverse.toList)
}
}
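// Hedged usage sketch (not part of the original file; assumes the Instruction.Forward and
// Instruction.Turn case classes plus Angle.degrees from doodle.core):
//   import Instruction._
//   val square = List.tabulate(4)(_ => List(Forward(100), Turn(Angle.degrees(90)))).flatten
//   val image  = Turtle.draw(square)   // an open path tracing a 100x100 square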
| Angeldude/doodle | shared/src/main/scala/doodle/turtle/Turtle.scala | Scala | apache-2.0 | 1,256 |
package de.tu_berlin.dima.bdapro.flink.oddsemordnilaps.christophholtmann
import org.apache.flink.api.scala._
/**
* Created by christophholtmann on 03/11/2016.
*/
object OddSemordnilaps {
def main(args: Array[String]): Unit =
{
val env = ExecutionEnvironment.getExecutionEnvironment
val input = env.readTextFile(args{0})
val possibleOddSemordnilaps = input.flatMap (_.split("\\\\n"))
.flatMap(_.split(" "))
.filter(i => i.length != 0 && i.toInt % 2 != 0)
.distinct()
.map(i => (i, i.reverse))
val oddSemordnilapsCount = possibleOddSemordnilaps
.join(possibleOddSemordnilaps)
.where(0)
.equalTo(1)
.count()
println("The result is " + oddSemordnilapsCount)
}
} | cristiprg/BDAPRO.GlobalStateML | bdapro-ws1617-flink-jobs/src/main/scala/de/tu_berlin/dima/bdapro/flink/oddsemordnilaps/christophholtmann/OddSemordnilaps.scala | Scala | apache-2.0 | 741 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.predictionio.core.BasePreparator
import org.apache.spark.SparkContext
/** Base class of a parallel preparator.
*
* A parallel preparator can be run in parallel on a cluster and produces a
* prepared data that is distributed across a cluster.
*
* @tparam TD Training data class.
* @tparam PD Prepared data class.
* @group Preparator
*/
abstract class PPreparator[TD, PD]
extends BasePreparator[TD, PD] {
def prepareBase(sc: SparkContext, td: TD): PD = {
prepare(sc, td)
}
/** Implement this method to produce prepared data that is ready for model
* training.
*
* @param sc An Apache Spark context.
* @param trainingData Training data to be prepared.
*/
def prepare(sc: SparkContext, trainingData: TD): PD
}
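// Hedged sketch (not part of the original file): a minimal concrete preparator, assuming
// hypothetical TrainingData and PreparedData classes defined by the surrounding engine.
//   class MyPreparator extends PPreparator[TrainingData, PreparedData] {
//     def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData =
//       PreparedData(trainingData.events.cache())
//   }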
| alex9311/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/PPreparator.scala | Scala | apache-2.0 | 1,435 |
package com.rumblesan.scalapd.util
import akka.actor.Actor
import scala.sys.process.Process
class SubProcess extends Actor {
def receive = {
case SubProcessRun(p) => {
val result = p.exitValue()
sender ! SubProcessFinished(result)
}
}
}
case class SubProcessRun(p: Process)
case class SubProcessFinished(result: Int)
| rumblesan/scalapd | src/main/scala/com/rumblesan/scalapd/util/SubProcess.scala | Scala | mit | 351 |
package scala.tools.nsc.typechecker
import org.junit.Assert.assertEquals
import org.junit.Test
import scala.tools.testkit.BytecodeTesting
class TypedTreeTest extends BytecodeTesting {
override def compilerArgs = "-Ystop-after:typer"
@Test
def constantFoldedOriginalTreeAttachment(): Unit = {
val code =
"""object O {
| final val x = 42
| def f(x: Int) = x
| def f(x: Boolean) = x
| f(O.x)
|}
""".stripMargin
val run = compiler.newRun
run.compileSources(List(BytecodeTesting.makeSourceFile(code, "UnitTestSource.scala")))
val tree = run.units.next().body
val List(t) = tree.filter(_.attachments.all.nonEmpty).toList
assertEquals("42:Set(OriginalTreeAttachment(O.x))", s"$t:${t.attachments.all}")
}
}
| martijnhoekstra/scala | test/junit/scala/tools/nsc/typechecker/TypedTreeTest.scala | Scala | apache-2.0 | 791 |
package grammarcomp
package grammar
import CFGrammar._
import EBNFGrammar._
import java.io._
import scala.io.StdIn
object GrammarWriter {
def dumpPrettyGrammar[T](filename: String, g: Grammar[T]) = {
dumpGrammar(filename, CFGrammar.renameAutoSymbols(g))
}
def dumpGrammar[T](file: File, g: BNFGrammar[T]): Unit = {
dumpFile(file, g)
}
def dumpGrammar[T](file: File, g: Grammar[T]): Unit = {
dumpFile(file, g)
}
def dumpFile(file: File, content: Any): Unit = {
val pw = new PrintWriter(new FileOutputStream(file))
pw.print(content)
pw.flush()
pw.close()
}
def dumpGrammar[T](filename: String, g: Grammar[T]) {
val fullname = filename + ".gram"
dumpGrammar(new File(fullname), g)
println("Dumped grammar to file: " + fullname)
}
} | epfl-lara/GrammarComparison | src/main/scala/grammarcomp/grammar/GrammarWriters.scala | Scala | mit | 809 |
// relevant doc:
// * https://developer.github.com/guides/getting-started/
// * https://developer.github.com/v3/
// A note on terminology: a repository is identified by a combination
// of a user name and a repository name. The user name might be an
// individual's, but is more likely an organization name.
// A note on pagination: many API calls only return the first n
// results, where n is often 30 but may be 100 or a different
// number depending on the particular call. This can lead to
// random-seeming failures when we happen to exceed the limit.
// For now, we add `per_page` if the limit is found to be a
// problem in practice. per_page cannot be increased past
// 100; if we ever find that's not enough in practice, we'd
// have to add additional code to split the request into
// multiple pages. relevant doc:
// * https://developer.github.com/guides/traversing-with-pagination/
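// Hedged illustration of the workaround described above (this exact call is an assumption,
// not taken from this file): raising the page size on a request that hits the default limit,
//   Get(api("issues" / nb / "comments") withQuery Map("per_page" -> "100"))
// mirrors what pullRequestCommits below already does; past 100 results the Link header would
// still have to be traversed page by page.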
package scabot
package github
import scabot.core.BaseRef
trait GithubApi extends GithubApiTypes with GithubJsonProtocol with GithubApiActions
// definitions in topo-order, no cycles in dependencies
trait GithubApiTypes extends core.Core {
// spray json seems to disregard the expected type and won't unmarshall a json number as a String (sometimes github uses longs, sometimes string reps)
type Date = Option[Either[String, Long]]
object CommitStatusConstants {
final val SUCCESS = "success"
final val PENDING = "pending"
final val FAILURE = "failure"
// context to enforce that last commit is green only if all prior commits are also green
final val COMBINED = "combined"
final val REVIEWED = "reviewed"
final val CLA = "cla"
def jenkinsContext(ctx: String) = ctx match {
case COMBINED | REVIEWED | CLA => false
case _ => !ctx.contains("travis-ci")
}
}
import CommitStatusConstants._
case class User(login: String)
case class Author(name: String, email: String) {// , username: Option[String]
override def toString = name
}
case class Repository(name: String, full_name: String, git_url: String,
updated_at: Date, created_at: Date, pushed_at: Date) { // owner: Either[User, Author]
override def toString = full_name
}
case class GitRef(sha: String, label: String, ref: String, repo: Repository, user: User) {
override def toString = s"${repo}#${sha.take(7)}"
}
case class PullRequest(number: Int, state: String, title: String, body: Option[String],
created_at: Date, updated_at: Date, closed_at: Date, merged_at: Date,
head: GitRef, base: GitRef, user: User, merged: Option[Boolean], mergeable: Option[Boolean], merged_by: Option[User]) {
override def toString = s"${base.repo}#$number"
}
case class Reviewers(reviewers: List[String])
//, comments: Int, commits: Int, additions: Int, deletions: Int, changed_files: Int)
case class Label(name: String, color: Option[String] = None, url: Option[String] = None) {
override def toString = name
}
object Milestone {
private val MergeBranch = """Merge to (\\S+)\\b""".r.unanchored
}
case class Milestone(number: Int, state: String, title: String, description: Option[String], creator: User,
created_at: Date, updated_at: Date, closed_at: Option[Date], due_on: Option[Date]) {
override def toString = s"Milestone $title ($state)"
def mergeBranch = description match {
case Some(Milestone.MergeBranch(branch)) => Some(branch)
case _ => None
}
}
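  // Hedged example (not from the original file): a milestone whose description reads
  // "Merge to 2.12.x" yields mergeBranch == Some("2.12.x"); any other description yields None.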
case class Issue(number: Int, state: String, title: String, body: Option[String], user: User, labels: List[Label],
assignee: Option[User], milestone: Option[Milestone], created_at: Date, updated_at: Date, closed_at: Date) {
override def toString = s"Issue #$number"
}
case class CommitInfo(id: Option[String], message: String, timestamp: Date, author: Author, committer: Author)
// added: Option[List[String]], removed: Option[List[String]], modified: Option[List[String]]
case class Commit(sha: String, commit: CommitInfo, url: Option[String] = None)
trait HasState {
def state: String
def success = state == SUCCESS
def pending = state == PENDING
def failure = state == FAILURE
}
case class CombiCommitStatus(state: String, sha: String, statuses: List[CommitStatus], total_count: Int) extends HasState {
lazy val byContext = statuses.groupBy(_.context).toMap
def apply(context: String) = byContext.get(Some(context))
}
trait HasContext {
def context: Option[String]
def combined = context == Some(COMBINED)
}
// TODO: factory method that caps state to 140 chars
case class CommitStatus(state: String, context: Option[String] = None, description: Option[String] = None, target_url: Option[String] = None) extends HasState with HasContext {
def forJob(job: String, baseRef: BaseRef)(implicit lense: JobContextLense): Boolean = lense.contextForJob(job, baseRef) == context
def jobName(baseRef: BaseRef)(implicit lense: JobContextLense): Option[String] = context.flatMap(lense.jobForContext(_, baseRef))
}
case class IssueComment(body: String, user: Option[User] = None, created_at: Date = None, updated_at: Date = None, id: Option[Long] = None) extends PRMessage
case class PullRequestComment(body: String, user: Option[User] = None, commit_id: Option[String] = None, path: Option[String] = None, position: Option[Int] = None,
created_at: Date = None, updated_at: Date = None, id: Option[Long] = None) extends PRMessage
// diff_hunk, original_position, original_commit_id
case class PullRequestEvent(action: String, number: Int, pull_request: PullRequest) extends ProjectMessage with PRMessage
case class PushEvent(ref: String, commits: List[CommitInfo], repository: Repository) extends ProjectMessage
// TODO: https://github.com/scala/scabot/issues/46 --> ref_name is also included, but it comes at the end of the JSON payload, and for some reason we don't see it (chunking?)
// ref_name: String, before: String, after: String, created: Boolean, deleted: Boolean, forced: Boolean, base_ref: Option[String], commits: List[CommitInfo], head_commit: CommitInfo, repository: Repository, pusher: Author)
case class PullRequestReviewCommentEvent(action: String, pull_request: PullRequest, comment: PullRequestComment, repository: Repository) extends ProjectMessage
case class IssueCommentEvent(action: String, issue: Issue, comment: IssueComment, repository: Repository) extends ProjectMessage
// case class AuthApp(name: String, url: String)
// case class Authorization(token: String, app: AuthApp, note: Option[String])
}
import spray.http.BasicHttpCredentials
import spray.json.{RootJsonFormat, DefaultJsonProtocol}
// TODO: can we make this more debuggable?
// TODO: test against https://github.com/github/developer.github.com/tree/master/lib/webhooks
trait GithubJsonProtocol extends GithubApiTypes with DefaultJsonProtocol with core.Configuration {
private type RJF[x] = RootJsonFormat[x]
implicit lazy val _fmtUser : RJF[User] = jsonFormat1(User)
implicit lazy val _fmtAuthor : RJF[Author] = jsonFormat2(Author)
implicit lazy val _fmtRepository : RJF[Repository] = jsonFormat6(Repository)
implicit lazy val _fmtGitRef : RJF[GitRef] = jsonFormat5(GitRef)
implicit lazy val _fmtPullRequest : RJF[PullRequest] = jsonFormat14(PullRequest)
implicit lazy val _fmtReviewers : RJF[Reviewers] = jsonFormat1(Reviewers)
implicit lazy val _fmtLabel : RJF[Label] = jsonFormat3(Label)
implicit lazy val _fmtMilestone : RJF[Milestone] = jsonFormat9(Milestone.apply)
implicit lazy val _fmtIssue : RJF[Issue] = jsonFormat11(Issue)
implicit lazy val _fmtCommitInfo : RJF[CommitInfo] = jsonFormat5(CommitInfo)
implicit lazy val _fmtCommit : RJF[Commit] = jsonFormat3(Commit)
implicit lazy val _fmtCommitStatus : RJF[CommitStatus] = jsonFormat4(CommitStatus.apply)
implicit lazy val _fmtCombiCommitStatus: RJF[CombiCommitStatus] = jsonFormat(CombiCommitStatus, "state", "sha", "statuses", "total_count") // need to specify field names because we added methods to the case class..
implicit lazy val _fmtIssueComment : RJF[IssueComment] = jsonFormat5(IssueComment)
implicit lazy val _fmtPullRequestComment: RJF[PullRequestComment] = jsonFormat8(PullRequestComment)
implicit lazy val _fmtPullRequestEvent : RJF[PullRequestEvent] = jsonFormat3(PullRequestEvent)
implicit lazy val _fmtPushEvent : RJF[PushEvent] = jsonFormat3(PushEvent)
implicit lazy val _fmtPRCommentEvent : RJF[PullRequestReviewCommentEvent] = jsonFormat4(PullRequestReviewCommentEvent)
implicit lazy val _fmtIssueCommentEvent: RJF[IssueCommentEvent] = jsonFormat4(IssueCommentEvent)
// implicit lazy val _fmtAuthorization : RJF[Authorization] = jsonFormat3(Authorization)
// implicit lazy val _fmtAuthApp : RJF[AuthApp] = jsonFormat2(AuthApp)
}
trait GithubApiActions extends GithubJsonProtocol with core.HttpClient {
class GithubConnection(config: Config.Github) {
import spray.http.{GenericHttpCredentials, Uri}
import spray.httpx.SprayJsonSupport._
import spray.client.pipelining._
// NOTE: the token (https://github.com/settings/applications#personal-access-tokens)
// must belong to a collaborator of the repo (https://github.com/$user/$repo/settings/collaboration)
// or we can't set commit statuses
private implicit def connection = setupConnection(config.host, Some(new BasicHttpCredentials(config.token, "x-oauth-basic"))) // https://developer.github.com/v3/auth/#basic-authentication
// addHeader("X-My-Special-Header", "fancy-value")
// "Accept" -> "application/vnd.github.v3+json"
def api(rest: String) = Uri(s"/repos/${config.user}/${config.repo}" / rest)
import spray.json._
def pullRequests = p[List[PullRequest]] (Get(api("pulls")))
def closedPullRequests = p[List[PullRequest]] (Get(api("pulls") withQuery Map("state" -> "closed")))
def pullRequest(nb: Int) = p[PullRequest] (Get(api("pulls" / nb)))
def pullRequestCommits(nb: Int) = p[List[Commit]] (Get(api("pulls" / nb / "commits")
withQuery Map("per_page" -> "100")))
def deletePRComment(id: String) = px (Delete(api("pulls" / "comments" / id)))
def requestReview(nb: Int, reviewers: Reviewers) = px (Post(api("pulls" / nb / "requested_reviewers"), reviewers)~>
/** https://developer.github.com/changes/2016-12-14-reviews-api/ */ addHeader("Accept", "application/vnd.github.black-cat-preview+json"))
def issueComments(nb: Int) = p[List[IssueComment]](Get(api("issues" / nb / "comments")))
def postIssueComment(nb: Int, c: IssueComment) = p[IssueComment] (Post(api("issues" / nb / "comments"), c))
def issue(nb: Int) = p[Issue] (Get(api("issues" / nb)))
def setMilestone(nb: Int, milestone: Int) = px (Patch(api("issues" / nb), JsObject("milestone" -> JsNumber(milestone))))
def addLabel(nb: Int, labels: List[Label]) = p[Label] (Post(api("issues" / nb / "labels"), labels))
def deleteLabel(nb: Int, label: String) = px (Delete(api("issues" / nb / "labels" / label)))
def labels(nb: Int) = p[List[Label]] (Get(api("issues" / nb / "labels")))
// most recent status comes first in the resulting list!
def commitStatus(sha: String) = p[CombiCommitStatus] (Get(api("commits" / sha / "status")))
def postStatus(sha: String, status: CommitStatus) = p[CommitStatus] (Post(api("statuses" / sha), status))
def allLabels = p[List[Label]] (Get(api("labels")))
def createLabel(label: Label) = p[List[Label]] (Post(api("labels"), label))
def postCommitComment(sha: String, c: PullRequestComment)= p[PullRequestComment] (Post(api("commits" / sha / "comments"), c))
def commitComments(sha: String) = p[List[PullRequestComment]](Get(api("commits" / sha / "comments")))
def deleteCommitComment(id: String): Unit = px (Delete(api("comments" / id)))
def repoMilestones(state: String = "open") = p[List[Milestone]] (Get(api("milestones") withQuery Map("state" -> state)))
// def editPRComment(user: String, repo: String, id: String, comment: IssueComment) = patch[IssueComment](pulls + "/comments/$id")
// // Normalize sha if it's not 40 chars
// // GET /repos/:owner/:repo/commits/:sha
// def normalizeSha(user: String, repo: String, sha: String): String =
// if (sha.length == 40) sha
// else try {
// val url = makeAPIurl(s"/repos/$user/$repo/commits/$sha")
// val action = url >- (x => parseJsonTo[PRCommit](x).sha)
// Http(action)
// } catch {
// case e: Exception =>
// println(s"Error: couldn't normalize $sha (for $user/$repo): "+ e)
// sha
// }
}
}
//object CommitStatus {
// final val PENDING = "pending"
// final val SUCCESS = "success"
// final val ERROR = "error"
// final val FAILURE = "failure"
//
// // to distinguish PENDING jobs that are done but waiting on other PENDING jobs from truly pending jobs
// // the message of other PENDING jobs should never start with "$job OK"
// final val FAKE_PENDING = "OK"
//
// // TODO: assert(!name.contains(" ")) for all job* methods below
// def jobQueued(name: String) = CommitStatus(PENDING, None, Some(name +" queued."))
// def jobStarted(name: String, url: String) = CommitStatus(PENDING, Some(url), Some(name +" started."))
// // assert(!message.startsWith(FAKE_PENDING))
// def jobEnded(name: String, url: String, ok: Boolean, message: String) =
// CommitStatus(if(ok) SUCCESS else ERROR, Some(url), Some((name +" "+ message).take(140)))
//
// // only used for last commit
// def jobEndedBut(name: String, url: String, message: String)(prev: String) =
// CommitStatus(PENDING, Some(url), Some((name +" "+ FAKE_PENDING +" but waiting for "+ prev).take(140)))
//
// // depends on the invariant maintained by overruleSuccess so that we only have to look at the most recent status
// def jobDoneOk(cs: List[CommitStatus]) = cs.headOption.map(st => st.success || st.fakePending).getOrElse(false)
//
//
// /** Find commit status that's either truly pending (not fake pending) or that found an error,
// * and for which there's no corresponding successful commit status
// */
// def notDoneOk(commitStati: List[CommitStatus]): Iterable[CommitStatus] = {
// val grouped = commitStati.groupBy(_.job)
// val problems = grouped.flatMap {
// case (Some(jobName), jobAndCommitStati) if !jobAndCommitStati.exists(_.success) =>
// jobAndCommitStati.filter(cs => (cs.pending && !cs.fakePending) || cs.error)
// case _ =>
// Nil
// }
// // println("notDoneOk grouped: "+ grouped.mkString("\\n"))
// // println("problems: "+ problems)
// problems
// }
//}
//// note: it looks like the buildbot github user needs administrative permission to create labels,
//// but also to set the commit status
//object Authenticate {
//
// private[this] val authorizations = :/("api.github.com").secure / "authorizations" <:< Map("User-Agent" -> USER_AGENT)
//
// val authScopes = """{
// "scopes": [
// "user",
// "repo",
// "repo:status"
// ],
// "note": "scabot API Access"
//}"""
//
// /** This method looks for a previous GH authorization for this API and retrieves it, or
// * creates a new one.
// */
// def authenticate(user: String, pw: String): Authorization = {
// val previousAuth: Option[Authorization] =
// (getAuthentications(user,pw) filter (_.note == Some("scabot API Access"))).headOption
// previousAuth getOrElse makeAuthentication(user, pw)
// }
//
//
// def makeAuthentication(user: String, pw: String): Authorization =
// Http(authorizations.POST.as_!(user, pw) << authScopes >- parseJsonTo[Authorization])
//
// def getAuthentications(user: String, pw: String): List[Authorization] =
// Http(authorizations.as_!(user, pw) >- parseJsonTo[List[Authorization]])
//
// def deleteAuthentication(auth: Authorization, user: String, pw: String): Unit =
// Http( (authorizations / auth.id).DELETE.as_!(user,pw) >|)
//
// def deleteAuthentications(user: String, pw: String): Unit =
// getAuthentications(user, pw) foreach { a =>
// deleteAuthentication(a, user, pw)
// }
//}
//
//case class PullRequest(
// number: Int,
// head: GitRef,
// base: GitRef,
// user: User,
// title: String,
// body: String,
// state: String,
// updated_at: String,
// created_at: String,
// mergeable: Option[Boolean],
// milestone: Option[Milestone] // when treating an issue as a pull
// ) extends Ordered[PullRequest] {
// def compare(other: PullRequest): Int = number compare other.number
// def sha10 = head.sha10
// def ref = head.ref
// def branch = head.label.replace(':', '/')
// def date = updated_at takeWhile (_ != 'T')
// def time = updated_at drop (date.length + 1)
//
// override def toString = s"${base.repo.owner.login}/${base.repo.name}#$number"
//}
//
//// {
//// "url": "https://api.github.com/repos/octocat/Hello-World/milestones/1",
//// "number": 1,
//// "state": "open",
//// "title": "v1.0",
//// "description": "",
//// "creator": {
//// "login": "octocat",
//// "id": 1,
//// "avatar_url": "https://github.com/images/error/octocat_happy.gif",
//// "gravatar_id": "somehexcode",
//// "url": "https://api.github.com/users/octocat"
//// },
//// "open_issues": 4,
//// "closed_issues": 8,
//// "created_at": "2011-04-10T20:09:31Z",
//// "due_on": null
//// }
////]
| scala/scabot | github/src/main/scala/scabot/github/GithubApi.scala | Scala | apache-2.0 | 18,944 |
package scala.tools.scalap
package scalax
package rules
package scalasig
sealed trait Type
trait FunctionType extends Type {
def resultType: Type
def paramSymbols: Seq[Symbol]
}
case object NoType extends Type
case object NoPrefixType extends Type
case class ThisType(symbol : Symbol) extends Type
case class SuperType(typerRef: Type, superTypeRef: Type) extends Type
case class SingleType(typeRef : Type, symbol : Symbol) extends Type
case class ConstantType(constant : Any) extends Type
case class TypeRefType(prefix : Type, symbol : Symbol, typeArgs : Seq[Type]) extends Type
case class TypeBoundsType(lower : Type, upper : Type) extends Type
case class RefinedType(classSym : Symbol, typeRefs : List[Type]) extends Type
case class ClassInfoType(symbol : Symbol, typeRefs : Seq[Type]) extends Type
case class ClassInfoTypeWithCons(symbol : Symbol, typeRefs : Seq[Type], cons: String) extends Type
case class MethodType(resultType : Type, paramSymbols : Seq[Symbol]) extends FunctionType
case class NullaryMethodType(resultType : Type) extends Type
case class PolyType(typeRef : Type, symbols : Seq[TypeSymbol]) extends Type
case class PolyTypeWithCons(typeRef : Type, symbols : Seq[TypeSymbol], cons: String) extends Type
case class ImplicitMethodType(resultType : Type, paramSymbols : Seq[Symbol]) extends FunctionType
case class AnnotatedType(typeRef : Type, attribTreeRefs : List[Int]) extends Type
case class AnnotatedWithSelfType(typeRef : Type, symbol : Symbol, attribTreeRefs : List[Int]) extends Type
case class DeBruijnIndexType(typeLevel : Int, typeIndex : Int) extends Type
case class ExistentialType(typeRef : Type, symbols : Seq[Symbol]) extends Type
| LPTK/intellij-scala | scalap/src/scalap/scalax/rules/scalasig/Type.scala | Scala | apache-2.0 | 1,675 |
package us.stivers.blue.route
import scalax.util.{Try,Success,Failure}
import us.stivers.blue.http.{Request}
/**
* Finds the best match route for a given request.
*/
trait Router extends (Request=>Try[Route]) {
def routes: Iterable[Route]
}
| cstivers78/blue | blue-core/src/main/scala/us/stivers/blue/route/Router.scala | Scala | apache-2.0 | 247 |
package blended.itestsupport.condition
import akka.actor.Actor
import blended.util.logging.Logger
import scala.concurrent.{ExecutionContext, Future}
/**
* An Actor to be used by [[AsyncCondition]].
*/
abstract class AsyncChecker extends Actor {
import AsyncChecker._
protected implicit val ctxt : ExecutionContext= context.system.dispatcher
private[this] val log : Logger = Logger[AsyncChecker]
case object Tick
case object Stop
def performCheck(condition: AsyncCondition): Future[Boolean]
def receive: Receive = initializing
def initializing: Receive = {
case CheckAsyncCondition(condition) =>
log.debug("Starting asynchronous condition checker")
self ! Tick
context.become(checking(condition))
}
def checking(condition: AsyncCondition): Receive = {
case Tick =>
log.debug(s"Checking asynchronous [${condition.description}] condition ....")
performCheck(condition).map{
case true =>
log.debug(s"Asynchronous condition [${condition.description}] is now satisfied.")
condition.isSatisfied.set(true)
context.stop(self)
case false =>
log.debug(s"Scheduling next condition check in [${condition.interval}]")
context.system.scheduler.scheduleOnce(condition.interval, self, Tick)
}
}
}
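// Hedged sketch (not part of the original file): a minimal concrete checker, assuming a
// hypothetical probe function supplied by the test that owns the condition.
//   class ProbeChecker(probe: () => Future[Boolean]) extends AsyncChecker {
//     override def performCheck(condition: AsyncCondition): Future[Boolean] = probe()
//   }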
object AsyncChecker {
/**
* Use this object to kick off an Asynchronous checker.
*/
case class CheckAsyncCondition(condition: AsyncCondition)
} | woq-blended/blended | blended.itestsupport/src/main/scala/blended/itestsupport/condition/AsyncChecker.scala | Scala | apache-2.0 | 1,480 |
package no.vestein.webapp.eventhandler
abstract class Event
| WoodStone/PurpleRain-ScalaJS | src/main/scala/no/vestein/webapp/eventhandler/Event.scala | Scala | gpl-3.0 | 61 |
package wakfutcp.protocol.common
import enumeratum.values.{IntEnum, IntEnumEntry}
import wakfutcp.protocol.Codec
import scala.collection.immutable
sealed abstract class Community(val value: Int) extends IntEnumEntry with Serializable
case object Community extends IntEnum[Community] {
import Codec._
import cats.syntax.invariant._
implicit val codec: Codec[Community] =
int.imap(withValue)(_.value)
case object FR extends Community(0)
case object UK extends Community(1)
case object INT extends Community(2)
case object DE extends Community(3)
case object ES extends Community(4)
case object RU extends Community(5)
case object PT extends Community(6)
case object NL extends Community(7)
case object JP extends Community(8)
case object IT extends Community(9)
case object NA extends Community(10)
case object CN extends Community(11)
case object ASIA extends Community(12)
case object TW extends Community(13)
def values: immutable.IndexedSeq[Community] = findValues
}
| OpenWakfu/wakfutcp | protocol/src/main/scala/wakfutcp/protocol/common/Community.scala | Scala | mit | 1,017 |
package com.vivint.ceph
package model
import java.util.UUID
case class PersistentState(
id: UUID,
cluster: String,
role: JobRole.EnumVal,
goal: Option[RunState.EnumVal] = None,
lastLaunched: Option[RunState.EnumVal] = None,
reservationConfirmed: Boolean = false,
reservationId: Option[UUID] = None,
slaveId: Option[String] = None,
taskId: Option[String] = None,
location: Location = Location.empty) {
def ipLocation: Option[IPLocationLike] = location match {
case i: IPLocationLike => Some(i)
case _ => None
}
def serviceLocation: Option[ServiceLocation] = location match {
case s: ServiceLocation => Some(s)
case _ => None
}
if (reservationConfirmed) {
require(slaveId.nonEmpty, "slaveId must be set if reservationConfirmed is set")
require(reservationId.nonEmpty, "reservationId must be set if reservationConfirmed is set")
}
def resourcesReserved =
slaveId.nonEmpty
}
| vivint-smarthome/ceph-on-mesos | src/main/scala/com/vivint/ceph/model/PersistentState.scala | Scala | apache-2.0 | 939 |
package scalding
import sbt._
import Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings
import com.typesafe.tools.mima.plugin.MimaKeys._
import scala.collection.JavaConverters._
object ScaldingBuild extends Build {
val sharedSettings = Project.defaultSettings ++ assemblySettings ++ Seq(
organization := "com.twitter",
//TODO: Change to 2.10.* when Twitter moves to Scala 2.10 internally
scalaVersion := "2.9.3",
crossScalaVersions := Seq("2.9.3", "2.10.3"),
javacOptions ++= Seq("-source", "1.6", "-target", "1.6"),
javacOptions in doc := Seq("-source", "1.6"),
libraryDependencies ++= Seq(
"org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.9" % "test",
"org.mockito" % "mockito-all" % "1.8.5" % "test"
),
resolvers ++= Seq(
"snapshots" at "http://oss.sonatype.org/content/repositories/snapshots",
"releases" at "http://oss.sonatype.org/content/repositories/releases",
"Concurrent Maven Repo" at "http://conjars.org/repo",
"Clojars Repository" at "http://clojars.org/repo",
"Twitter Maven" at "http://maven.twttr.com"
),
parallelExecution in Test := false,
scalacOptions ++= Seq("-unchecked", "-deprecation"),
// Uncomment if you don't want to run all the tests before building assembly
// test in assembly := {},
// Publishing options:
publishMavenStyle := true,
publishArtifact in Test := false,
pomIncludeRepository := {
x => false
},
publishTo <<= version { v =>
Some(
if (v.trim.endsWith("SNAPSHOT"))
Opts.resolver.sonatypeSnapshots
else
Opts.resolver.sonatypeStaging
//"twttr" at "http://artifactory.local.twitter.com/libs-releases-local"
)
},
// Janino includes a broken signature, and is not needed:
excludedJars in assembly <<= (fullClasspath in assembly) map {
cp =>
val excludes = Set("jsp-api-2.1-6.1.14.jar", "jsp-2.1-6.1.14.jar",
"jasper-compiler-5.5.12.jar", "janino-2.5.16.jar")
cp filter {
jar => excludes(jar.data.getName)
}
},
// Some of these files have duplicates, let's ignore:
mergeStrategy in assembly <<= (mergeStrategy in assembly) {
(old) => {
case s if s.endsWith(".class") => MergeStrategy.last
case s if s.endsWith("project.clj") => MergeStrategy.concat
case s if s.endsWith(".html") => MergeStrategy.last
case s if s.endsWith(".dtd") => MergeStrategy.last
case s if s.endsWith(".xsd") => MergeStrategy.last
case s if s.endsWith(".jnilib") => MergeStrategy.rename
case s if s.endsWith("jansi.dll") => MergeStrategy.rename
case x => old(x)
}
},
pomExtra := (
<url>https://github.com/twitter/scalding</url>
<licenses>
<license>
<name>Apache 2</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
<comments>A business-friendly OSS license</comments>
</license>
</licenses>
<scm>
<url>git@github.com:twitter/scalding.git</url>
<connection>scm:git:git@github.com:twitter/scalding.git</connection>
</scm>
<developers>
<developer>
<id>posco</id>
<name>Oscar Boykin</name>
<url>http://twitter.com/posco</url>
</developer>
<developer>
<id>avibryant</id>
<name>Avi Bryant</name>
<url>http://twitter.com/avibryant</url>
</developer>
<developer>
<id>argyris</id>
<name>Argyris Zymnis</name>
<url>http://twitter.com/argyris</url>
</developer>
</developers>)
) ++ mimaDefaultSettings
lazy val scalding = Project(
id = "scalding",
base = file("."),
settings = sharedSettings ++ DocGen.publishSettings
).settings(
test := {},
publish := {}, // skip publishing for this root project.
publishLocal := {}
).aggregate(
scaldingArgs,
scaldingDate,
scaldingCore,
scaldingCommons,
scaldingAvro,
scaldingParquet,
scaldingRepl,
scaldingJson,
scaldingJdbc,
maple
)
/**
* This returns the youngest jar we released that is compatible with
* the current.
*/
val unreleasedModules = Set[String]()
def youngestForwardCompatible(subProj: String) =
Some(subProj)
.filterNot(unreleasedModules.contains(_))
.map {
s => "com.twitter" % ("scalding-" + s + "_2.9.2") % "0.8.5"
}
def module(name: String) = {
val id = "scalding-%s".format(name)
Project(id = id, base = file(id), settings = sharedSettings ++ Seq(
Keys.name := id,
previousArtifact := youngestForwardCompatible(name))
)
}
lazy val scaldingArgs = module("args")
lazy val scaldingDate = module("date")
lazy val cascadingVersion =
System.getenv.asScala.getOrElse("SCALDING_CASCADING_VERSION", "2.5.2")
lazy val cascadingJDBCVersion =
System.getenv.asScala.getOrElse("SCALDING_CASCADING_JDBC_VERSION", "2.5.1")
val hadoopVersion = "1.1.2"
val algebirdVersion = "0.5.0"
val bijectionVersion = "0.6.2"
val chillVersion = "0.3.6"
val slf4jVersion = "1.6.6"
lazy val scaldingCore = module("core").settings(
libraryDependencies ++= Seq(
"cascading" % "cascading-core" % cascadingVersion,
"cascading" % "cascading-local" % cascadingVersion,
"cascading" % "cascading-hadoop" % cascadingVersion,
"com.twitter" %% "chill" % chillVersion,
"com.twitter" % "chill-hadoop" % chillVersion,
"com.twitter" % "chill-java" % chillVersion,
"com.twitter" %% "bijection-core" % bijectionVersion,
"com.twitter" %% "algebird-core" % algebirdVersion,
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"org.slf4j" % "slf4j-api" % slf4jVersion,
"org.slf4j" % "slf4j-log4j12" % slf4jVersion % "provided"
)
).dependsOn(scaldingArgs, scaldingDate, maple)
lazy val scaldingCommons = Project(
id = "scalding-commons",
base = file("scalding-commons"),
settings = sharedSettings
).settings(
name := "scalding-commons",
previousArtifact := Some("com.twitter" % "scalding-commons_2.9.2" % "0.2.0"),
libraryDependencies ++= Seq(
"com.backtype" % "dfs-datastores-cascading" % "1.3.4",
"com.backtype" % "dfs-datastores" % "1.3.4",
// TODO: split into scalding-protobuf
"com.google.protobuf" % "protobuf-java" % "2.4.1",
"com.twitter" %% "bijection-core" % bijectionVersion,
"com.twitter" %% "algebird-core" % algebirdVersion,
"com.twitter" %% "chill" % chillVersion,
"com.twitter.elephantbird" % "elephant-bird-cascading2" % "4.4",
"com.hadoop.gplcompression" % "hadoop-lzo" % "0.4.16",
// TODO: split this out into scalding-thrift
"org.apache.thrift" % "libthrift" % "0.5.0",
"org.slf4j" % "slf4j-api" % slf4jVersion,
"org.slf4j" % "slf4j-log4j12" % slf4jVersion % "provided",
"org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
)
).dependsOn(scaldingArgs, scaldingDate, scaldingCore)
lazy val scaldingAvro = Project(
id = "scalding-avro",
base = file("scalding-avro"),
settings = sharedSettings
).settings(
name := "scalding-avro",
previousArtifact := Some("com.twitter" % "scalding-avro_2.9.2" % "0.1.0"),
libraryDependencies ++= Seq(
"cascading.avro" % "avro-scheme" % "2.1.2",
"org.apache.avro" % "avro" % "1.7.4",
"org.slf4j" % "slf4j-api" % slf4jVersion,
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"org.slf4j" % "slf4j-log4j12" % slf4jVersion % "test",
"org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
)
).dependsOn(scaldingCore)
lazy val scaldingParquet = Project(
id = "scalding-parquet",
base = file("scalding-parquet"),
settings = sharedSettings
).settings(
name := "scalding-parquet",
//previousArtifact := Some("com.twitter" % "scalding-parquet_2.9.2" % "0.1.0"),
previousArtifact := None,
libraryDependencies ++= Seq(
"com.twitter" % "parquet-cascading" % "1.3.2",
"org.slf4j" % "slf4j-api" % slf4jVersion,
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"org.slf4j" % "slf4j-log4j12" % slf4jVersion % "test",
"org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
)
).dependsOn(scaldingCore)
lazy val scaldingRepl = Project(
id = "scalding-repl",
base = file("scalding-repl"),
settings = sharedSettings
).settings(
name := "scalding-repl",
previousArtifact := None,
libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
"org.scala-lang" % "jline" % scalaVersion,
"org.scala-lang" % "scala-compiler" % scalaVersion,
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided"
)
}
).dependsOn(scaldingCore)
lazy val scaldingJson = Project(
id = "scalding-json",
base = file("scalding-json"),
settings = sharedSettings
).settings(
name := "scalding-json",
previousArtifact := None,
libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.2.3"
)
}
).dependsOn(scaldingCore)
lazy val scaldingJdbc = Project(
id = "scalding-jdbc",
base = file("scalding-jdbc"),
settings = sharedSettings
).settings(
name := "scalding-jdbc",
previousArtifact := None,
libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"cascading" % "cascading-jdbc-core" % cascadingJDBCVersion
)
}
).dependsOn(scaldingCore)
lazy val maple = Project(
id = "maple",
base = file("maple"),
settings = sharedSettings
).settings(
name := "maple",
previousArtifact := None,
crossPaths := false,
autoScalaLibrary := false,
libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
"org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
"org.apache.hbase" % "hbase" % "0.94.5" % "provided",
"cascading" % "cascading-hadoop" % cascadingVersion
)
}
)
}
| danosipov/scalding | project/Build.scala | Scala | apache-2.0 | 10,715 |
package models
import play.api.libs.json.Json
import reactivemongo.api.indexes.{IndexType, Index}
import reactivemongo.bson.BSONObjectID
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.Random
case class Token(userId: String, id: String)
object Token extends MongoModel("tokens") {
val TOKEN_ID_SIZE = 64
private def generatedId = Random.alphanumeric.take(TOKEN_ID_SIZE).mkString
def newTokenForUser(userId: String) = {
val token = new Token(userId, Token.generatedId)
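    // Note: create(token) runs the insert asynchronously; its Future is not awaited here,
    // so the freshly built token is returned without waiting for the write to complete.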
val futureToken = create(token)
token
}
def create(token: Token) = collection.insert(token)
def findById(id: String) = collection.find(Json.obj("_id" -> id)).cursor[Token].collect[List]()
} | jdauphant/play_api_example | app/models/Token.scala | Scala | isc | 714 |
package s3.website.model
import java.io.File
import s3.website.model.Files.recursiveListFiles
// ssg = static site generator
trait Ssg {
def outputDirectory: String
}
object Ssg {
val automaticallySupportedSiteGenerators = Jekyll :: Nanoc :: Middleman :: Nil
def autodetectSiteDir(workingDirectory: File): Option[File] =
recursiveListFiles(workingDirectory).find { file =>
file.isDirectory && automaticallySupportedSiteGenerators.exists(ssg => file.getAbsolutePath.endsWith(ssg.outputDirectory))
}
}
case object Jekyll extends Ssg {
def outputDirectory = "_site"
}
case object Nanoc extends Ssg {
def outputDirectory = s"public${File.separatorChar}output"
}
case object Middleman extends Ssg {
def outputDirectory = "build"
}
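// Illustrative usage (a sketch, not part of the original sources): detect the generated-site
// directory of a supported generator somewhere under a working directory.
//   val siteDir: Option[File] = Ssg.autodetectSiteDir(new File("."))
//   // e.g. Some(<project>/_site) when Jekyll build output is found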
| naytev/naytev-blog | vendor/bundle/ruby/2.3.0/gems/s3_website-2.14.0/src/main/scala/s3/website/model/ssg.scala | Scala | mit | 759 |
package io.toolsplus.atlassian.connect.play.actions.asymmetric
import io.toolsplus.atlassian.connect.play.TestSpec
import io.toolsplus.atlassian.connect.play.actions.{JwtActionRefiner, JwtRequest}
import io.toolsplus.atlassian.connect.play.api.models.DefaultAtlassianHostUser
import io.toolsplus.atlassian.connect.play.api.models.Predefined.ClientKey
import io.toolsplus.atlassian.connect.play.api.repositories.AtlassianHostRepository
import io.toolsplus.atlassian.connect.play.auth.jwt
import io.toolsplus.atlassian.connect.play.auth.jwt.asymmetric.{AsymmetricJwtAuthenticationProvider, PublicKeyProvider}
import io.toolsplus.atlassian.connect.play.auth.jwt.symmetric.SymmetricJwtAuthenticationProvider
import io.toolsplus.atlassian.connect.play.auth.jwt.{CanonicalHttpRequestQshProvider, ContextQshProvider, JwtCredentials}
import io.toolsplus.atlassian.connect.play.models.PlayAddonProperties
import io.toolsplus.atlassian.jwt.api.Predef.RawJwt
import io.toolsplus.atlassian.jwt.generators.util.JwtTestHelper
import org.scalacheck.Gen.alphaStr
import org.scalacheck.Shrink
import org.scalatest.EitherValues
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.Configuration
import play.api.http.Status.UNAUTHORIZED
import play.api.mvc.BodyParsers
import play.api.test.Helpers.{contentAsString, status}
import java.security.interfaces.RSAPublicKey
import java.security.{KeyPair, PrivateKey}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class AsymmetricallySignedAtlassianHostUserActionSpec
extends TestSpec
with GuiceOneAppPerSuite
with EitherValues {
val config: Configuration = app.configuration
val appProperties = new PlayAddonProperties(config)
val parser: BodyParsers.Default = app.injector.instanceOf[BodyParsers.Default]
val hostRepository: AtlassianHostRepository = mock[AtlassianHostRepository]
val symmetricJwtAuthenticationProvider =
new SymmetricJwtAuthenticationProvider(hostRepository)
val keyId: String = "0e50fccb-239d-4991-a5db-dc850ba3f236"
val keyPair: KeyPair = JwtTestHelper.generateKeyPair()
val publicKey: RSAPublicKey = keyPair.getPublic.asInstanceOf[RSAPublicKey]
val privateKey: PrivateKey = keyPair.getPrivate
val publicKeyProvider: PublicKeyProvider = mock[PublicKeyProvider]
val asymmetricJwtAuthenticationProvider =
new AsymmetricJwtAuthenticationProvider(appProperties,
publicKeyProvider,
hostRepository)
val jwtActionRefiner = new JwtActionRefiner()
"AsymmetricallySignedAtlassianHostUserActionRefiner" when {
"refining an asymmetrically signed JwtRequest with a context QSH claim" should {
implicit val rawJwtNoShrink: Shrink[RawJwt] = Shrink.shrinkAny
val refiner =
AsymmetricallySignedAtlassianHostUserActionRefiner(asymmetricJwtAuthenticationProvider,
ContextQshProvider)
"successfully refine to MaybeAtlassianHostUserRequest if host is installed" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> ContextQshProvider.qsh,
"aud" -> appProperties.baseUrl))) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
val hostUser =
DefaultAtlassianHostUser(host, Option(subject))
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
(hostRepository
.findByClientKey(_: ClientKey)) expects host.clientKey returning Future
.successful(Some(host))
val result = await {
refiner.refine(jwtRequest)
}
result mustBe Right(
MaybeAtlassianHostUserRequest(Some(hostUser), jwtRequest))
}
}
}
"successfully refine to MaybeAtlassianHostUserRequest if host is not installed" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> ContextQshProvider.qsh,
"aud" -> appProperties.baseUrl))) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
(hostRepository
.findByClientKey(_: ClientKey)) expects host.clientKey returning Future
.successful(None)
val result = await {
refiner.refine(jwtRequest)
}
result mustBe Right(
MaybeAtlassianHostUserRequest(None, jwtRequest))
}
}
}
"fail to refine if QSH provider is CanonicalHttpRequestQshProvider" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> ContextQshProvider.qsh,
"aud" -> appProperties.baseUrl))) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
val result =
AsymmetricallySignedAtlassianHostUserActionRefiner(
asymmetricJwtAuthenticationProvider,
CanonicalHttpRequestQshProvider)
.refine(jwtRequest)
status(result.map(_.left.value)) mustBe UNAUTHORIZED
            contentAsString(result.map(_.left.value)) must startWith("JWT validation failed")
}
}
}
}
"refining an asymmetrically signed JwtRequest with a HTTP request QSH claim" should {
implicit val rawJwtNoShrink: Shrink[RawJwt] = Shrink.shrinkAny
val refiner =
AsymmetricallySignedAtlassianHostUserActionRefiner(asymmetricJwtAuthenticationProvider,
CanonicalHttpRequestQshProvider)
"successfully refine to MaybeAtlassianHostUserRequest if host is installed" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> CanonicalHttpRequestQshProvider.qsh(
canonicalHttpRequest),
"aud" -> appProperties.baseUrl)
)) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
val hostUser =
DefaultAtlassianHostUser(host, Option(subject))
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
(hostRepository
.findByClientKey(_: ClientKey)) expects host.clientKey returning Future
.successful(Some(host))
val result = await {
refiner.refine(jwtRequest)
}
result mustBe Right(
MaybeAtlassianHostUserRequest(Some(hostUser), jwtRequest))
}
}
}
"successfully refine to MaybeAtlassianHostUserRequest if host is not installed" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> CanonicalHttpRequestQshProvider.qsh(
canonicalHttpRequest),
"aud" -> appProperties.baseUrl)
)) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
(hostRepository
.findByClientKey(_: ClientKey)) expects host.clientKey returning Future
.successful(None)
val result = await {
refiner.refine(jwtRequest)
}
result mustBe Right(
MaybeAtlassianHostUserRequest(None, jwtRequest))
}
}
}
"fail to refine if QSH provider is ContextQshProvider" in {
forAll(playRequestGen, atlassianHostGen, alphaStr) {
(request, host, subject) =>
val canonicalHttpRequest = jwt.CanonicalPlayHttpRequest(request)
forAll(
signedAsymmetricJwtStringGen(
keyId,
privateKey,
Seq("iss" -> host.clientKey,
"sub" -> subject,
"qsh" -> CanonicalHttpRequestQshProvider.qsh(
canonicalHttpRequest),
"aud" -> appProperties.baseUrl)
)) { jwt =>
val jwtRequest =
JwtRequest(JwtCredentials(jwt, canonicalHttpRequest), request)
(publicKeyProvider
.fetchPublicKey(_: String)) expects keyId returning Future
.successful(Right(JwtTestHelper.toPemString(publicKey)))
val result =
AsymmetricallySignedAtlassianHostUserActionRefiner(
asymmetricJwtAuthenticationProvider,
ContextQshProvider)
.refine(jwtRequest)
status(result.map(_.left.value)) mustBe UNAUTHORIZED
            contentAsString(result.map(_.left.value)) must startWith("JWT validation failed")
}
}
}
}
}
}
| toolsplus/atlassian-connect-play | modules/core/test/io/toolsplus/atlassian/connect/play/actions/asymmetric/AsymmetricallySignedAtlassianHostUserActionSpec.scala | Scala | apache-2.0 | 11,180 |
package spatial.lang
package control
import argon.core._
import forge._
import spatial.metadata._
import spatial.nodes._
protected class ReduceAccum[T](accum: Option[Reg[T]], style: ControlStyle, ii: Option[Double], zero: Option[T], fold: Option[T]) {
/** 1 dimensional reduction **/
@api def apply(domain1D: Counter)(map: Index => T)(reduce: (T,T) => T)(implicit mT: Type[T], bits: Bits[T]): Reg[T] = {
val acc = accum.getOrElse(Reg[T])
Reduce.alloc(List(domain1D), acc, {x: List[Index] => map(x.head)}, reduce, style, ii, zero, fold)
acc
}
/** 2 dimensional reduction **/
@api def apply(domain1: Counter, domain2: Counter)(map: (Index,Index) => T)(reduce: (T,T) => T)(implicit mT: Type[T], bits: Bits[T]): Reg[T] = {
val acc = accum.getOrElse(Reg[T])
Reduce.alloc(List(domain1, domain2), acc, {x: List[Index] => map(x(0),x(1)) }, reduce, style, ii, zero, fold)
acc
}
/** 3 dimensional reduction **/
@api def apply(domain1: Counter, domain2: Counter, domain3: Counter)(map: (Index,Index,Index) => T)(reduce: (T,T) => T)(implicit mT: Type[T], bits: Bits[T]): Reg[T] = {
val acc = accum.getOrElse(Reg[T])
Reduce.alloc(List(domain1, domain2, domain3), acc, {x: List[Index] => map(x(0),x(1),x(2)) }, reduce, style, ii, zero, fold)
acc
}
/** N dimensional reduction **/
@api def apply(domain1: Counter, domain2: Counter, domain3: Counter, domain4: Counter, domain5plus: Counter*)(map: List[Index] => T)(reduce: (T,T) => T)(implicit mT: Type[T], bits: Bits[T]): Reg[T] = {
val acc = accum.getOrElse(Reg[T])
Reduce.alloc(List(domain1, domain2, domain3, domain4) ++ domain5plus, acc, map, reduce, style, ii, zero, fold)
acc
}
}
protected class ReduceConstant[A,T](style: ControlStyle, ii: Option[Double], a: A, isFold: Boolean) {
@api def apply(domain1D: Counter)(map: Index => T)(reduce: (T, T) => T)(implicit bT: Bits[T], lift: Lift[A, T]): Reg[T] = {
implicit val mT: Type[T] = lift.staged
val accum = Some(Reg[T](lift(a)))
val init = Some(lift(a))
val fold = if (isFold) init else None
val zero = if (!isFold) init else None
new ReduceAccum(accum, style, ii, zero, fold).apply(domain1D)(map)(reduce)
}
@api def apply(domain1: Counter, domain2: Counter)(map: (Index, Index) => T)(reduce: (T, T) => T)(implicit bT: Bits[T], lift: Lift[A, T]): Reg[T] = {
implicit val mT: Type[T] = lift.staged
val accum = Some(Reg[T](lift(a)))
val init = Some(lift(a))
val fold = if (isFold) init else None
val zero = if (!isFold) init else None
new ReduceAccum(accum, style, ii, zero, fold).apply(domain1, domain2)(map)(reduce)
}
@api def apply(domain1: Counter, domain2: Counter, domain3: Counter)(map: (Index, Index, Index) => T)(reduce: (T, T) => T)(implicit bT: Bits[T], lift: Lift[A, T]): Reg[T] = {
implicit val mT: Type[T] = lift.staged
val accum = Some(Reg[T](lift(a)))
val init = Some(lift(a))
val fold = if (isFold) init else None
val zero = if (!isFold) init else None
new ReduceAccum(accum, style, ii, zero, fold).apply(domain1, domain2, domain3)(map)(reduce)
}
@api def apply(domain1: Counter, domain2: Counter, domain3: Counter, domain4: Counter, domain5plus: Counter*)(map: List[Index] => T)(reduce: (T, T) => T)(implicit bT: Bits[T], lift: Lift[A, T]): Reg[T] = {
implicit val mT: Type[T] = lift.staged
val accum = Some(Reg[T](lift(a)))
val init = Some(lift(a))
val fold = if (isFold) init else None
val zero = if (!isFold) init else None
new ReduceAccum(accum, style, ii, zero, fold).apply(domain1, domain2, domain3, domain4, domain5plus: _*)(map)(reduce)
}
}
protected case class ReduceClass(style: ControlStyle, ii: Option[Double] = None) extends ReduceAccum(None, style, ii, None, None) {
/** Reduction with implicit accumulator **/
// TODO: Can't use ANY implicits if we want to be able to use Reduce(0)(...). Maybe a macro can help here?
def apply(zero: scala.Int) = new ReduceConstant[Int,Int32](style, ii, zero, isFold = false)
def apply(zero: scala.Long) = new ReduceConstant[Long,Int64](style, ii, zero, isFold = false)
def apply(zero: scala.Float) = new ReduceConstant[Float,Float32](style, ii, zero, isFold = false)
def apply(zero: scala.Double) = new ReduceConstant[Double,Float64](style, ii, zero, isFold = false)
//def apply(zero: FixPt[_,_,_]) = new ReduceAccum(Reg[FixPt[S,I,F]](zero), style)
//def apply(zero: FltPt[_,_]) = new ReduceAccum(Reg[FltPt[G,E]](zero), style)
/** Reduction with explicit accumulator **/
// TODO: Should initial value of accumulator be assumed to be the identity value?
def apply[T](accum: Reg[T]) = new ReduceAccum(Some(accum), style, ii, None, None)
}
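// Illustrative usage, based only on the signatures above (a sketch; `ctr: Counter` and `data`
// are assumed to exist in the surrounding accelerator scope):
//   val sum: Reg[Int32] = Reduce(0)(ctr){ i => data(i) }{ _ + _ }  // implicit accumulator, inner pipe by default
//   Fold(0)(ctr){ i => data(i) }{ _ + _ }                          // same shape, but folds into the initial value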
protected case class FoldClass(style: ControlStyle, ii: Option[Double] = None) {
/** Fold with implicit accumulator **/
// TODO: Can't use ANY implicits if we want to be able to use Reduce(0)(...). Maybe a macro can help here?
def apply(zero: scala.Int) = new ReduceConstant[Int,Int32](style, ii, zero, isFold = true)
def apply(zero: scala.Long) = new ReduceConstant[Long,Int64](style, ii, zero, isFold = true)
def apply(zero: scala.Float) = new ReduceConstant[Float,Float32](style, ii, zero, isFold = true)
def apply(zero: scala.Double) = new ReduceConstant[Double,Float64](style, ii, zero, isFold = true)
def apply[T](accum: Reg[T]) = {
val sty = if (style == InnerPipe) MetaPipe else style
MemReduceAccum(accum, sty, ii, None, true, None)
}
}
object Fold extends FoldClass(InnerPipe)
object Reduce extends ReduceClass(InnerPipe) {
@internal def alloc[T:Type:Bits](
domain: Seq[Counter],
reg: Reg[T],
map: List[Index] => T,
reduce: (T,T) => T,
style: ControlStyle,
ii: Option[Double],
ident: Option[T],
fold: Option[T]
): Controller = {
val rV = (fresh[T], fresh[T])
val iters = List.tabulate(domain.length){_ => fresh[Index] }
val mBlk = stageSealedBlock{ map(wrap(iters)).s }
val ldBlk = stageColdLambda1(reg.s) { reg.value.s }
val rBlk = stageColdLambda2(rV._1,rV._2){ reduce(wrap(rV._1),wrap(rV._2)).s }
val stBlk = stageColdLambda2(reg.s, rBlk.result){ unwrap( reg := wrap(rBlk.result) ) }
val cchain = CounterChain(domain: _*)
val z = ident.map(_.s)
val f = fold.map(_.s)
val effects = mBlk.effects andAlso ldBlk.effects andAlso rBlk.effects andAlso stBlk.effects
val pipe = stageEffectful(OpReduce[T](Nil, cchain.s, reg.s, mBlk, ldBlk, rBlk, stBlk, z, f, rV, iters), effects)(ctx)
styleOf(pipe) = style
userIIOf(pipe) = ii
levelOf(pipe) = InnerControl // Fixed in Level Analyzer
Controller(pipe)
}
@internal def op_reduce[T:Type:Bits](
ens: Seq[Exp[Bit]],
cchain: Exp[CounterChain],
reg: Exp[Reg[T]],
map: () => Exp[T],
load: Exp[Reg[T]] => Exp[T],
reduce: (Exp[T], Exp[T]) => Exp[T],
store: (Exp[Reg[T]], Exp[T]) => Exp[MUnit],
ident: Option[Exp[T]],
fold: Option[Exp[T]],
rV: (Bound[T],Bound[T]),
iters: List[Bound[Index]]
): Sym[Controller] = {
val mBlk = stageSealedBlock{ map() }
val ldBlk = stageColdLambda1(reg){ load(reg) }
val rBlk = stageColdLambda2(rV._1,rV._2){ reduce(rV._1,rV._2) }
val stBlk = stageColdLambda2(reg, rBlk.result){ store(reg, rBlk.result) }
val effects = mBlk.effects andAlso ldBlk.effects andAlso rBlk.effects andAlso stBlk.effects
stageEffectful( OpReduce[T](ens, cchain, reg, mBlk, ldBlk, rBlk, stBlk, ident, fold, rV, iters), effects)(ctx)
}
@internal def op_unrolled_reduce[T,C[T]](
en: Seq[Exp[Bit]],
cchain: Exp[CounterChain],
accum: Exp[C[T]],
func: () => Exp[MUnit],
iters: Seq[Seq[Bound[Index]]],
valids: Seq[Seq[Bound[Bit]]]
)(implicit mT: Type[T], mC: Type[C[T]]): Exp[Controller] = {
val fBlk = stageSealedLambda1(accum) { func() }
val effects = fBlk.effects
stageEffectful(UnrolledReduce(en, cchain, accum, fBlk, iters, valids), effects.star)(ctx)
}
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/lang/control/Reduce.scala | Scala | mit | 8,009 |
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2021 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.persistence.jdbc.query.dao
import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.AkkaSerialization
import akka.persistence.jdbc.config.ReadJournalConfig
import akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat }
import akka.serialization.Serialization
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
class DefaultReadJournalDao(
val db: Database,
val profile: JdbcProfile,
val readJournalConfig: ReadJournalConfig,
serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)
extends ReadJournalDao
with BaseJournalDaoWithReadMessages
with H2Compat {
import profile.api._
val queries = new ReadJournalQueries(profile, readJournalConfig)
override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] =
Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result))
override def eventsByTag(
tag: String,
offset: Long,
maxOffset: Long,
max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = {
// This doesn't populate the tags. AFAICT they aren't used
Source
.fromPublisher(db.stream(queries.eventsByTag((tag, offset, maxOffset, correctMaxForH2Driver(max))).result))
.map(row =>
AkkaSerialization.fromRow(serialization)(row).map { case (repr, ordering) => (repr, Set.empty, ordering) })
}
override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =
Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result))
override def maxJournalSequence(): Future[Long] =
db.run(queries.maxJournalSequenceQuery.result)
override def messages(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] =
Source
.fromPublisher(
db.stream(
queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))
.map(AkkaSerialization.fromRow(serialization)(_))
}
| dnvriend/akka-persistence-jdbc | core/src/main/scala/akka/persistence/jdbc/query/dao/DefaultReadJournalDao.scala | Scala | apache-2.0 | 2,450 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.internal
private[blaze] object compat {
val CollectionConverters = scala.jdk.CollectionConverters
}
| http4s/blaze | core/src/main/scala-2.13/org/http4s/blaze/internal/compat.scala | Scala | apache-2.0 | 723 |
package edu.gemini.ags.gems
import edu.gemini.ags.TargetsHelper
import edu.gemini.ags.gems.mascot.MascotProgress
import edu.gemini.ags.gems.mascot.Strehl
import edu.gemini.catalog.votable.TestVoTableBackend
import edu.gemini.shared.skyobject.{Magnitude => JMagnitude}
import edu.gemini.shared.skyobject.coords.HmsDegCoordinates
import edu.gemini.shared.util.immutable.{None => JNone}
import edu.gemini.skycalc.Coordinates
import edu.gemini.skycalc.Offset
import edu.gemini.pot.ModelConverters._
import edu.gemini.spModel.core.Magnitude
import edu.gemini.spModel.core._
import edu.gemini.spModel.gemini.flamingos2.Flamingos2
import edu.gemini.spModel.gemini.gems.Canopus
import edu.gemini.spModel.gemini.gems.Gems
import edu.gemini.spModel.gemini.gems.GemsInstrument
import edu.gemini.spModel.gemini.gsaoi.Gsaoi
import edu.gemini.spModel.gemini.gsaoi.GsaoiOdgw
import edu.gemini.spModel.gemini.obscomp.SPSiteQuality
import edu.gemini.spModel.gemini.obscomp.SPSiteQuality.Conditions
import edu.gemini.spModel.gems.GemsTipTiltMode
import edu.gemini.spModel.obs.context.ObsContext
import edu.gemini.spModel.obscomp.SPInstObsComp
import edu.gemini.spModel.target.SPTarget
import edu.gemini.spModel.target.env.TargetEnvironment
import edu.gemini.spModel.telescope.IssPort
import jsky.coords.WorldCoords
import org.specs2.time.NoTimeConversions
import scala.concurrent.duration._
import org.specs2.mutable.Specification
import AlmostEqual.AlmostEqualOps
import scala.concurrent.Await
import scalaz._
import Scalaz._
/**
* See OT-27
*/
class GemsResultsAnalyzerSpec extends MascotProgress with Specification with NoTimeConversions with TargetsHelper {
class TestGemsVoTableCatalog(file: String) extends GemsVoTableCatalog {
override val backend = TestVoTableBackend(file)
}
"GemsCatalogResultsSpec" should {
"support Gsaoi Search on TYC 8345-1155-1" in {
val base = new WorldCoords("17:25:27.529", "-48:27:24.02")
val inst = new Gsaoi <| {_.setPosAngle(0.0)} <| {_.setIssPort(IssPort.UP_LOOKING)}
val tipTiltMode = GemsTipTiltMode.canopus
val conditions = SPSiteQuality.Conditions.NOMINAL.sb(SPSiteQuality.SkyBackground.ANY).wv(SPSiteQuality.WaterVapor.ANY)
val (results, gemsGuideStars) = search(inst, base.getRA.toString, base.getDec.toString, tipTiltMode, conditions, new TestGemsVoTableCatalog("/gems_TYC_8345_1155_1.xml"))
val expectedResults = if (tipTiltMode == GemsTipTiltMode.both) 4 else 2
results should have size expectedResults
results.zipWithIndex.foreach { case (r, i) =>
System.out.println("Result #" + i)
System.out.println(" Criteria:" + r.criterion)
System.out.println(" Results size:" + r.results.size)
}
System.out.println("gems results: size = " + gemsGuideStars.size)
gemsGuideStars should have size 247
val result = gemsGuideStars.head
result.pa.toDegrees should beCloseTo(0, 0.0001)
val group = result.guideGroup
val set = group.getReferencedGuiders
      // Found a star on CWFS1, CWFS2, CWFS3 and ODGW4
set.contains(Canopus.Wfs.cwfs1) should beTrue
set.contains(Canopus.Wfs.cwfs2) should beTrue
set.contains(Canopus.Wfs.cwfs3) should beTrue
set.contains(GsaoiOdgw.odgw1) should beFalse
set.contains(GsaoiOdgw.odgw2) should beFalse
set.contains(GsaoiOdgw.odgw3) should beFalse
set.contains(GsaoiOdgw.odgw4) should beTrue
val cwfs1 = group.get(Canopus.Wfs.cwfs1).getValue.getPrimary.getValue.getTarget
val cwfs2 = group.get(Canopus.Wfs.cwfs2).getValue.getPrimary.getValue.getTarget
val cwfs3 = group.get(Canopus.Wfs.cwfs3).getValue.getPrimary.getValue.getTarget
val odgw4 = group.get(GsaoiOdgw.odgw4).getValue.getPrimary.getValue.getTarget
cwfs1.getName must beEqualTo("208-152095")
cwfs2.getName must beEqualTo("208-152215")
cwfs3.getName must beEqualTo("208-152039")
odgw4.getName must beEqualTo("208-152102")
val cwfs1x = Coordinates.create("17:25:27.151", "-48:28:07.67")
val cwfs2x = Coordinates.create("17:25:32.541", "-48:27:30.06")
val cwfs3x = Coordinates.create("17:25:24.719", "-48:26:58.00")
val odgw4x = Coordinates.create("17:25:27.552", "-48:27:23.86")
(Angle.fromDegrees(cwfs1x.getRaDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs1x.getDecDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getRaDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getDecDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getRaDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getDecDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(odgw4x.getRaDeg) ~= Angle.fromDegrees(odgw4.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(odgw4x.getDecDeg) ~= Angle.fromDegrees(odgw4.getSkycalcCoordinates.getDecDeg)) should beTrue
val cwfs1Mag = cwfs1.getMagnitude(JMagnitude.Band.r).getValue.getBrightness
val cwfs2Mag = cwfs2.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
val cwfs3Mag = cwfs3.getMagnitude(JMagnitude.Band.r).getValue.getBrightness
cwfs3Mag < cwfs1Mag && cwfs2Mag < cwfs1Mag should beTrue
}
"support Gsaoi Search on SN-1987A" in {
val base = new WorldCoords("05:35:28.020", "-69:16:11.07")
val inst = new Gsaoi <| {_.setPosAngle(0.0)} <| {_.setIssPort(IssPort.UP_LOOKING)}
val tipTiltMode = GemsTipTiltMode.canopus
val (results, gemsGuideStars) = search(inst, base.getRA.toString, base.getDec.toString, tipTiltMode, SPSiteQuality.Conditions.NOMINAL.sb(SPSiteQuality.SkyBackground.ANY), new TestGemsVoTableCatalog("/gems_sn1987A.xml"))
val expectedResults = if (tipTiltMode == GemsTipTiltMode.both) 4 else 2
results should have size expectedResults
results.zipWithIndex.foreach { case (r, i) =>
System.out.println("Result #" + i)
System.out.println(" Criteria:" + r.criterion)
System.out.println(" Results size:" + r.results.size)
}
System.out.println("gems results: size = " + gemsGuideStars.size)
gemsGuideStars should have size 135
val result = gemsGuideStars.head
result.pa.toDegrees should beCloseTo(0, 0.0001)
val group = result.guideGroup
val set = group.getReferencedGuiders
      // Found a star on CWFS1, CWFS2, CWFS3 and ODGW2
set.contains(Canopus.Wfs.cwfs1) should beTrue
set.contains(Canopus.Wfs.cwfs2) should beTrue
set.contains(Canopus.Wfs.cwfs3) should beTrue
set.contains(GsaoiOdgw.odgw1) should beFalse
set.contains(GsaoiOdgw.odgw2) should beTrue
set.contains(GsaoiOdgw.odgw3) should beFalse
set.contains(GsaoiOdgw.odgw4) should beFalse
val cwfs1 = group.get(Canopus.Wfs.cwfs1).getValue.getPrimary.getValue.getTarget
val cwfs2 = group.get(Canopus.Wfs.cwfs2).getValue.getPrimary.getValue.getTarget
val cwfs3 = group.get(Canopus.Wfs.cwfs3).getValue.getPrimary.getValue.getTarget
val odgw2 = group.get(GsaoiOdgw.odgw2).getValue.getPrimary.getValue.getTarget
cwfs1.getName must beEqualTo("104-014597")
cwfs2.getName must beEqualTo("104-014608")
cwfs3.getName must beEqualTo("104-014547")
odgw2.getName must beEqualTo("104-014556")
val cwfs1x = Coordinates.create("05:35:32.630", "-69:15:48.64")
val cwfs2x = Coordinates.create("05:35:36.409", "-69:16:24.17")
val cwfs3x = Coordinates.create("05:35:18.423", "-69:16:30.67")
val odgw2x = Coordinates.create("05:35:23.887", "-69:16:18.20")
(Angle.fromDegrees(cwfs1x.getRaDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs1x.getDecDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getRaDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getDecDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getRaDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getDecDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(odgw2x.getRaDeg) ~= Angle.fromDegrees(odgw2.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(odgw2x.getDecDeg) ~= Angle.fromDegrees(odgw2.getSkycalcCoordinates.getDecDeg)) should beTrue
val cwfs1Mag = cwfs1.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
val cwfs2Mag = cwfs2.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
val cwfs3Mag = cwfs3.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
cwfs3Mag < cwfs1Mag && cwfs2Mag < cwfs1Mag should beTrue
}
"support Gsaoi Search on M6" in {
val base = new WorldCoords("17:40:20.000", "-32:15:12.00")
val inst = new Gsaoi
val tipTiltMode = GemsTipTiltMode.canopus
val (results, gemsGuideStars) = search(inst, base.getRA.toString, base.getDec.toString, tipTiltMode, SPSiteQuality.Conditions.NOMINAL.sb(SPSiteQuality.SkyBackground.ANY), new TestGemsVoTableCatalog("/gems_m6.xml"))
val expectedResults = if (tipTiltMode == GemsTipTiltMode.both) 4 else 2
results should have size expectedResults
results.zipWithIndex.foreach { case (r, i) =>
System.out.println("Result #" + i)
System.out.println(" Criteria:" + r.criterion)
System.out.println(" Results size:" + r.results.size)
}
System.out.println("gems results: size = " + gemsGuideStars.size)
gemsGuideStars should have size 98
val result = gemsGuideStars.head
result.pa.toDegrees should beCloseTo(90, 0.0001)
val group = result.guideGroup
val set = group.getReferencedGuiders
      // Found a star on CWFS1, CWFS2, CWFS3 and ODGW2
set.contains(Canopus.Wfs.cwfs1) should beTrue
set.contains(Canopus.Wfs.cwfs2) should beTrue
set.contains(Canopus.Wfs.cwfs3) should beTrue
set.contains(GsaoiOdgw.odgw1) should beFalse
set.contains(GsaoiOdgw.odgw2) should beTrue
set.contains(GsaoiOdgw.odgw3) should beFalse
set.contains(GsaoiOdgw.odgw4) should beFalse
val cwfs1 = group.get(Canopus.Wfs.cwfs1).getValue.getPrimary.getValue.getTarget
val cwfs2 = group.get(Canopus.Wfs.cwfs2).getValue.getPrimary.getValue.getTarget
val cwfs3 = group.get(Canopus.Wfs.cwfs3).getValue.getPrimary.getValue.getTarget
val odgw2 = group.get(GsaoiOdgw.odgw2).getValue.getPrimary.getValue.getTarget
cwfs1.getName must beEqualTo("289-128909")
cwfs2.getName must beEqualTo("289-128878")
cwfs3.getName must beEqualTo("289-128908")
odgw2.getName must beEqualTo("289-128891")
val cwfs1x = Coordinates.create("17:40:21.743", "-32:14:54.04")
val cwfs2x = Coordinates.create("17:40:16.855", "-32:15:55.83")
val cwfs3x = Coordinates.create("17:40:21.594", "-32:15:50.38")
val odgw2x = Coordinates.create("17:40:19.295", "-32:14:58.34")
(Angle.fromDegrees(cwfs1x.getRaDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs1x.getDecDeg) ~= Angle.fromDegrees(cwfs1.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getRaDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getDecDeg) ~= Angle.fromDegrees(cwfs2.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getRaDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getDecDeg) ~= Angle.fromDegrees(cwfs3.getSkycalcCoordinates.getDecDeg)) should beTrue
(Angle.fromDegrees(odgw2x.getRaDeg) ~= Angle.fromDegrees(odgw2.getSkycalcCoordinates.getRaDeg)) should beTrue
(Angle.fromDegrees(odgw2x.getDecDeg) ~= Angle.fromDegrees(odgw2.getSkycalcCoordinates.getDecDeg)) should beTrue
val cwfs1Mag = cwfs1.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
val cwfs2Mag = cwfs2.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
val cwfs3Mag = cwfs3.getMagnitude(JMagnitude.Band.UC).getValue.getBrightness
cwfs3Mag < cwfs1Mag && cwfs2Mag < cwfs1Mag should beTrue
}
"support Gsaoi Search on BPM 37093" in {
val base = new WorldCoords("12:38:49.820", "-49:48:00.20")
val inst = new Gsaoi
val tipTiltMode = GemsTipTiltMode.canopus
val (results, gemsGuideStars) = search(inst, base.getRA.toString, base.getDec.toString, tipTiltMode, SPSiteQuality.Conditions.NOMINAL.sb(SPSiteQuality.SkyBackground.ANY), new TestGemsVoTableCatalog("/gems_bpm_37093.xml"))
val expectedResults = if (tipTiltMode == GemsTipTiltMode.both) 4 else 2
results should have size expectedResults
results.zipWithIndex.foreach { case (r, i) =>
System.out.println("Result #" + i)
System.out.println(" Criteria:" + r.criterion)
System.out.println(" Results size:" + r.results.size)
}
System.out.println("gems results: size = " + gemsGuideStars.size)
gemsGuideStars should have size 54
val result = gemsGuideStars.head
result.pa.toDegrees should beCloseTo(0, 0.0001)
val group = result.guideGroup
val set = group.getReferencedGuiders
      // Found a star on CWFS1, CWFS2, CWFS3 and ODGW4
set.contains(Canopus.Wfs.cwfs1) should beTrue
set.contains(Canopus.Wfs.cwfs2) should beTrue
set.contains(Canopus.Wfs.cwfs3) should beTrue
set.contains(GsaoiOdgw.odgw1) should beFalse
set.contains(GsaoiOdgw.odgw2) should beFalse
set.contains(GsaoiOdgw.odgw3) should beFalse
set.contains(GsaoiOdgw.odgw4) should beTrue
val cwfs2 = group.get(Canopus.Wfs.cwfs2).getValue.getPrimary.getValue.getTarget.getSkycalcCoordinates
val cwfs3 = group.get(Canopus.Wfs.cwfs3).getValue.getPrimary.getValue.getTarget.getSkycalcCoordinates
val odgw4 = group.get(GsaoiOdgw.odgw4).getValue.getPrimary.getValue.getTarget.getSkycalcCoordinates
val cwfs2x = Coordinates.create("12:38:44.500", "-49:47:58.38")
val cwfs3x = Coordinates.create("12:38:50.005", "-49:48:00.89")
val odgw4x = Coordinates.create("12:38:50.005", "-49:48:00.89")
(Angle.fromDegrees(cwfs2x.getRaDeg) ~= Angle.fromDegrees(cwfs2.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs2x.getDecDeg) ~= Angle.fromDegrees(cwfs2.getDecDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getRaDeg) ~= Angle.fromDegrees(cwfs3.getRaDeg)) should beTrue
(Angle.fromDegrees(cwfs3x.getDecDeg) ~= Angle.fromDegrees(cwfs3.getDecDeg)) should beTrue
(Angle.fromDegrees(odgw4x.getRaDeg) ~= Angle.fromDegrees(odgw4.getRaDeg)) should beTrue
(Angle.fromDegrees(odgw4x.getDecDeg) ~= Angle.fromDegrees(odgw4.getDecDeg)) should beTrue
//val cwfs1Mag = group.get(Canopus.Wfs.cwfs1).getValue.getPrimary.getValue.getTarget.getMagnitude(JMagnitude.Band.r).getValue.getBrightness
val cwfs2Mag = group.get(Canopus.Wfs.cwfs2).getValue.getPrimary.getValue.getTarget.getMagnitude(JMagnitude.Band.r).getValue.getBrightness
val cwfs3Mag = group.get(Canopus.Wfs.cwfs3).getValue.getPrimary.getValue.getTarget.getMagnitude(JMagnitude.Band.r).getValue.getBrightness
}
"sort targets by R magnitude" in {
val st1 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(10.0, MagnitudeBand.J)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1)).head should beEqualTo(st1)
val st2 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(15.0, MagnitudeBand.J)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2)).head should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2))(1) should beEqualTo(st2)
val st3 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(15.0, MagnitudeBand.R)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3)).head should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3))(1) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3))(2) should beEqualTo(st2)
val st4 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(9.0, MagnitudeBand.R)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4)).head should beEqualTo(st4)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(1) should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(2) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(3) should beEqualTo(st2)
val st5 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(19.0, MagnitudeBand.R)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5)).head should beEqualTo(st4)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(1) should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(2) should beEqualTo(st5)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(3) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(4) should beEqualTo(st2)
}
"sort targets by R-like magnitude" in {
val st1 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(10.0, MagnitudeBand.J)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1)).head should beEqualTo(st1)
val st2 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(15.0, MagnitudeBand.J)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2)).head should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2))(1) should beEqualTo(st2)
val st3 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(15.0, MagnitudeBand.R)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3)).head should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3))(1) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3))(2) should beEqualTo(st2)
val st4 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(9.0, MagnitudeBand._r)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4)).head should beEqualTo(st4)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(1) should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(2) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4))(3) should beEqualTo(st2)
val st5 = target("n", edu.gemini.spModel.core.Coordinates.zero, List(new Magnitude(19.0, MagnitudeBand.UC)))
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5)).head should beEqualTo(st4)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(1) should beEqualTo(st3)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(2) should beEqualTo(st5)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(3) should beEqualTo(st1)
GemsResultsAnalyzer.sortTargetsByBrightness(List(st1, st2, st3, st4, st5))(4) should beEqualTo(st2)
}
}
def search(inst: SPInstObsComp, raStr: String, decStr: String, tipTiltMode: GemsTipTiltMode, conditions: Conditions, catalog: TestGemsVoTableCatalog): (List[GemsCatalogSearchResults], List[GemsGuideStars]) = {
import scala.collection.JavaConverters._
val coords = new WorldCoords(raStr, decStr)
val baseTarget = new SPTarget(coords.getRaDeg, coords.getDecDeg)
val env = TargetEnvironment.create(baseTarget)
val offsets = new java.util.HashSet[Offset]
val obsContext = ObsContext.create(env, inst, JNone.instance[Site], conditions, offsets, new Gems, JNone.instance())
val baseRA = Angle.fromDegrees(coords.getRaDeg)
val baseDec = Angle.fromDegrees(coords.getDecDeg)
val base = new HmsDegCoordinates.Builder(baseRA.toOldModel, baseDec.toOldModel).build
val instrument = if (inst.isInstanceOf[Flamingos2]) GemsInstrument.flamingos2 else GemsInstrument.gsaoi
val posAngles = Set(Angle.zero, Angle.fromDegrees(90), Angle.fromDegrees(180), Angle.fromDegrees(270)).asJava
val options = new GemsGuideStarSearchOptions(instrument, tipTiltMode, posAngles)
val results = Await.result(catalog.search(obsContext, base.toNewModel, options, scala.None), 5.seconds)
val gemsResults = GemsResultsAnalyzer.analyze(obsContext, posAngles, results.asJava, scala.None)
(results, gemsResults.asScala.toList)
}
def progress(s: Strehl, count: Int, total: Int, usable: Boolean): Boolean = true
def setProgressTitle(s: String) {
System.out.println(s)
}
} | arturog8m/ocs | bundle/edu.gemini.ags/src/test/scala/edu/gemini/ags/gems/GemsResultsAnalyzerSpec.scala | Scala | bsd-3-clause | 21,388 |
package net.pierreandrews.utils
/**
 * An Iterator that takes in a set of partially sorted inputs and merges them in a sorted manner, in descending order.
 *
 * If the inputs are not partially sorted, then the output will not be sorted properly. This sorter more or less
* only needs inputs.size*size(I) memory as it lazily loads each line from the inputs.
*
* The second parameter is a function that extracts the field to be sorted on. It extracts it in an Option as the extraction
* might fail (i.e. we can't parse the date out of the logline)
*
* User: pierre
* Date: 11/30/14
*/
class LineSorter[I, T: Ordering](inputs: Seq[Iterator[I]], extractOrderField: I => Option[T]) extends Iterator[I] {
//filter out the log lines to only keep the ones
// which are parseable.
// This flatMap is done on an iterator, so it's executed lazily only when the line is pulled.
private val bufferedInputs = inputs.map(_.flatMap { l=>
//get rid of the lines we can't parse
extractOrderField(l).map { (_, l) }
}.buffered)
  // we buffer the iterator so that we can peek at the top of the iterator without moving it forward.
/**
* there are more lines in this iterator if at least one of the underlying iterators has more lines
*/
override def hasNext: Boolean = {
bufferedInputs.exists(_.hasNext)
}
/**
* extract the next line, choosing the one with the maximum sorting field from all the available iterators
* returns the maximum next line
*/
override def next(): I = {
// only get the possible next line from the iterators that still have content available
// we will then figure out which one to return
val possibleNext = bufferedInputs.collect {
case it if it.hasNext =>
(it.head, it)
}
//find out the maximum ordering field from the possible next lines
    // maxBy picks the entry with the greatest ordering field, i.e. the next line in descending order.
val ((sortField, maxNext), itToAdvance) = possibleNext.maxBy {
case ((headOrder, _), _) =>
headOrder
}
// move the chosen iterator forward
itToAdvance.next()
// return the line from that iterator
maxNext
}
}
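// Illustrative usage (a sketch, not part of the original file): merge two iterators that are
// each already sorted in descending order of an integer timestamp. `parseTs` is a hypothetical
// extractor; lines whose field cannot be parsed are silently dropped.
//   def parseTs(line: String): Option[Int] =
//     line.split(" ").headOption.flatMap(s => scala.util.Try(s.toInt).toOption)
//   val merged = new LineSorter(Seq(Iterator("9 a", "7 b", "3 c"), Iterator("8 x", "2 y")), parseTs)
//   // merged.toList == List("9 a", "8 x", "7 b", "3 c", "2 y")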
| Mortimerp9/LogSplit | src/main/scala/net/pierreandrews/utils/LineSorter.scala | Scala | gpl-3.0 | 2,144 |
package lib
import io.apibuilder.api.v0.models.{Original, OriginalForm, OriginalType}
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
class OriginalHelpersSpec extends PlaySpec with GuiceOneAppPerSuite with TestHelper {
"original" in {
val data = readFile("../spec/apibuilder-api.json")
OriginalUtil.toOriginal(OriginalForm(data = data)) must be(
Original(
OriginalType.ApiJson,
data
)
)
}
"guessType" must {
"apiJson" in {
OriginalUtil.guessType(readFile("../spec/apibuilder-api.json")) must be(Some(OriginalType.ApiJson))
OriginalUtil.guessType(readFile("../spec/apibuilder-spec.json")) must be(Some(OriginalType.ApiJson))
}
"serviceJson" in {
OriginalUtil.guessType(readFile("../core/src/test/resources/apibuilder-service.json")) must be(Some(OriginalType.ServiceJson))
}
"swaggerJson" in {
OriginalUtil.guessType(readFile("../swagger/src/test/resources/petstore-external-docs-example-security.json")) must be(Some(OriginalType.Swagger))
}
"swaggerYaml" in {
      OriginalUtil.guessType("swagger: '2.0'\ninfo:\n version: 0.0.1") must be(Some(OriginalType.Swagger))
}
"avroIdl" in {
OriginalUtil.guessType(" @namespace ") must be(Some(OriginalType.AvroIdl))
OriginalUtil.guessType(" protocol bar {} ") must be(Some(OriginalType.AvroIdl))
}
"unknown" in {
OriginalUtil.guessType(" ") must be(None)
}
"poorly formatted json" in {
OriginalUtil.guessType("{ ") must be(None)
}
}
}
| gheine/apidoc | api/test/lib/OriginalHelpersSpec.scala | Scala | mit | 1,601 |
package code.model
import _root_.net.liftweb.mapper._
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
class Recipient extends LongKeyedMapper[Recipient] with IdPK {
def getSingleton = Recipient
object addressIndex extends MappedEmail(this, 256)
def url: String = "/recipient/" + primaryKeyField
}
object Recipient extends Recipient with LongKeyedMetaMapper[Recipient] {
override def dbTableName = "recipients"
override def fieldOrder = List(addressIndex)
override def dbIndexes = UniqueIndex(addressIndex) :: super.dbIndexes
// FIXME: race condition if address is created elsewhere between find & create...
def recipientFindOrNew(rcpt : String) = {
find(By(addressIndex,rcpt)) openOr (create.addressIndex(rcpt))
}
}
| scsibug/fermata | src/main/scala/code/model/Recipient.scala | Scala | bsd-3-clause | 761 |
package example.gql_server.routing
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.server.Route
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport._
import example.gql_server.component.GraphQLComponent
import example.gql_server.context.UserContext
import example.gql_server.schema.GraphQLSchema
import io.circe.Json
import io.circe.parser._
import sangria.ast.Document
import sangria.execution.{ErrorWithResolver, Executor, QueryAnalysisError}
import sangria.marshalling.circe._
import sangria.parser.{QueryParser, SyntaxError}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}
trait GraphQLRoute extends GraphQLSchema with GraphQLComponent with Directives {
def graphQLRoute(): Route =
path("graphql") {
post {
entity(as[Json]) { body =>
val query = body.hcursor.get[String]("query").toOption
val operationName = body.hcursor.get[String]("operationName").toOption
val variablesStr = body.hcursor.get[String]("variables").toOption
query.map(QueryParser.parse(_)) match {
case Some(Success(ast)) =>
variablesStr.map(parse) match {
case Some(Left(error)) => complete(BadRequest, formatError(error))
case Some(Right(json)) => executeGraphQL(ast, operationName, json)
case None =>
executeGraphQL(
ast,
operationName,
body.hcursor.get[Json]("variables").toOption getOrElse Json.obj()
)
}
case Some(Failure(error)) => complete(BadRequest, formatError(error))
case None => complete(BadRequest, formatError("No query to execute"))
}
}
}
}
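  // Example of a request body this route accepts (illustrative only; the `person` field and its
  // argument are hypothetical and depend on the concrete GraphQLSchema):
  //   { "query": "query($id: Int!) { person(id: $id) { name } }",
  //     "operationName": null,
  //     "variables": { "id": 1 } }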
@SuppressWarnings(
Array(
"org.wartremover.warts.Any",
"org.wartremover.warts.Product",
"org.wartremover.warts.Serializable"
)
)
def executeGraphQL(
query: Document,
operationName: Option[String],
variables: Json
): Route =
extractExecutionContext { implicit ec =>
complete(
Executor
.execute(
schema,
query,
new UserContext {
val personHandler = personHandlerImpl
},
variables = variables,
operationName = operationName,
middleware = Nil
)
.map(OK -> _)
.recover {
case error: QueryAnalysisError => BadRequest -> error.resolveError
case error: ErrorWithResolver => InternalServerError -> error.resolveError
}
)
}
@SuppressWarnings(Array("org.wartremover.warts.Throw"))
def formatError(error: Throwable): Json =
error match {
case syntaxError: SyntaxError =>
Json.obj(
"errors" -> Json.arr(
Json.obj(
"message" -> Json.fromString(syntaxError.getMessage),
"locations" -> Json.arr(
Json.obj(
"line" -> Json.fromBigInt(syntaxError.originalError.position.line),
"column" -> Json.fromBigInt(syntaxError.originalError.position.column)
)
)
)
)
)
case NonFatal(e) =>
formatError(e.getMessage)
case e =>
throw e
}
def formatError(message: String): Json =
Json.obj("errors" -> Json.arr(Json.obj("message" -> Json.fromString(message))))
}
| t-mochizuki/scala-study | circleci-example/gql-server/src/main/scala/example/gql_server/routing/GraphQLRoute.scala | Scala | mit | 3,536 |
package com.verizon.bda.trapezium.framework.apps
import org.apache.spark.sql.SparkSession
/**
* Created by sankma8 on 10/11/17.
*/
object Main {
def main(args: Array[String]): Unit = {
val session = SparkSession.builder().master("local[2]").getOrCreate()
}
}
| Verizon/trapezium | framework/example/kafkaHADRSample/src/main/scala/com/verizon/bda/trapezium/framework/apps/Main.scala | Scala | apache-2.0 | 277 |
package nest.sparkle.http
import spray.http.{HttpMessage, HttpRequest, HttpResponse}
import spray.routing.Directive.SingleValueModifiers
import spray.routing.{Directive0, Directive1, Directives}
import nest.sparkle.util.Log
/** Mix in to a Directives route structure to add the withRequestResponseLog directive.
  * Wrap a route in the withRequestResponseLog directive and it will log requests and
  * responses to the debug log, with full detail provided if the TRACE level is enabled.
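  *
  * A minimal usage sketch (the `Api` class and route are hypothetical; the trait must be mixed
  * into something that already extends Directives and Log):
  * {{{
  * class Api extends Directives with Log with HttpLogging {
  *   val route = withRequestResponseLog {
  *     path("ping") { complete("pong") }
  *   }
  * }
  * }}}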
*/
trait HttpLogging {
self: Directives with Log =>
  // report the client's ip address, or "unknown-ip" if the ip address isn't known
lazy val sourceIP: Directive1[String] = clientIP.map(_.toString) | provide("unknown-ip")
lazy val withRequestResponseLog: Directive0 = {
/** return a trace loggable string containing full headers and the start of the entity body */
def headersAndBody(message: HttpMessage): String = {
      val headers = message.headers.map(_.toString).mkString(" ", "\n ", "\n")
val body = message.entity.asString.take(300)
headers + body
}
/** extract the request and client ip address from the request */
val requestIp =
for {
ip <- sourceIP
request <- extract(_.request)
} yield {
(request, ip)
}
/** log the http request */
def logRequestStart(requestLine: String, request: HttpRequest) {
val headersAndBodyLog = headersAndBody(request)
      log.trace(s"$requestLine\n$headersAndBodyLog")
}
/** log the http response */
def logRequestComplete(ip: String, requestLine: String, request: HttpRequest, response: HttpResponse) {
val responseCode = response.status.intValue.toString
log.info(s"$requestLine $responseCode")
      log.trace(s"$ip $responseCode\n ${headersAndBody(response)}")
}
/** return a log message string for an http request */
def requestLogLine(request: HttpRequest, ip: String): String = {
val uri = request.uri
val method = request.method.name
s"$ip: $method $uri"
}
// log before and after the request completes
requestIp.flatMap {
case (request, ip) =>
val requestLog = requestLogLine(request, ip)
logRequestStart(requestLog, request)
mapHttpResponse{ response =>
logRequestComplete(ip, requestLog, request, response)
response
}
}
}
} | mighdoll/sparkle | http-common/src/main/scala/nest/sparkle/http/HttpLogging.scala | Scala | apache-2.0 | 2,410 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
import org.scalatest._
import org.scalatest.exceptions.StackDepthExceptionHelper.getStackDepthFun
import org.scalatest.Suite.anExceptionThatShouldCauseAnAbort
import scala.annotation.tailrec
import org.scalatest.time.Span
import exceptions.{TestCanceledException, TestFailedException, TestPendingException, TimeoutField}
import PatienceConfiguration._
/**
* Trait that facilitates testing with futures.
*
* <p>
* This trait defines a <a href="Futures$FutureConcept.html"><code>FutureConcept</code></a> trait that can be used to implicitly wrap
* different kinds of futures, thereby providing a uniform testing API for futures.
* The three ways this trait enables you to test futures are:
* </p>
*
* <p>
 * 1. Invoking <code>isReadyWithin</code>, to assert that a future is ready within a specified time period.
* Here's an example:
* </p>
*
* <pre class="stHighlight">
* assert(result.isReadyWithin(100 millis))
* </pre>
*
* <p>
 * 2. Invoking <code>futureValue</code>, to obtain a future's result within a specified or implicit time period,
* like this:
* </p>
*
* <pre class="stHighlight">
* assert(result.futureValue === 7)
* </pre>
*
* <p>
* 3. Passing the future to <code>whenReady</code>, and performing assertions on the result value passed
* to the given function, as in:
* </p>
*
* <pre class="stHighlight">
* whenReady(result) { s =>
* s should be ("hello")
* }
* </pre>
*
* <p>
* The <code>whenReady</code> construct periodically inspects the passed
* future, until it is either ready or the configured timeout has been surpassed. If the future becomes
* ready before the timeout, <code>whenReady</code> passes the future's value to the specified function.
* </p>
*
* <p>
* To make <code>whenReady</code> more broadly applicable, the type of future it accepts is a <code>FutureConcept[T]</code>,
* where <code>T</code> is the type of value promised by the future. Passing a future to <code>whenReady</code> requires
* an implicit conversion from the type of future you wish to pass (the <em>modeled type</em>) to
* <code>FutureConcept[T]</code>. Subtrait <code>JavaFutures</code> provides an implicit conversion from
* <code>java.util.concurrent.Future[T]</code> to <code>FutureConcept[T]</code>.
* </p>
*
* <p>
* For example, the following invocation of <code>whenReady</code> would succeed (not throw an exception):
* </p>
*
* <pre class="stHighlight">
* import org.scalatest._
* import matchers.ShouldMatchers._
* import concurrent.Futures._
* import java.util.concurrent._
*
* val exec = Executors.newSingleThreadExecutor
* val task = new Callable[String] { def call() = { Thread.sleep(50); "hi" } }
* whenReady(exec.submit(task)) { s =>
* s should be ("hi")
* }
* </pre>
*
* <p>
* However, because the default timeout is 150 milliseconds, the following invocation of
* <code>whenReady</code> would ultimately produce a <code>TestFailedException</code>:
* </p>
*
* <pre class="stHighlight">
* val task = new Callable[String] { def call() = { Thread.sleep(500); "hi" } }
* whenReady(exec.submit(task)) { s =>
* s should be ("hi")
* }
* </pre>
*
* <p>
* Assuming the default configuration parameters, a <code>timeout</code> of 150 milliseconds and an
* <code>interval</code> of 15 milliseconds,
* were passed implicitly to <code>whenReady</code>, the detail message of the thrown
* <code>TestFailedException</code> would look like:
* </p>
*
* <p>
* <code>The future passed to whenReady was never ready, so whenReady timed out. Queried 95 times, sleeping 10 milliseconds between each query.</code>
* </p>
*
* <a name="defaultPatience"></a><h2>Configuration of <code>whenReady</code></h2>
*
* <p>
* The <code>whenReady</code> methods of this trait can be flexibly configured.
* The two configuration parameters for <code>whenReady</code> along with their
* default values and meanings are described in the following table:
* </p>
*
* <table style="border-collapse: collapse; border: 1px solid black">
* <tr>
* <th style="background-color: #CCCCCC; border-width: 1px; padding: 3px; text-align: center; border: 1px solid black">
* <strong>Configuration Parameter</strong>
* </th>
* <th style="background-color: #CCCCCC; border-width: 1px; padding: 3px; text-align: center; border: 1px solid black">
* <strong>Default Value</strong>
* </th>
* <th style="background-color: #CCCCCC; border-width: 1px; padding: 3px; text-align: center; border: 1px solid black">
* <strong>Meaning</strong>
* </th>
* </tr>
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: center">
* timeout
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: center">
* scaled(150 milliseconds)
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* the maximum amount of time to allow unsuccessful queries before giving up and throwing <code>TestFailedException</code>
* </td>
* </tr>
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: center">
* interval
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: center">
* scaled(15 milliseconds)
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* the amount of time to sleep between each query
* </td>
* </tr>
* </table>
*
* <p>
* The default values of both timeout and interval are passed to the <code>scaled</code> method, inherited
* from <code>ScaledTimeSpans</code>, so that the defaults can be scaled up
* or down together with other scaled time spans. See the documentation for trait <a href="ScaledTimeSpans.html"><code>ScaledTimeSpans</code></a>
* for more information.
* </p>
*
* <p>
* The <code>whenReady</code> methods of trait <code>Futures</code> each take a <code>PatienceConfig</code>
* object as an implicit parameter. This object provides values for the two configuration parameters. Trait
* <code>Futures</code> provides an implicit <code>val</code> named <code>defaultPatience</code> with each
* configuration parameter set to its default value.
* If you want to set one or more configuration parameters to a different value for all invocations of
* <code>whenReady</code> in a suite you can override this
* val (or hide it, for example, if you are importing the members of the <code>Futures</code> companion object rather
* than mixing in the trait). For example, if
* you always want the default <code>timeout</code> to be 2 seconds and the default <code>interval</code> to be 5 milliseconds, you
* can override <code>defaultPatience</code>, like this:
*
* <pre class="stHighlight">
* implicit override val defaultPatience =
* PatienceConfig(timeout = Span(2, Seconds), interval = Span(5, Millis))
* </pre>
*
* <p>
* Or, hide it by declaring a variable of the same name in whatever scope you want the changed values to be in effect:
* </p>
*
* <pre class="stHighlight">
* implicit val defaultPatience =
* PatienceConfig(timeout = Span(2, Seconds), interval = Span(5, Millis))
* </pre>
*
* <p>
* In addition to taking a <code>PatienceConfig</code> object as an implicit parameter, the <code>whenReady</code> methods of trait
* <code>Futures</code> include overloaded forms that take one or two <code>PatienceConfigParam</code>
* objects that you can use to override the values provided by the implicit <code>PatienceConfig</code> for a single <code>whenReady</code>
* invocation. For example, if you want to set <code>timeout</code> to 6 seconds for just one particular <code>whenReady</code> invocation,
* you can do so like this:
* </p>
*
* <pre class="stHighlight">
* whenReady (exec.submit(task), timeout(Span(6, Seconds))) { s =>
* s should be ("hi")
* }
* </pre>
*
* <p>
 * This invocation of <code>whenReady</code> will use <code>Span(6, Seconds)</code> for <code>timeout</code> and whatever value is specified by the
* implicitly passed <code>PatienceConfig</code> object for the <code>interval</code> configuration parameter.
* If you want to set both configuration parameters in this way, just list them separated by commas:
* </p>
*
* <pre class="stHighlight">
* whenReady (exec.submit(task), timeout(Span(6, Seconds)), interval(Span(500, Millis))) { s =>
* s should be ("hi")
* }
* </pre>
*
* <p>
* You can also import or mix in the members of <a href="../time/SpanSugar.html"><code>SpanSugar</code></a> if
* you want a more concise DSL for expressing time spans:
* </p>
*
* <pre class="stHighlight">
* whenReady (exec.submit(task), timeout(6 seconds), interval(500 millis)) { s =>
* s should be ("hi")
* }
* </pre>
*
* <p>
* <em>Note: The <code>whenReady</code> construct was in part inspired by the <code>whenDelivered</code> matcher of the
* <a href="http://github.com/jdegoes/blueeyes" target="_blank">BlueEyes</a> project, a lightweight, asynchronous web framework for Scala.</em>
* </p>
*
* @author Bill Venners
*/
trait Futures extends PatienceConfiguration {
/**
* Concept trait for futures, instances of which are passed to the <code>whenReady</code>
* methods of trait <a href="Futures.html"><code>Futures</code></a>.
*
* <p>
* See the documentation for trait <a href="Futures.html"><code>Futures</code></a> for the details on the syntax this trait
* provides for testing with futures.
* </p>
*
* @author Bill Venners
*/
trait FutureConcept[T] { thisFuture =>
/**
* Queries this future for its value.
*
* <p>
* If the future is not ready, this method will return <code>None</code>. If ready, it will either return an exception
* or a <code>T</code>.
* </p>
*/
def eitherValue: Option[Either[Throwable, T]]
/**
* Indicates whether this future has expired (timed out).
*
* <p>
* The timeout detected by this method is different from the timeout supported by <code>whenReady</code>. This timeout
* is a timeout of the underlying future. If the underlying future does not support timeouts, this method must always
* return <code>false</code>.
* </p>
*/
def isExpired: Boolean
/**
* Indicates whether this future has been canceled.
*
* <p>
* If the underlying future does not support the concept of cancellation, this method must always return <code>false</code>.
* </p>
*/
def isCanceled: Boolean
/**
* Indicates whether this future is ready within the specified timeout.
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
     * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* <code>ExecutionException</code>'s cause is <code>null</code>.
* </p>
*
     * @param timeout the maximum amount of time to wait for this future to become ready
     * @param config a <code>PatienceConfig</code> object whose <code>interval</code> is used between queries of this future
     * @return <code>true</code> if this future became ready within the given timeout, <code>false</code> otherwise
*/
final def isReadyWithin(timeout: Span)(implicit config: PatienceConfig): Boolean = {
try {
futureValue(PatienceConfig(timeout, config.interval))
true
}
catch {
case e: TimeoutField => false
}
}
/**
* Returns the result of this <code>FutureConcept</code>, once it is ready, or throws either the
* exception returned by the future (<em>i.e.</em>, <code>value</code> returned a <code>Left</code>)
* or <code>TestFailedException</code>.
*
* <p>
* The maximum amount of time to wait for the future to become ready before giving up and throwing
* <code>TestFailedException</code> is configured by the value contained in the passed
* <code>timeout</code> parameter.
* The interval to sleep between queries of the future (used only if the future is polled) is configured by the value contained in the passed
* <code>interval</code> parameter.
* </p>
*
* <p>
* This method invokes the overloaded <code>futureValue</code> form with only one (implicit) argument
* list that contains only one argument, a <code>PatienceConfig</code>, passing a new
* <code>PatienceConfig</code> with the <code>Timeout</code> specified as <code>timeout</code> and
* the <code>Interval</code> specified as <code>interval</code>.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
* <code>ExecutionException</code> will be be included as the <code>TestFailedException</code>'s cause only if the
     * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* </p>
*
* @param timeout the <code>Timeout</code> configuration parameter
* @param interval the <code>Interval</code> configuration parameter
* @return the result of the future once it is ready, if <code>value</code> is defined as a <code>Right</code>
* @throws Throwable if once ready, the <code>value</code> of this future is defined as a
* <code>Left</code> (in this case, this method throws that same exception)
* @throws TestFailedException if the future is cancelled, expires, or is still not ready after
* the specified timeout has been exceeded
*/
final def futureValue(timeout: Timeout, interval: Interval): T =
futureValue(PatienceConfig(timeout.value, interval.value))
/**
* Returns the result of this <code>FutureConcept</code>, once it is ready, or throws either the
* exception returned by the future (<em>i.e.</em>, <code>value</code> returned a <code>Left</code>)
* or <code>TestFailedException</code>.
*
* <p>
* The maximum amount of time to wait for the future to become ready before giving up and throwing
* <code>TestFailedException</code> is configured by the value contained in the passed
* <code>timeout</code> parameter.
* The interval to sleep between queries of the future (used only if the future is polled) is configured by the <code>interval</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* </p>
*
* <p>
* This method invokes the overloaded <code>futureValue</code> form with only one (implicit) argument
* list that contains only one argument, a <code>PatienceConfig</code>, passing a new
* <code>PatienceConfig</code> with the <code>Timeout</code> specified as <code>timeout</code> and
* the <code>Interval</code> specified as <code>config.interval</code>.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
     * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* <code>ExecutionException</code>'s cause is <code>null</code>.
* </p>
*
* @param timeout the <code>Timeout</code> configuration parameter
     * @param config a <code>PatienceConfig</code> object whose <code>interval</code> parameter is used by
     * this method; its <code>timeout</code> is superseded by the <code>timeout</code> parameter
* @return the result of the future once it is ready, if <code>eitherValue</code> is defined as a <code>Right</code>
* @throws Throwable if once ready, the <code>eitherValue</code> of this future is defined as a
* <code>Left</code> (in this case, this method throws that same exception)
* @throws TestFailedException if the future is cancelled, expires, or is still not ready after
* the specified timeout has been exceeded
*/
final def futureValue(timeout: Timeout)(implicit config: PatienceConfig): T =
futureValue(PatienceConfig(timeout.value, config.interval))
/**
* Returns the result of this <code>FutureConcept</code>, once it is ready, or throws either the
* exception returned by the future (<em>i.e.</em>, <code>eitherValue</code> returned a <code>Left</code>)
* or <code>TestFailedException</code>.
*
* <p>
* The maximum amount of time to wait for the future to become ready before giving up and throwing
* <code>TestFailedException</code> is configured by the <code>timeout</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* The interval to sleep between queries of the future (used only if the future is polled) is configured by the value contained in the passed
* <code>interval</code> parameter.
* </p>
*
* <p>
* This method invokes the overloaded <code>futureValue</code> form with only one (implicit) argument
* list that contains only one argument, a <code>PatienceConfig</code>, passing a new
* <code>PatienceConfig</code> with the <code>Interval</code> specified as <code>interval</code> and
* the <code>Timeout</code> specified as <code>config.timeout</code>.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
     * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* <code>ExecutionException</code>'s cause is <code>null</code>.
* </p>
*
* @param interval the <code>Interval</code> configuration parameter
     * @param config a <code>PatienceConfig</code> object whose <code>timeout</code> parameter is used by
     * this method; its <code>interval</code> is superseded by the <code>interval</code> parameter
* @return the result of the future once it is ready, if <code>value</code> is defined as a <code>Right</code>
* @throws Throwable if once ready, the <code>value</code> of this future is defined as a
* <code>Left</code> (in this case, this method throws that same exception)
* @throws TestFailedException if the future is cancelled, expires, or is still not ready after
* the specified timeout has been exceeded
*/
final def futureValue(interval: Interval)(implicit config: PatienceConfig): T =
futureValue(PatienceConfig(config.timeout, interval.value))
/**
* Returns the result of this <code>FutureConcept</code>, once it is ready, or throws either the
* exception returned by the future (<em>i.e.</em>, <code>futureValue</code> returned a <code>Left</code>)
* or <code>TestFailedException</code>.
*
* <p>
* This trait's implementation of this method queries the future repeatedly until it either is
* ready, or a configured maximum amount of time has passed, sleeping a configured interval between
* attempts; and when ready, returns the future's value. For greater efficiency, implementations of
* this trait may override this method so that it blocks the specified timeout while waiting for
* the result, if the underlying future supports this.
* </p>
*
* <p>
* The maximum amount of time to wait for the future to become ready before giving up and throwing
* <code>TestFailedException</code> is configured by the <code>timeout</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* The interval to sleep between queries of the future (used only if the future is polled) is configured by the <code>interval</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
     * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* <code>ExecutionException</code>'s cause is <code>null</code>.
* </p>
*
     * @param config a <code>PatienceConfig</code> object containing the <code>timeout</code> and
     * <code>interval</code> parameters used by this method
* @return the result of the future once it is ready, if <code>value</code> is defined as a <code>Right</code>
* @throws Throwable if once ready, the <code>value</code> of this future is defined as a
* <code>Left</code> (in this case, this method throws that same exception)
* @throws TestFailedException if the future is cancelled, expires, or is still not ready after
* the specified timeout has been exceeded
*/
def futureValue(implicit config: PatienceConfig): T = {
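      // Inspect the call stack to determine whether this call originated from whenReady,
      // isReadyWithin, or a direct futureValue call, so that the stack depth of any thrown
      // TestFailedException points at the user's code rather than at this trait.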
val st = Thread.currentThread.getStackTrace
val callerStackFrame =
if (!st(2).getMethodName.contains("futureValue"))
st(2)
else
st(3)
val methodName =
if (callerStackFrame.getFileName == "Futures.scala" && callerStackFrame.getMethodName == "whenReady")
"whenReady"
else if (callerStackFrame.getFileName == "Futures.scala" && callerStackFrame.getMethodName == "isReadyWithin")
"isReadyWithin"
else
"futureValue"
val adjustment =
methodName match {
case "whenReady" => 3
case "isReadyWithin" => 3
case _ => 0
}
val startNanos = System.nanoTime
@tailrec
def tryTryAgain(attempt: Int): T = {
val timeout = config.timeout
val interval = config.interval
if (thisFuture.isCanceled)
throw new TestFailedException(
sde => Some(Resources.futureWasCanceled),
None,
getStackDepthFun("Futures.scala", methodName, adjustment)
)
if (thisFuture.isExpired)
throw new TestFailedException(
sde => Some(Resources.futureExpired(attempt.toString, interval.prettyString)),
None,
getStackDepthFun("Futures.scala", methodName, adjustment)
)
thisFuture.eitherValue match {
case Some(Right(v)) => v
case Some(Left(tpe: TestPendingException)) => throw tpe
case Some(Left(tce: TestCanceledException)) => throw tce
case Some(Left(e)) if anExceptionThatShouldCauseAnAbort(e) => throw e
case Some(Left(ee: java.util.concurrent.ExecutionException)) if ee.getCause != null =>
val cause = ee.getCause
cause match {
case tpe: TestPendingException => throw tpe
case tce: TestCanceledException => throw tce
case e if anExceptionThatShouldCauseAnAbort(e) => throw e
case _ =>
throw new TestFailedException(
sde => Some {
if (cause.getMessage == null)
Resources.futureReturnedAnException(cause.getClass.getName)
else
Resources.futureReturnedAnExceptionWithMessage(cause.getClass.getName, cause.getMessage)
},
Some(cause),
getStackDepthFun("Futures.scala", methodName, adjustment)
)
}
case Some(Left(e)) =>
throw new TestFailedException(
sde => Some {
if (e.getMessage == null)
Resources.futureReturnedAnException(e.getClass.getName)
else
Resources.futureReturnedAnExceptionWithMessage(e.getClass.getName, e.getMessage)
},
Some(e),
getStackDepthFun("Futures.scala", methodName, adjustment)
)
case None =>
val duration = System.nanoTime - startNanos
if (duration < timeout.totalNanos)
SleepHelper.sleep(interval.millisPart, interval.nanosPart)
else {
throw new TestFailedException(
sde => Some(Resources.wasNeverReady(attempt.toString, interval.prettyString)),
None,
getStackDepthFun("Futures.scala", methodName, adjustment)
) with TimeoutField {
val timeout: Span = config.timeout
}
}
tryTryAgain(attempt + 1)
}
}
tryTryAgain(1)
}
}
/**
* Queries the passed future repeatedly until it either is ready, or a configured maximum
* amount of time has passed, sleeping a configured interval between attempts; and when ready, passes the future's value
* to the passed function.
*
* <p>
* The maximum amount of time to tolerate unsuccessful queries before giving up and throwing
* <code>TestFailedException</code> is configured by the value contained in the passed
* <code>timeout</code> parameter.
* The interval to sleep between attempts is configured by the value contained in the passed
* <code>interval</code> parameter.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
* <code>ExecutionException</code> will be be included as the <code>TestFailedException</code>'s cause only if the
   * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* </p>
*
* @param future the future to query
* @param timeout the <code>Timeout</code> configuration parameter
* @param interval the <code>Interval</code> configuration parameter
* @param fun the function to which pass the future's value when it is ready
   * @param config a <code>PatienceConfig</code> object containing <code>timeout</code> and
   * <code>interval</code> parameters that are unused by this method
* @return the result of invoking the <code>fun</code> parameter
*/
final def whenReady[T, U](future: FutureConcept[T], timeout: Timeout, interval: Interval)(fun: T => U)(implicit config: PatienceConfig): U = {
val result = future.futureValue(PatienceConfig(timeout.value, interval.value))
fun(result)
}
// whenReady(future)(fun)(PatienceConfig(timeout.value, interval.value))
/**
* Queries the passed future repeatedly until it either is ready, or a configured maximum
* amount of time has passed, sleeping a configured interval between attempts; and when ready, passes the future's value
* to the passed function.
*
* <p>
* The maximum amount of time in milliseconds to tolerate unsuccessful queries before giving up and throwing
* <code>TestFailedException</code> is configured by the value contained in the passed
* <code>timeout</code> parameter.
* The interval to sleep between attempts is configured by the <code>interval</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
* <code>ExecutionException</code> will be be included as the <code>TestFailedException</code>'s cause only if the
   * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* </p>
*
* @param future the future to query
* @param timeout the <code>Timeout</code> configuration parameter
* @param fun the function to which pass the future's value when it is ready
   * @param config a <code>PatienceConfig</code> object whose <code>interval</code> parameter is used by
   * this method; its <code>timeout</code> is superseded by the <code>timeout</code> parameter
* @return the result of invoking the <code>fun</code> parameter
*/
final def whenReady[T, U](future: FutureConcept[T], timeout: Timeout)(fun: T => U)(implicit config: PatienceConfig): U = {
val result = future.futureValue(PatienceConfig(timeout.value, config.interval))
fun(result)
}
// whenReady(future)(fun)(PatienceConfig(timeout.value, config.interval))
/**
* Queries the passed future repeatedly until it either is ready, or a configured maximum
* amount of time has passed, sleeping a configured interval between attempts; and when ready, passes the future's value
* to the passed function.
*
* <p>
* The maximum amount of time in milliseconds to tolerate unsuccessful attempts before giving up is configured by the <code>timeout</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* The interval to sleep between attempts is configured by the value contained in the passed
* <code>interval</code> parameter.
* </p>
*
* @param future the future to query
* @param interval the <code>Interval</code> configuration parameter
* @param fun the function to which pass the future's value when it is ready
   * @param config a <code>PatienceConfig</code> object whose <code>timeout</code> parameter is used by
   * this method; its <code>interval</code> is superseded by the <code>interval</code> parameter
* @return the result of invoking the <code>fun</code> parameter
*/
final def whenReady[T, U](future: FutureConcept[T], interval: Interval)(fun: T => U)(implicit config: PatienceConfig): U = {
val result = future.futureValue(PatienceConfig(config.timeout, interval.value))
fun(result)
}
// whenReady(future)(fun)(PatienceConfig(config.timeout, interval.value))
/**
* Queries the passed future repeatedly until it either is ready, or a configured maximum
* amount of time has passed, sleeping a configured interval between attempts; and when ready, passes the future's value
* to the passed function.
*
* <p>
* The maximum amount of time in milliseconds to tolerate unsuccessful attempts before giving up is configured by the <code>timeout</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* The interval to sleep between attempts is configured by the <code>interval</code> field of
* the <code>PatienceConfig</code> passed implicitly as the last parameter.
* </p>
*
* <p>
* If the <code>eitherValue</code> method of the underlying Scala future returns a <code>scala.Some</code> containing a
* <code>scala.util.Failure</code> containing a <code>java.util.concurrent.ExecutionException</code>, and this
* exception contains a non-<code>null</code> cause, that cause will be included in the <code>TestFailedException</code> as its cause. The
* <code>ExecutionException</code> will be be included as the <code>TestFailedException</code>'s cause only if the
   * <code>ExecutionException</code> will be included as the <code>TestFailedException</code>'s cause only if the
* </p>
*
*
* @param future the future to query
* @param fun the function to which pass the future's value when it is ready
   * @param config a <code>PatienceConfig</code> object containing the <code>timeout</code> and
   * <code>interval</code> parameters used by this method
* @return the result of invoking the <code>fun</code> parameter
*/
final def whenReady[T, U](future: FutureConcept[T])(fun: T => U)(implicit config: PatienceConfig): U = {
val result = future.futureValue(config)
fun(result)
}
}
| cheeseng/scalatest | scalatest/src/main/scala/org/scalatest/concurrent/Futures.scala | Scala | apache-2.0 | 33,320 |
package uk.gov.gds.ier.transaction.ordinary.dateOfBirth
import uk.gov.gds.ier.validation.{CountryValidator, ErrorTransformForm}
import play.api.mvc.Call
import play.api.templates.Html
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.transaction.ordinary.InprogressOrdinary
import play.api.data.validation.Constraint
import uk.gov.gds.ier.model.Country
import uk.gov.gds.ier.service.ScotlandService
trait DateOfBirthMustache extends StepTemplate[InprogressOrdinary] {
case class DateOfBirthModel(
question:Question,
day: Field,
month: Field,
year: Field,
noDobReason: Field,
isScot: Boolean,
rangeFieldSet: FieldSet,
rangeUnder18: Field,
rangeOver75: Field,
range18to75: Field,
range14to15_YoungScot: Field,
range16to17_YoungScot: Field,
rangeOver18_YoungScot: Field,
rangeDontKnow: Field,
noDobReasonShowFlag: Text,
emailField: Field
) extends MustacheData
val scotlandService: ScotlandService
val mustache = MultilingualTemplate("ordinary/dateOfBirth") { implicit lang => (form, post) =>
implicit val progressForm = form
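    // When the declared residence is "Abroad", the country of origin is used (with the
    // living-abroad flag set); otherwise the residence country itself applies.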
val country = (form(keys.country.residence).value, form(keys.country.origin).value) match {
case (Some("Abroad"), origin) => Country(origin.getOrElse(""), true)
case (residence, _) => Country(residence.getOrElse(""), false)
}
val emailAddress = form(keys.contact.email.detail).value
val postcode = form(keys.address.postcode).value.getOrElse("").toUpperCase
DateOfBirthModel(
question = Question(
postUrl = post.url,
errorMessages = Messages.translatedGlobalErrors(form),
title = Messages("ordinary_dob_title")
),
day = TextField(
key = keys.dob.dob.day
),
month = TextField(
key = keys.dob.dob.month
),
year = TextField(
key = keys.dob.dob.year
),
noDobReason = TextField(
key = keys.dob.noDob.reason
),
isScot = scotlandService.isScotByPostcodeOrCountry(postcode, country),
rangeFieldSet = FieldSet (
classes = if (form(keys.dob.noDob.range).hasErrors) "invalid" else ""
),
rangeUnder18 = RadioField(
key = keys.dob.noDob.range,
value = "under18"
),
range18to75 = RadioField(
key = keys.dob.noDob.range,
value = "18to75"
),
rangeOver75 = RadioField(
key = keys.dob.noDob.range,
value = "over75"
),
range14to15_YoungScot = RadioField(
key = keys.dob.noDob.range,
value = "14to15"
),
range16to17_YoungScot = RadioField(
key = keys.dob.noDob.range,
value = "16to17"
),
rangeOver18_YoungScot = RadioField(
key = keys.dob.noDob.range,
value = "over18"
),
rangeDontKnow = RadioField(
key = keys.dob.noDob.range,
value = "dontKnow"
),
noDobReasonShowFlag = Text (
value = progressForm(keys.dob.noDob.reason).value.map(noDobReason => "-open").getOrElse("")
),
emailField = TextField(
key = keys.contact.email.detail,
default = emailAddress
)
)
}
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/ordinary/dateOfBirth/DateOfBirthMustache.scala | Scala | mit | 3,222 |
package org.camunda.feel.api.context
import org.camunda.feel.context.VariableProvider
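/** A minimal VariableProvider backed by an in-memory map, used to resolve variable names in tests. */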
case class SimpleTestContext(context: Map[String, _]) extends VariableProvider {
override def getVariable(name: String): Option[Any] = {
    // context.get already returns Some(value) when the key is present and None otherwise, so the
    // explicit contains-check (which wrapped the value in a second Option) is unnecessary.
    context.get(name)
}
override def keys: Iterable[String] = context.keys
}
| camunda/feel-scala | src/test/scala/org/camunda/feel/api/context/SimpleTestContext.scala | Scala | apache-2.0 | 383 |
package monocle.law.function
import monocle.function._
import monocle.law.{IsoLaws, OptionalLaws, PrismLaws, TraversalLaws}
import org.scalacheck.Prop._
import org.scalacheck.{Arbitrary, Properties}
import scalaz.Equal
import scalaz.std.anyVal._
import scalaz.std.tuple._
import scalaz.syntax.equal._
/**
 * Laws that Optics for a sequence-like data structure should satisfy
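 *
 * For example (a sketch, assuming Monocle's `monocle.std.list._` instances and the relevant
 * scalaz instances are in scope):
 * {{{
 * include(SequenceLaws[List[Int], Int])
 * }}}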
*/
object SequenceLaws {
def apply[S, A](implicit aEq: Equal[A], aArb: Arbitrary[A],
sEq: Equal[S], sArb: Arbitrary[S],
evEmpty: Empty[S],
evReverse: Reverse[S, S],
evCons: Cons[S, A],
evSnoc: Snoc[S, A],
evEach: Each[S, A],
evIndex: Index[S, Int, A],
evFilterIndex: FilterIndex[S, Int, A]) = new Properties("Sequence") {
include(IsoLaws(reverse[S, S]))
include(PrismLaws(empty[S]))
include(PrismLaws(cons[S, A]))
include(PrismLaws(snoc[S, A]))
include(OptionalLaws(index(2)))
include(TraversalLaws(filterIndex[S, Int, A](_ % 2 == 0)))
include(TraversalLaws(each[S, A]))
property("cons == snoc . reverse") = forAll { as: List[A] =>
as.foldRight(_empty[S])(_cons) === as.foldLeft(_empty[S])(_snoc)
}
}
}
| CapeSepias/Monocle | law/src/main/scala/monocle/law/function/SequenceLaws.scala | Scala | mit | 1,350 |
package _99Problems.WorkingWithLists
/**
* Created by dan.dixey on 27/06/2017.
*/
object P28 {
/**
* Sorting a list of lists according to length of sublists.
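    * e.g. lsort(List(List(1, 2, 3), List(4), List(5, 6))) == List(List(4), List(5, 6), List(1, 2, 3))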
*/
def lsort[T](xa: List[List[T]]): List[List[T]] =
xa.sortWith((l1, l2) => l1.length - l2.length < 0)
/**
* Again, we suppose that a list (InList) contains elements that are lists themselves.
* But this time the objective is to sort the elements of InList according to their length frequency;
    * i.e. in the default, where sorting is done in ascending order, lists with rare lengths
* are placed first, others with a more frequent length come later.
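    * e.g. given sublists with lengths (1, 2, 2, 3, 3, 3), the single length-1 list comes first,
    * then the two length-2 lists, then the three length-3 lists.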
*/
def lsortFreq[T](list: List[List[T]]): List[List[T]] =
lsort(list)
.groupBy(l => l.length)
.values
.toList
.sortWith((l1, l2) => l1.length - l2.length < 0)
.flatten
}
| dandxy89/LearningScala | src/main/scala/_99Problems/WorkingWithLists/P28.scala | Scala | mit | 868 |