@Test
public void shareSingleSparkContext() throws InterruptedException {
  // create another SparkInterpreter
  SparkInterpreter repl2 = new SparkInterpreter(getSparkTestProperties());
  repl2.setInterpreterGroup(intpGroup);
  intpGroup.get("note").add(repl2);
  repl2.open();

  assertEquals(Code.SUCCESS,
      repl.interpret("print(sc.parallelize(1 to 10).count())", context).code());
  assertEquals(Code.SUCCESS,
      repl2.interpret("print(sc.parallelize(1 to 10).count())", context).code());

  repl2.close();
}
@Test
public void testZShow() {
  String code = "";
  repl.interpret("case class Person(name:String, age:Int)\n", context);
  repl.interpret(
      "val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\", 51), Person(\"gates\", 51), Person(\"park\", 34)))\n",
      context);
  if (getSparkVersionNumber() < 13) {
    // toDF is only available from Spark 1.3 on; fall back to a temp table and SQL
    repl.interpret("people.registerTempTable(\"people\")", context);
    code = "z.show(sqlc.sql(\"select * from people\"))";
  } else {
    code = "z.show(people.toDF)";
  }
  assertEquals(Code.SUCCESS, repl.interpret(code, context).code());
}
@Test
public void testReferencingUndefinedVal() {
  // 'value' is never defined, so interpretation must fail
  InterpreterResult result = repl.interpret(
      "def category(min: Int) = {"
          + "  if (0 <= value) \"error\""
          + "}",
      context);
  assertEquals(Code.ERROR, result.code());
}
@Test
public void testBasicIntp() {
  assertEquals(InterpreterResult.Code.SUCCESS,
      repl.interpret("val a = 1\nval b = 2", context).code());

  // when interpreting an incomplete expression
  InterpreterResult incomplete = repl.interpret("val a = \"\"\"", context);
  assertEquals(InterpreterResult.Code.INCOMPLETE, incomplete.code());
  assertTrue(incomplete.message().length() > 0); // expecting some error message

  /*
   * assertEquals(1, repl.getValue("a")); assertEquals(2, repl.getValue("b"));
   * repl.interpret("val ver = sc.version");
   * assertNotNull(repl.getValue("ver")); assertEquals("HELLO\n",
   * repl.interpret("println(\"HELLO\")").message());
   */
}
@Test
public void testCreateDataFrame() {
  if (getSparkVersionNumber() >= 13) {
    repl.interpret("case class Person(name:String, age:Int)\n", context);
    repl.interpret(
        "val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\", 51), Person(\"gates\", 51), Person(\"park\", 34)))\n",
        context);
    repl.interpret("people.toDF.count", context);
    // the result of the last statement should be published to the resource pool
    assertEquals(Long.valueOf(4),
        context.getResourcePool()
            .get(context.getNoteId(), context.getParagraphId(),
                WellKnownResourceName.ZeppelinReplResult.toString())
            .get());
  }
}
@Test
public void testSparkSql() {
  repl.interpret("case class Person(name:String, age:Int)\n", context);
  repl.interpret(
      "val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\", 51), Person(\"gates\", 51), Person(\"park\", 34)))\n",
      context);
  assertEquals(Code.SUCCESS, repl.interpret("people.take(3)", context).code());

  if (getSparkVersionNumber() <= 11) {
    // Spark 1.2 and later do not allow creating multiple SparkContexts in the same JVM
    // by default, so run this part only on older versions.
    // create a new interpreter
    SparkInterpreter repl2 = new SparkInterpreter(getSparkTestProperties());
    repl2.setInterpreterGroup(intpGroup);
    intpGroup.get("note").add(repl2);
    repl2.open();

    repl2.interpret("case class Man(name:String, age:Int)", context);
    repl2.interpret(
        "val man = sc.parallelize(Seq(Man(\"moon\", 33), Man(\"jobs\", 51), Man(\"gates\", 51), Man(\"park\", 34)))",
        context);
    assertEquals(Code.SUCCESS, repl2.interpret("man.take(3)", context).code());
    repl2.close();
  }
}
@Test
public void testEnableImplicitImport() {
  if (getSparkVersionNumber() >= 13) {
    // Set the implicit import option to "true" and initialize a new Spark repl
    Properties p = getSparkTestProperties();
    p.setProperty("zeppelin.spark.importImplicit", "true");
    SparkInterpreter repl2 = new SparkInterpreter(p);
    repl2.setInterpreterGroup(intpGroup);
    intpGroup.get("note").add(repl2);
    repl2.open();

    String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
    assertEquals(Code.SUCCESS, repl2.interpret(ddl, context).code());
    repl2.close();
  }
}
@Test
public void testDisableImplicitImport() {
  if (getSparkVersionNumber() >= 13) {
    // Set the implicit import option to "false" and initialize a new Spark repl;
    // with implicits disabled, creating a DataFrame from a Seq should fail
    Properties p = getSparkTestProperties();
    p.setProperty("zeppelin.spark.importImplicit", "false");
    SparkInterpreter repl2 = new SparkInterpreter(p);
    repl2.setInterpreterGroup(intpGroup);
    intpGroup.get("note").add(repl2);
    repl2.open();

    String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
    assertEquals(Code.ERROR, repl2.interpret(ddl, context).code());
    repl2.close();
  }
}
@Test
public void testEndWithComment() {
  assertEquals(InterpreterResult.Code.SUCCESS,
      repl.interpret("val c=1\n//comment", context).code());
}
@Test
public void testNextLineCompanionObject() {
  // a companion object separated from its class by comments and blank lines
  // should still be interpreted together with the class
  String code =
      "class Counter {\nvar value: Long = 0\n}\n // comment\n\n object Counter {\n def apply(x: Long) = new Counter()\n}";
  assertEquals(InterpreterResult.Code.SUCCESS, repl.interpret(code, context).code());
}
@Test
public void testNextLineComments() {
  assertEquals(InterpreterResult.Code.SUCCESS,
      repl.interpret("\"123\"\n/*comment here\n*/.toInt", context).code());
}
@Test
public void testNextLineInvocation() {
  assertEquals(InterpreterResult.Code.SUCCESS,
      repl.interpret("\"123\"\n.toInt", context).code());
}