大数据学习-scala(1)
来源:互联网 发布:知乎创意体验馆 编辑:程序博客网 时间:2024/05/14 00:29
1.scala是纯面向对象的
如1,在java中是基本类型,不是对象,
1在scala中是对象,有方法
2.scala有函数
3.val定义不可变变量
4.var定义可变变量
var name = "Scala"
name = "Spark"
5.不指定类型时会自动进行类型推断,也可以手动指定类型
val age :Int = 2;
6.同时定义多个变量并赋相同的初始值
var age1,age2,age3 = 0;
7.to
0.to(5)
自动进行隐式转换
8.+号是方法
1.+(1)
9.scala中没有++,--
10.条件判断
val age = 19
age: Int = 19
scala> if(age >= 18) "adult"
11.scala中函数体(代码块)最后一行表达式的值就是返回值,不需要写return
12.打印输出println("spark")
13.print("\n spark")
14.printf()
15.readLine
16.if else
17.while
18.for(i<-0 to element)println(i)
19.定义函数
进入paste模式
:paste
val n = 10
def f1:Int = {
for(i<-1 to 10){
if(i==n)return i
println(i)
}
return 0
}
// Exiting paste mode, now interpreting.
n: Int = 10
f1: Int
scala> f1
1
2
3
4
5
6
7
8
9
res0: Int = 10
scala> println _
res1: () => Unit = <function0>
scala> def f2 = println
f2: Unit
scala> f2("good")
<console>:9: error: Unit does not take parameters
f2("good")
^
scala> def f3(param1 : String,param2:int=20) = param1+param2
<console>:7: error: not found: type int
def f3(param1 : String,param2:int=20) = param1+param2
^
scala> def f3(param1 : String,param2:Int=20) = param1+param2
f3: (param1: String, param2: Int)String
scala> f3("spark")
res3: String = spark20
scala> f3(param2=100,param1="scala")
res4: String = scala100
sum (1 to 100: _*)
: _* 把序列中的每个元素拆开,作为单独的参数逐个传给可变参数(varargs)方法
scala> def sum(numbers : Int*)={var result =0; for(element <- numbers) result += element;result}
sum: (numbers: Int*)Int
scala> sum(1,2,3,4,5,6,7,8)
res5: Int = 36
scala> sum (1 to 100: _*)
res6: Int = 5050
scala> def morning(content: String) = "Good " + content
morning: (content: String)String
scala> morning("good")
res7: String = Good good
scala> def morning(content: String) {println( "Good " + content)}
morning: (content: String)Unit
scala> morning("luck")
Good luck
scala>
过程(procedure)就是没有返回值(返回类型为Unit)的函数,只为执行副作用而调用
scala> import scala.io.Source._
import scala.io.Source._
scala> lazy val contecnt = fromFile("/root/adsfa")
contecnt: scala.io.BufferedSource = <lazy>
20.懒加载和打印文件
scala> val contecnt = fromFile("/root/adsfa")
java.io.FileNotFoundException: /root/adsfa (没有那个文件或目录)
at java.io.FileInputStream.open(Native Method)
at java.io.FileInputStream.<init>(FileInputStream.java:146)
at scala.io.Source$.fromFile(Source.scala:90)
at scala.io.Source$.fromFile(Source.scala:75)
at scala.io.Source$.fromFile(Source.scala:53)
scala> lazy val contecnt = fromFile("/hadoop/hivedata/a.txt").mkString
contecnt: String = <lazy>
scala> val content = fromFile("/hadoop/hivedata/a.txt").mkString
content: String =
1 w
3 r
5 e
5 v
21.try catch finally
scala> lazy val contecnt = fromFile("/hadoop/hivedata/a.txt").mkString
contecnt: String = <lazy>
scala> val content = fromFile("/hadoop/hivedata/a.txt").mkString
content: String =
1 w
3 r
5 e
5 v
22.array数组
scala> val arr = new Array[Int](5)
arr: Array[Int] = Array(0, 0, 0, 0, 0)
scala> arr(3)
res10: Int = 0
scala> arr(3)=5
scala> arr(3)
res12: Int = 5
scala> arr
res13: Array[Int] = Array(0, 0, 0, 5, 0)
scala> val arr2 = Array("Scala","Spark")
arr2: Array[String] = Array(Scala, Spark)
scala> arr2(1)
res14: String = Spark
scala> arr2(0)
res15: String = Scala
scala> import scala.collection.mutable.Array
ArrayBuffer ArrayBuilder ArrayLike ArrayOps ArraySeq ArrayStack
scala> import scala.collection.mutable.ArrayBu
ArrayBuffer ArrayBuilder
scala> import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.ArrayBuffer
scala> val arrBuffer = ArrayBuffer[Int]()
arrBuffer: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer()
scala> arrBuffer +=10
res16: arrBuffer.type = ArrayBuffer(10)
scala> arrBuffer
res17: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer(10)
scala> arrBuffer +=(2 to 10)
<console>:13: error: type mismatch;
found : scala.collection.immutable.Range.Inclusive
required: Int
arrBuffer +=(2 to 10)
^
scala> arrBuffer +=(2 to 10 _*)
<console>:13: error: _ must follow method; cannot follow Int(10)
arrBuffer +=(2 to 10 _*)
^
scala> arrBuffer +=(2,5,6 )
res20: arrBuffer.type = ArrayBuffer(10, 2, 5, 6)
scala> arrBuffer ++= Array(5,6,7)
res21: arrBuffer.type = ArrayBuffer(10, 2, 5, 6, 5, 6, 7)
scala> arrBuffer.trimEnd(7)
scala> arrBuffer
res23: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer()
scala> arrBuffer
res24: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer()
scala> arrBuffer +=10
res25: arrBuffer.type = ArrayBuffer(10)
scala> arrBuffer +=(5,6,7)
res26: arrBuffer.type = ArrayBuffer(10, 5, 6, 7)
scala> arrBuffer.insert(2,333)
scala> arrBuffer
res28: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer(10, 5, 333, 6, 7)
scala> arrBuffer.insert(3,333,555,666,777)
scala> arrBuffer
res30: scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer(10, 5, 333, 333, 555, 666, 777, 6, 7)
scala>
scala> arrBuffer.remove(2)
res31: Int = 333
scala> val arr2 = arr
arr arr2 arrBuffer
scala> val arr2 = arrBuffer.toArray
arr2: Array[Int] = Array(10, 5, 333, 555, 666, 777, 6, 7)
scala> arr2.toBuffer
res32: scala.collection.mutable.Buffer[Int] = ArrayBuffer(10, 5, 333, 555, 666, 777, 6, 7)
scala> for(elem <- arr2) println(elem)
10
5
333
555
666
777
6
7
scala> for(i <- 1 until(arr2.length,1)) println(arr2(i))
5
333
555
666
777
6
7
scala> for(i <- 1 until(arr2.length,2)) println(arr2(i))
5
555
777
7
scala> arr2.sum
res37: Int = 2359
scala> scala.util.Sorting.quickSort(arr2)
scala> arr2
res40: Array[Int] = Array(5, 6, 7, 10, 333, 555, 666, 777)
scala> arr2.mkString
res41: String = 56710333555666777
scala> arr2.mkString(" ,")
res42: String = 5 ,6 ,7 ,10 ,333 ,555 ,666 ,777
scala> val arr3 = for(i <- arr2) yield i*i
arr3: Array[Int] = Array(25, 36, 49, 100, 110889, 308025, 443556, 603729)
scala> val arr3 = for(i <- arr2 if(i%3 ==0)) yield i*i
arr3: Array[Int] = Array(36, 110889, 308025, 443556, 603729)
scala> arr2.filter{_%3 ==0}map{i =>i*i}
res43: Array[Int] = Array(36, 110889, 308025, 443556, 603729)
23.map
scala> val persons = Map("spark" ->6,"hadoop" -> 11)
persons: scala.collection.immutable.Map[String,Int] = Map(spark -> 6, hadoop -> 11)
scala> persons(spark)
<console>:13: error: not found: value spark
persons(spark)
^
scala> persons("spark")
res45: Int = 6
scala> val persons = scala.collection.mutable.Map("spark" ->6,"hadoop" -> 11)
persons: scala.collection.mutable.Map[String,Int] = Map(hadoop -> 11, spark -> 6)
scala> persons +=("flink" -> 5)
res47: persons.type = Map(hadoop -> 11, spark -> 6, flink -> 5)
scala> val sparkValue = persons.getOrElse("spark",1000)
sparkValue: Int = 6
scala> for(key <- persons.keySet) println(key +":")
hadoop:
spark:
flink:
scala> val persons =scala.collection.immutable.SortedMap("spark" ->6,"hadoop" -> 11)
persons: scala.collection.immutable.SortedMap[String,Int] = Map(hadoop -> 11, spark -> 6)
scala> persons
res49: scala.collection.immutable.SortedMap[String,Int] = Map(hadoop -> 11, spark -> 6)
0 0
- 大数据学习-scala(1)
- Scala 学习大数据
- 大数据系列-Scala学习1
- 大数据学习-scala作业(2)
- 大数据学习之Scala中数据类型(3)
- 大数据学习之Scala的环境搭建和Hello World程序的实现(1)
- 大数据工程师为什么要学习scala
- 大数据系列-Scala学习2
- 大数据系列-Scala学习3
- 大数据系列-Scala学习4
- 大数据系列修炼-Scala课程96(1)
- 大数据学习之Scala中列表(List)的使用学习(5)
- 大数据学习之Scala中main函数的分析以及基本规则(2)
- Scala学习到精通是spark大数据
- 学习大数据的第一步-搭建Scala开发环境,以及使用Intellij IDEA开发Scala程序
- 学习大数据的第一步-搭建Scala开发环境,以及使用Intellij IDEA开发Scala程序
- 学习大数据的第一步-搭建Scala开发环境,以及使用Intellij IDEA开发Scala程序
- 大数据 spark scala语言
- ORA-03113: End-of-file on Communication Channel Upon Startup of Database
- MySQL Replication 常用架构
- spring01 环境搭建、控制反转、依赖注入、springIOC和DI实现MVC模式
- 基于HBase做Storm 实时计算指标存储
- PhoneWindow,ViewRoot,Activity之间的大致关系 - 3
- 大数据学习-scala(1)
- Swift开发IOS-UILabel
- Programming Computer Vision with Python (学习笔记十二)
- 多种方法修改Mysql root密码
- HTML5标准程序--画一个半圆
- 技术合伙人之殇
- sql server 2008 创建作业执行失败
- spring02 注解方式实现MVC、spring的继承、代理模式(静/动) :jdk动态代理,cglib动态代理
- 如何基于Yarn开发你的分布式程序