nmap scan

掃同網段有那些 IP (用 ping 的)

nmap -sP 192.168.1.0/24
nmap -sP 192.168.1.1-254

OS

nmap -O 192.168.1.1

Port

nmap 192.168.1.1 -p21

全面掃描

nmap -A -T4 192.168.1.1

只列出網段清單

nmap -sL 192.168.0.0/24

TCP SYN

nmap -sS 192.168.1.1

TCP connect

nmap -sT 192.168.1.1

UDP

nmap -sU 192.168.1.1

tor

prep

apt -y install tor deb.torproject.org-keyring
apt -y install torsocks
apt -y install socat
apt -y install expect

configuration

/etc/tor/torrc #may not need to modify it.

service up

service tor start

torsocks :test

torsocks ssh bbsu@ptt.cc -p 22

socat :test

socat TCP-LISTEN:4141 SOCKS4A:localhost:ptt.cc:22,socksport=9050 &
netstat -antup|grep socat #上一行使用9050的tor在4141建立起socket導向到ptt.cc:22
ssh bbsu@localhost -p 4141
netstat -antup|grep socat #port4141用後即毀

expectPtt.sh

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#!/usr/bin/expect
# expectPtt.sh — automated login to the PTT BBS over the local socat/Tor
# tunnel (localhost:4141, set up by socatPttDaily.sh), then quit.
# NOTE(review): credentials are hardcoded in plain text — consider reading
# them from the environment or a mode-600 file instead.
set BBS_ID "tabenyaki"
set BBS_PW "taroyaki"
# Wait up to 60 seconds for each expected prompt before giving up.
set timeout 60
#log_file -noappend $env(HOME)/expectPtt.log
# Append the full session transcript to ~/expectPtt.log.
log_file $env(HOME)/expectPtt.log
#spawn ssh -oBatchMode=no -oStrictHostKeyChecking=no bbsu@ptt.cc
# Connect through the local socat listener (port 4141) rather than
# directly to ptt.cc, so traffic goes via Tor.
spawn ssh -oBatchMode=no -oStrictHostKeyChecking=no bbsu@localhost -p 4141
# Pattern/action pairs below (prompts are in Chinese, left verbatim as they
# must match the server output byte-for-byte):
#   "請輸入代號"            -> send the user id
#   "請輸入您的密碼"        -> send the password
#   "您想刪除其他重複登入的連線嗎" / "您要刪除以上錯誤嘗試的記錄嗎"
#                           -> answer N (keep other sessions / keep records)
#   "任意鍵繼續"            -> press q to skip the splash screens
#   "密碼不對喔" / "裡沒有這個人啦" -> bad password / unknown user: abort
#   "請勿頻繁登入以免造成系統過度負荷" / "請按任意鍵繼續" -> press Enter
#   "oodbye"                -> matches "Goodbye"/"goodbye"; hand control
#                              to the terminal on logout
#   "批踢踢實業坊"          -> main banner reached; we are done, exit
expect {
"請輸入代號" { send "$BBS_ID\r" ; exp_continue }
"請輸入您的密碼" { send "$BBS_PW\r" ; exp_continue }
"您想刪除其他重複登入的連線嗎" { send "N\r" ; exp_continue }
"您要刪除以上錯誤嘗試的記錄嗎" { send "N\r" ; exp_continue }
"任意鍵繼續" { send "q\r" ; exp_continue }
"密碼不對喔" { exit }
"裡沒有這個人啦" { exit }
"請勿頻繁登入以免造成系統過度負荷" { send "\r" ; exp_continue }
"請按任意鍵繼續" { send "\r" ; exp_continue }
"oodbye" { interact }
"批踢踢實業坊" {exit}
}
exit

cron and log

socatPttDaily.sh

1
2
3
4
5
6
#!/bin/sh
# socatPttDaily.sh — open a one-shot tunnel localhost:4141 -> Tor (SOCKS on
# 9050) -> ptt.cc:22, then run the expect login script once.  The listener
# is single-use: socat exits after the first connection closes.
socat TCP-LISTEN:4141 SOCKS4A:localhost:ptt.cc:22,socksport=9050 &
socat_pid=$!
# Give socat a moment to bind the port before the client connects.
sleep 1
# Abort early if socat already died (port in use, tor not running, ...).
if ! kill -0 "$socat_pid" 2>/dev/null; then
  echo "socatPttDaily: socat failed to start listener on 4141" >&2
  exit 1
fi
# Diagnostic only: show the listening socket (-p needs root to show PIDs).
netstat -antup | grep socat
#ssh bbsu@localhost -p 4141
#netstat -antup|grep socat
/home/tabenyaki/expectPtt.sh

crontab -e

1
0 */3 * * * /home/tabenyaki/socatPttDaily.sh

crontab -l
cat /var/spool/cron/crontabs/tabenyaki

cat /var/log/cron.log
grep CRON /var/log/syslog

intellij idea CE for scala

download

https://www.jetbrains.com/idea/download/
version 2016.2.4
Community Edition for free. Ultimate Edition 30 days trial

tune option

C:\Program Files (x86)\JetBrains\IntelliJ IDEA Community Edition 2016.2.2\bin\idea.exe.vmoptions
or for x64
C:\Program Files (x86)\JetBrains\IntelliJ IDEA Community Edition 2016.2.2\bin\idea64.exe.vmoptions

  1. increase xms and xmx
    -Xms1128m
    -Xmx2750m

change keymap

File>Settings> search keymap
Change it from Default to Eclipse if you like.

disable folding

File>Setting> search fold
uncheck “Show code folding outline”

ubuntu disable ipv6, avahi-daemon

ipv6沒在用還開著可能使網路變慢
sudo vi /etc/sysctl.conf

1
2
3
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1

sudo sysctl -p #立即套用設定
reboot

ip a | grep inet6 #沒內容就已經關閉ipv6了

disable avahi-daemon

透過 Zeroconf 自動的分析與管理網路。 可以關閉他
systemctl list-units --all | grep avahi-daemon
systemctl stop avahi-daemon.service
systemctl stop avahi-daemon.socket
systemctl disable avahi-daemon.service avahi-daemon.socket
netstat -atunp

zookeeper3.4.8, scala2.11 and kafka0.10.0.1 on ubuntu16.04-server-amd64

zookeeper 3.4.8

#see http://zookeeper.apache.org/releases.html
wget http://apache.stu.edu.tw/zookeeper/zookeeper-3.4.8/zookeeper-3.4.8.tar.gz
tar zxvf zookeeper-3.4.8.tar.gz
mv zookeeper-3.4.8 /usr/local/zookeeper
cd /usr/local/zookeeper

configuration

vi conf/zoo.cfg

1
2
3
4
5
tickTime=2000
dataDir=/usr/local/zookeeper/data
clientPort=2181
initLimit=5
syncLimit=2
start & status

bin/zkServer.sh start
bin/zkServer.sh status
netstat|grep 2181
jps

use

bin/zkCli.sh

1
2
ls /
quit
stop

bin/zkServer.sh stop

scala 2.11

apt-get install scala #2.11.6-6
scala -version

kafka 0.10.0.1

wget http://apache.stu.edu.tw/kafka/0.10.0.1/kafka_2.11-0.10.0.1.tgz
tar zxvf kafka_2.11-0.10.0.1.tgz
mv kafka_2.11-0.10.0.1 /usr/local/kafka
cd /usr/local/kafka

start

netstat|grep 2181
netstat|grep 9092
jps

bin/zookeeper-server-start.sh config/zookeeper.properties #自帶zookeeper,可以不用自己裝。
bin/kafka-server-start.sh config/server.properties
netstat|grep 2181
netstat|grep 9092
jps
QuorumPeerMain
kafka
jps

use

bin/kafka-topics.sh --zookeeper localhost:2181 --list #list all topics
bin/kafka-topics.sh --zookeeper localhost:2181 --create --replication-factor 1 --partitions 1 --topic hellokafka #create topic hellokafka
bin/kafka-topics.sh --zookeeper localhost:2181 --list #list all topics
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic hellokafka #input some messages into topic hellokafka for test
bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic hellokafka --from-beginning #retrieve messages from topic hellokafka

stop

bin/zookeeper-server-stop.sh
bin/kafka-server-stop.sh

oracle-java8, hadoop2.6.4, hbase0.98.21 and mysql5.7 on ubuntu16.04-amd64

oracle-java 8

apt-get update
apt-get upgrade
#apt-get install default-jdk default-jre #openjdk-8*也可以

apt-get install software-properties-common # if ubuntu16-server-amd64
add-apt-repository ppa:webupd8team/java
apt-get update
#apt-get install oracle-java6-installer
#apt-get install oracle-java7-installer
apt-get install oracle-java8-installer

update-alternatives --config java
vi /etc/environment
JAVA_HOME="/usr/lib/jvm/java-8-oracle"
source /etc/environment
echo $JAVA_HOME
java -version

prepare hduser:group

addgroup hadoop
adduser --ingroup hadoop hduser
adduser hduser sudo #allow sudo
sudo su hduser
sudo apt-get install openssh-server ssh
ssh-keygen -t rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

hadoop 2.6.4

disable ipv6 #Hadoop doesn’t work on IPv6
wget http://apache.stu.edu.tw/hadoop/common/hadoop-2.6.4/hadoop-2.6.4.tar.gz
tar zxvf hadoop-2.6.4.tar.gz -C /usr/local
cd /usr/local
mv hadoop-2.6.4 hadoop
chown hduser:hadoop -R /usr/local/hadoop
cd /usr/local/hadoop
vi ~/.bashrc

1
2
3
4
5
6
7
8
9
10
11
12
#HADOOP VARIABLES START
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
#HADOOP VARIABLES END

source ~/.bashrc

cd /usr/local/hadoop/etc/hadoop/

vi /usr/local/hadoop/etc/hadoop/hadoop-env.sh

1
export JAVA_HOME=/usr/lib/jvm/java-8-oracle

vi /usr/local/hadoop/etc/hadoop/core-site.xml

1
2
3
4
5
6
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>

cp mapred-site.xml.template mapred-site.xml
vi /usr/local/hadoop/etc/hadoop/mapred-site.xml

1
2
3
4
5
6
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/hdfs-site.xml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.name.dir</name>
<value>file:///home/hduser/hadoopinfra/hdfs/namenode</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>file:///home/hduser/hadoopinfra/hdfs/datanode</value>
</property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/yarn-site.xml

1
2
3
4
5
6
7
8
9
10
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
init

bin/hdfs namenode -format

start

sbin/start-dfs.sh
sbin/start-yarn.sh
bin/hadoop version
jps
netstat -plten |grep java

use

bin/hadoop fs -df
bin/hadoop fs -ls /
sudo -u hduser bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.4.jar pi 2 5
#sudo -u hduser bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.4.jar pi 16 10000000

web ui

http://192.168.3.204:50070/ #hadoop services
http://192.168.3.204:8088/cluster #hadoop applications

stop

sbin/stop-yarn.sh
sbin/stop-dfs.sh

hbase 0.98.21

wget http://apache.stu.edu.tw/hbase/0.98.21/hbase-0.98.21-hadoop2-bin.tar.gz
tar zxvf hbase-0.98.21-hadoop2-bin.tar.gz -C /usr/local
cd /usr/local
mv hbase-0.98.21-hadoop2 hbase
chown hduser:hadoop -R /usr/local/hbase
cd /usr/local/hbase
vi conf/hbase-site.xml
hbase.rootdir=file:///home/hduser/hbase/hfiles or hdfs://localhost:9000/hbase
hbase.zookeeper.property.dataDir=/home/hduser/zookeeper
其它hbase.zookeeper相關改port什麼的就隨意

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
<property>
<name>hbase.rootdir</name>
<value>file:///home/hduser/hbase/hfiles</value>
<!--
<value>hdfs://localhost:9000/hbase</value>
-->

</property>

<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/home/hduser/zookeeper</value>
</property>
<property>
<name>hbase.zookeeper.peerport</name>
<value>2889</value>
</property>
<property>
<name>hbase.zookeeper.leaderport</name>
<value>3889</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2182</value>
</property>

vi /usr/local/hbase/conf/hbase-env.sh

1
2
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export HBASE_HOME=/usr/local/hbase
start

bin/start-hbase.sh #export HBASE_MANAGES_ZK=true #控制使用內建的zookeeper

use

jps
bin/hbase shell

1
2
3
4
5
6
7
8
9
status
list
create 'test','cf'
is_enabled 'test'
put 'test','rowkey1','cf:name','alex'
put 'test','rowkey2','cf:name','banana'
put 'test','rowke31','cf:nickname','charlie'
scan 'test'
exit
web ui

http://192.168.3.204:60010/

stop

bin/stop-hbase.sh

logs

tail logs/hbase-hduser-master-ubuntu.out
tail logs/hbase-hduser-zookeeper-ubuntu.out

mysql 5.7

apt -y update
apt -y upgrade
apt -y install mysql-server mysql-client #mysql5.7
mysql_secure_installation
systemctl status mysql
service mysql status
mysql -u root -p #enter password

1
2
3
4
5
6
7
8
status
create database test;
grant all on test.* to 'leon' identified by 'Pw12345678';
select * from mysql.user;
use test;
show tables;
show databases;
quit

vi /etc/mysql/mysql.conf.d/mysqld.cnf

Package Project into JAR in Eclipse

印象中早期eclipse提供的jar packing功能很陽春的,就classes下包成jar檔,最多再附個META-INF/MANIFEST.MF。
最近在找比較方便,可以連dependencies都一起打包的機制。結果一查,其實新版(Mars2)已經可以連dependencies一起打包了,不需要用上maven。

File/Export > Runnable JAR file

1. Extract required libraries into generated JAR

可能是最常見常用的打包方式。
所有dependencies統統解開然後跟build出來的class檔,混在一起包成JAR檔。優點不用說就是狂,簡單粗暴。缺點是META-INF/路徑下混合了所有dependencies解開後的META-INF資料夾,相對混亂,也可能會造成重複檔案覆蓋的問題。

2. Package required libraries into generated JAR

這個跟下面的3有點像,但是把所有dependencies放在jar檔內的根目錄下。
可是jar in jar是無法被java classpath的預設機制使用到的,所以META-INF/MANIFEST.MF裡Main-Class會指定成eclipse提供的org.eclipse.jdt.internal.jarinjarloader.JarRsrcLoader及加上Rsrc*設定來載入jar檔並啟動main class。
雖然這是個人覺得比較漂亮的打包,但就像上面說的;多了一層eclipse客制的機制還是讓人覺得有點阿雜。

3. Copy required libraries into sub-folder next to generated JAR

在包出來的JAR檔所在目錄下會建一個跟JAR檔同名的目錄,裡面放相依的dependencies。
JAR檔內的META-INF/MANIFEST.MF裡的Class-Path會指定那些dependencies的相依路徑。只要相對路徑不變,就可以直接用java指定跑程式,缺點就是搬移jar時多一個外部folder要拷貝。

windows tips

uptime

net statistics server
或是開windows task manager(工作管理員) > performance(效能) > up time (存留時間)

linux
uptime

installation date

systeminfo | find "Original"
systeminfo | find /i "Original"
systeminfo | find "System Boot Time:"
systeminfo | find /i "install date"
C:>wmic os get installdate
InstallDate
20141213185722.000000+480

linux
root@ubuntu:~# ls -ld /var/log/installer
drwxr-xr-x 3 root root 4096 Aug 16 09:17 /var/log/installer

jenkins on docker

docker pull jenkins #2016aug24 目前pull到的是2.7.2 LTS @see https://jenkins-ci.org https://jenkins.io

prepare

mkdir /home/jenkins_dir
chown 1000:docker /home/jenkins_dir
docker run -d --name jenkins -p 8080:8080 -v /home/jenkins_dir:/var/jenkins_home jenkins

第一次啟動,使用initialAdminPassword

cat /home/jenkins_dir/secrets/initialAdminPassword

docker logs jenkins

web ui

http://192.168.3.204:8080/ #第一次啟動使用initialAdminPassword後,會要你選要安裝的plugins

裝完plugins後。建立user account

coscup2016

司改會

全民司法改革運動
小市民權益保護99招

搭配cassandra跟kafka這種底層用commitlog機制創造scale-out可能性的工具去理解

萬事萬物皆是log

這是曾在linkedin任職Principal Staff Engineer使用kafka建立以log為系統架構核心的Jay Kreps所推行,有別於lambda architecture的kappa architecture概念。
more
https://medium.com/@poga/萬事萬物皆是-log-參考資料-340737133a94
https://videlalvaro.github.io/2015/12/learning-about-distributed-systems.html
http://www.confluent.io/blog/turning-the-database-inside-out-with-apache-samza/
https://www.oreilly.com/ideas/questioning-the-lambda-architecture
http://milinda.pathirage.org/kappa-architecture.com/

https://loveforprogramming.quora.com/Distributed-Systems-Part-1-A-peek-into-consistent-hashing
https://loveforprogramming.quora.com/Distributed-Systems-Part-2-Consistency-versus-Availability-A-Pragmatic-Example
https://loveforprogramming.quora.com/Distributed-Systems-Part-3-Managing-Anti-Entropy-using-Merkle-Trees

DevOps工具箱 介紹不少實用工具

OpenSource 的 DevOps工具箱
hackpad