From 015ab7fdf3cdfbce839decb5fe76f0f5cd7b55d8 Mon Sep 17 00:00:00 2001 From: "duwenkai@antiy.cn" Date: Wed, 15 May 2019 13:41:15 +0800 Subject: [PATCH] =?UTF-8?q?ADD:=20=E6=B7=BB=E5=8A=A0k8s=E7=9B=B8=E5=85=B3?= =?UTF-8?q?=E5=AD=A6=E4=B9=A0=E8=AE=B0=E5=BD=95.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- DevOps/Kubernetes/DockerPlayground.md | 309 ++++++++++++++++++ .../Kubernetes \346\246\202\345\277\265.md" | 26 +- .../Kubernetes-\345\216\237\347\220\206.md" | 197 +++++++++++ .../Q&A/kubeadm\344\273\243\347\220\206.md" | 38 +++ DevOps/Kubernetes/kubeadm-playround.md | 132 ++++++++ DevOps/Kubernetes/kubernetes.md | 26 ++ DevOps/Kubernetes/setup.md | 75 +++++ .../docker\345\274\200\345\217\221/docker.md" | 18 + .../CALDERA.md" | 28 ++ .../HIDS.md" | 24 ++ ...45\345\205\267\350\265\204\346\272\220.md" | 43 --- ...70\347\224\250\345\221\275\344\273\244.md" | 69 ++++ .../Linux/iptables.md" | 4 +- .../DNS/DNS.md" | 5 + .../Golang/Golang.md" | 3 +- .../Golang/GolangTips.md" | 33 ++ .../tools/CentOS7/change_yum_and_kernel.sh" | 2 - .../tools/CentOS7/install_pkg.sh" | 4 +- .../ProxmoxVE.md" | 7 + 19 files changed, 969 insertions(+), 74 deletions(-) create mode 100644 DevOps/Kubernetes/DockerPlayground.md rename "\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/Kubernetes.md" => "DevOps/Kubernetes/Kubernetes \346\246\202\345\277\265.md" (53%) create mode 100644 "DevOps/Kubernetes/Kubernetes-\345\216\237\347\220\206.md" create mode 100644 "DevOps/Kubernetes/Q&A/kubeadm\344\273\243\347\220\206.md" create mode 100644 DevOps/Kubernetes/kubeadm-playround.md create mode 100644 DevOps/Kubernetes/kubernetes.md create mode 100644 DevOps/Kubernetes/setup.md rename "\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/docker.md" => "DevOps/docker\345\274\200\345\217\221/docker.md" (94%) create mode 100644 
"\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/CALDERA.md" create mode 100644 "\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/HIDS.md" create mode 100644 "\345\270\270\350\247\201\345\215\217\350\256\256/DNS/DNS.md" create mode 100644 "\347\274\226\347\250\213\350\257\255\350\250\200/Golang/GolangTips.md" create mode 100644 "\350\277\220\347\273\264/\350\231\232\346\213\237\345\214\226\345\267\245\345\205\267/ProxmoxVE.md" diff --git a/DevOps/Kubernetes/DockerPlayground.md b/DevOps/Kubernetes/DockerPlayground.md new file mode 100644 index 0000000..bdd19a3 --- /dev/null +++ b/DevOps/Kubernetes/DockerPlayground.md @@ -0,0 +1,309 @@ +## 尝试k8s沙箱环境 + +```sh + + WARNING!!!! + + This is a sandbox environment. Using personal credentials + is HIGHLY! discouraged. Any consequences of doing so, are + completely the user's responsibilites. + + You can bootstrap a cluster as follows: + + 1. Initializes cluster master node: + + kubeadm init --apiserver-advertise-address $(hostname -i) + + + 2. Initialize cluster networking: + + kubectl apply -n kube-system -f \ + "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 |tr -d '\n')" + + + 3. (Optional) Create an nginx deployment: + + kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/cn/docs/user-guide/nginx-app.yaml + + + The PWK team. +``` + +### 试用第一个命令 + +```sh +[node1 ~]$ kubeadm init --apiserver-advertise-address $(hostname -i) +Initializing machine ID from random generator. +[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters. 
+[init] Using Kubernetes version: v1.8.15 +[init] Using Authorization modes: [Node RBAC] +[preflight] Skipping pre-flight checks +[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0) +[certificates] Generated ca certificate and key. +[certificates] Generated apiserver certificate and key. +[certificates] apiserver serving cert is signed for DNS names [node1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.18] +[certificates] Generated apiserver-kubelet-client certificate and key. +[certificates] Generated sa key and public key. +[certificates] Generated front-proxy-ca certificate and key. +[certificates] Generated front-proxy-client certificate and key. +[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki" +[kubeconfig] Wrote KubeConfig file to disk: "admin.conf" +[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf" +[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf" +[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf" +[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml" +[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml" +[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml" +[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml" +[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" +[init] This often takes around a minute; or longer if the control plane images have to be pulled. 
+[apiclient] All control plane components are healthy after 32.002605 seconds +[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[markmaster] Will mark node node1 as master by adding a label and a taint +[markmaster] Master node1 tainted and labelled with key/value: node-role.kubernetes.io/master="" +[bootstraptoken] Using token: df0dca.acb50a30486ace08 +[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: kube-dns +[addons] Applied essential addon: kube-proxy + +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run (as a regular user): + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + http://kubernetes.io/docs/admin/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join --token df0dca.acb50a30486ace08 192.168.0.18:6443 --discovery-token-ca-cert-hash sha256:4b3bd5451974ea29e92d00c83123b442a428f609c6800bbf7d50b135c948a8b9 + +Waiting for api server to startup........... 
+Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply +daemonset "kube-proxy" configured +No resources found +``` + + + +```sh +[node1 ~]$ kubeadm config view +api: + advertiseAddress: 192.168.0.18 + bindPort: 6443 +authorizationModes: +- Node +- RBAC +certificatesDir: /etc/kubernetes/pki +cloudProvider: "" +etcd: + caFile: "" + certFile: "" + dataDir: /var/lib/etcd + endpoints: null + image: "" + keyFile: "" +imageRepository: gcr.io/google_containers +kubernetesVersion: v1.8.15 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +nodeName: node1 +token: "" +tokenTTL: 24h0m0s +unifiedControlPlaneImage: "" + +[node1 ~]$ kubectl version +Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.11", GitCommit:"1df6a8381669a6c753f79cb31ca2e3d57ee7c8a3", GitTreeState:"clean", BuildDate:"2018-04-05T17:24:03Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"} +Server Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.15", GitCommit:"c2bd642c70b3629223ea3b7db566a267a1e2d0df", GitTreeState:"clean", BuildDate:"2018-07-11T17:52:15Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"} + + +[node1 ~]$ kubectl api-versions +apiextensions.k8s.io/v1beta1 +apiregistration.k8s.io/v1beta1 +apps/v1beta1 +apps/v1beta2 +authentication.k8s.io/v1 +authentication.k8s.io/v1beta1 +authorization.k8s.io/v1 +authorization.k8s.io/v1beta1 +autoscaling/v1 +autoscaling/v2beta1 +batch/v1 +batch/v1beta1 +certificates.k8s.io/v1beta1 +extensions/v1beta1 +networking.k8s.io/v1 +policy/v1beta1 +rbac.authorization.k8s.io/v1 +rbac.authorization.k8s.io/v1beta1 +storage.k8s.io/v1 +storage.k8s.io/v1beta1 +v1 + +[node1 ~]$ kubectl config +Modify kubeconfig files using subcommands like "kubectl config set current-context my-context" + +The loading order follows these rules: + + 1. If the --kubeconfig flag is set, then only that file is loaded. 
The flag may only be set once and no merging takes +place. + 2. If $KUBECONFIG environment variable is set, then it is used a list of paths (normal path delimitting rules for your +system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a +value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last +file in the list. + 3. Otherwise, ${HOME}/.kube/config is used and no merging takes place. + +Available Commands: + current-context Displays the current-context + delete-cluster Delete the specified cluster from the kubeconfig + delete-context Delete the specified context from the kubeconfig + get-clusters Display clusters defined in the kubeconfig + get-contexts Describe one or many contexts + rename-context Renames a context from the kubeconfig file. + set Sets an individual value in a kubeconfig file + set-cluster Sets a cluster entry in kubeconfig + set-context Sets a context entry in kubeconfig + set-credentials Sets a user entry in kubeconfig + unset Unsets an individual value in a kubeconfig file + use-context Sets the current-context in a kubeconfig file + view Display merged kubeconfig settings or a specified kubeconfig file + +Usage: + kubectl config SUBCOMMAND [options] + +Use "kubectl --help" for more information about a given command. +Use "kubectl options" for a list of global command-line options (applies to all commands). 
+ +[node1 ~]$ kubectl config view +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: REDACTED + server: https://192.168.0.18:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: REDACTED + client-key-data: REDACTED + +[node1 ~]$ kubectl config get-clusters +NAME +kubernetes + +[node1 ~]$ kubectl config get-contexts +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* kubernetes-admin@kubernetes kubernetes kubernetes-admin + +[node1 ~]$ kubectl config current-context +kubernetes-admin@kubernetes +``` + +## 调用第二个命令 + +```sh +[node1 ~]$ kubectl apply -n kube-system -f \ +> "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 |tr -d '\n')" +serviceaccount "weave-net" created +clusterrole "weave-net" created +clusterrolebinding "weave-net" created +role "weave-net" created +rolebinding "weave-net" created +daemonset "weave-net" created +``` + +### 调用第三个命令 + +```sh +[node1 ~]$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/cn/docs/user-guide/nginx-app.yaml +service "my-nginx-svc" created +deployment "my-nginx" created +``` + +```sh +[node1 ~]$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +d20e811ba46f gcr.io/google_containers/k8s-dns-sidecar-amd64 "/sidecar --v=2 --lo…" About a minute ago Up About a minute k8s_sidecar_kube-dns-545bc4bfd4-54pj7_kube-system_5dbef6ab-a514-11e8-8000-0242feac80ab_0 +5f662519973c gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64 "/dnsmasq-nanny -v=2…" About a minute ago Up About a minute k8s_dnsmasq_kube-dns-545bc4bfd4-54pj7_kube-system_5dbef6ab-a514-11e8-8000-0242feac80ab_0 +06fc97724223 gcr.io/google_containers/k8s-dns-kube-dns-amd64 "/kube-dns --domain=…" About a minute ago Up About a minute 
k8s_kubedns_kube-dns-545bc4bfd4-54pj7_kube-system_5dbef6ab-a514-11e8-8000-0242feac80ab_0 +a86fdbc84cbf k8s.gcr.io/pause-amd64:3.1 "/pause" About a minute ago Up About a minute k8s_POD_kube-dns-545bc4bfd4-54pj7_kube-system_5dbef6ab-a514-11e8-8000-0242feac80ab_0 +7ec76b4081fd weaveworks/weave-npc "/usr/bin/weave-npc" About a minute ago Up About a minute k8s_weave-npc_weave-net-59fp5_kube-system_c4849d74-a515-11e8-8000-0242feac80ab_0 +0b9b40dd60e1 weaveworks/weave-kube "/home/weave/launch.…" About a minute ago Up About a minute k8s_weave_weave-net-59fp5_kube-system_c4849d74-a515-11e8-8000-0242feac80ab_0 +44eec6d709a6 k8s.gcr.io/pause-amd64:3.1 "/pause" About a minute ago Up About a minute k8s_POD_weave-net-59fp5_kube-system_c4849d74-a515-11e8-8000-0242feac80ab_0 +dfae24e15cdc gcr.io/google_containers/kube-proxy-amd64 "/usr/local/bin/kube…" 11 minutes ago Up 11 minutes k8s_kube-proxy_kube-proxy-slhnz_kube-system_5db19be4-a514-11e8-8000-0242feac80ab_0 +3c55746f18b8 k8s.gcr.io/pause-amd64:3.1 "/pause" 11 minutes ago Up 11 minutes k8s_POD_kube-proxy-slhnz_kube-system_5db19be4-a514-11e8-8000-0242feac80ab_0 +033fa5844695 6015bf4bb1f1 "kube-controller-man…" 11 minutes ago Up 11 minutes k8s_kube-controller-manager_kube-controller-manager-node1_kube-system_846f06b74abd4522e4c089d7730d2c7f_1 +45553ff3a5f9 053cf553fa2d "kube-apiserver --re…" 11 minutes ago Up 11 minutes k8s_kube-apiserver_kube-apiserver-node1_kube-system_836e476f1e0225b2782e3e33908849fd_0 +32ce18e00906 k8s.gcr.io/pause-amd64:3.1 "/pause" 11 minutes ago Up 11 minutes k8s_POD_kube-apiserver-node1_kube-system_836e476f1e0225b2782e3e33908849fd_0 +ee8691339cdf gcr.io/google_containers/etcd-amd64 "etcd --listen-clien…" 12 minutes ago Up 12 minutes k8s_etcd_etcd-node1_kube-system_43a7b79b0d85dfa85932daa25361ac87_0 +69e06f5d90e2 gcr.io/google_containers/kube-scheduler-amd64 "kube-scheduler --ad…" 12 minutes ago Up 12 minutes k8s_kube-scheduler_kube-scheduler-node1_kube-system_6a17e9027c31b6807f6bf0f4dbc8fa17_0 
+81a84754706c k8s.gcr.io/pause-amd64:3.1 "/pause" 12 minutes ago Up 12 minutes k8s_POD_kube-controller-manager-node1_kube-system_846f06b74abd4522e4c089d7730d2c7f_0 +b3710632bada k8s.gcr.io/pause-amd64:3.1 "/pause" 12 minutes ago Up 12 minutes k8s_POD_etcd-node1_kube-system_43a7b79b0d85dfa85932daa25361ac87_0 +f125234e73a1 k8s.gcr.io/pause-amd64:3.1 "/pause" 12 minutes ago Up 12 minutes k8s_POD_kube-scheduler-node1_kube-system_6a17e9027c31b6807f6bf0f4dbc8fa17_0 +``` + + +```sh +[node1 ~]$ cat .kube/config +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNE1EZ3lNVEEzTXpFek5sb1hEVEk0TURneE9EQTNNekV6Tmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTjJVCi9ralNsMmU5QXBhSjhOTkJEazF3MTRwWWcrTUw1aGdKMzIzamllNFlpWmNzVGpaSEZrUDFZcWw4RVpKa2JkbEUKRnRPRWZWWG42dWlYcWRqUXF2cWJFU2hJR3htSytxQ05aUHIwaTlBeitxV0NrbnQ4WnhOemNVSW16QWd1WUYyYwpTR3dzdXFReks2TWxlS2EyU0IxU2xzczZqU3lnaVZqNzY0dU05akJ4SjVHb0k0SSthWHU3Ym5iclhaZVhwR3VsCmk1MjVvMlVrSEtkSXQvVlJ4bklWK1pWU25jUXZuRlN0SHVXbmVwS291UEMxMndnRGhJZXVKVkx4bjZPK01XaEQKMnppWnh1VTZTZDVWYktsWHJDTUxXUmJqbmlIck5RRFAxVjRkeUFPRE82TzR1Y2J1a0lrVHNoUHdxRi9JUG53Zwp2ckx4dHM5TVlIU3FLTWc1bGpzQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEVGZGaXRGMEhrQUt4QWtqMmtDZGk5aENocy8KL1ltcGxCNmNJbDVwSklnY2lDWjgxdTlTRzZLVlhVb2Z3TlBBT1F6NHMzYjZrYTh6c2xkTWR2SkVMQVpqdUFjYQpmdW1pMUw2Y1hMK0FHb0UyREs5TXk3VjhkOE9KQndRUUJYTzdqZ2o1U2VyaVJhWFVlcHJaRmF1OU9NSjgycG1YCjRKeGFXRTNxemlEKzdlQjI5TGYzTmtxY3pyZDE1aW5Oem1FWU1VN1ZkQy8xdHIzVCtBRFhHMjI3Tlk5VHJhcWMKOHdsYjlRQW4xaUJyZXU4blRtZWUrbG5FcVZSMHlrOFZlZFV3S21pL0NKQkZwN1hyL1pMdXQrOWZhUEZBUGs2bgozTXZFSkwrNWpTRXp5T1FSNk1WVzhkaVlVdEpVTUxDckh6bUVIUGI0Q2VBQjB3WWVzdGhtQUh3ZlNJUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://192.168.0.18:6443 + name: 
kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSGVyYmhHdzZjN1l3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T0RBNE1qRXdOek14TXpaYUZ3MHhPVEE0TWpFd056TXhNemhhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVtMG9wZlZaNlRoWnR6ZmcKNnpOR2NNTnBUb0R3bWdESDJXZTJleTQ0b0FMSVRaZXJuNUhwYitHNFE5czFZL2EwN2RiSlBkYVlDTUJSampsRQo3KzVMYVpqRHEwRG1pL2tZeHVFZUxBeTd6a21Fa2pPemJWQ2xTSXVpNm9kcG03aTE3TEhMVmdyOGRGbWNhSDd0CjR4VkVtK2JrSGxlVGdUV1lHZncxdzJXYUc1TlB6emZlN1hlUVYvNkU0cXpQMm9Hd3JpdVNydGQxT1NtOFJZa3MKRUYzWUZkMTJTVnl4QUlKL0FvNnd4SHpIK0xlQTBMem1pd25DL0o1SWNITmZEbWlKVnNJWG5nVEhHRlZMbm5qMwpqMTlwb1V0bHRoeFpwQlZUVnZnN05PNzJ0aTdhY2VIVEo5c2hVRnhVTnFlZkdCc294bUVnZCtnR3JMcG9OTHlVCnIrK09kd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFMcTZGNS94ZmxyNUNuY1VrSW5pb2lpMC9qeHBJRzFJWHkyOQpHZEFDL0FSeWR1SEtONzVVMUdmMi9Gc3pzcFR1TGJuRGJFM25ZVmgyS25VaE82Z0ZlZE5lOFY3SHI2UVVQTTI1CkF5T3Avcm5NaFlIT285VzhoSmdtaW13d3dYd0JETDQ4K3RUMHJ5UUNZcTRidi9aNFlhOHRvSWJOdndyak1zNXEKamk4Z3pPUDdaSlFWNTg3c2pzcmFDMUpuYTl0N2oxZXUzelI2RGNZZDcwYjBaWkNQRXZKeDFmZEVVVDBPRUtIbApQV1ZoaklMYzR2RjZiUDFwVlBLY1hIaTdMdlJocWFRL2tFQ0tFNXZVdVNlczNtZkI4dWRBSFF3UmM2WlF6U0FxClNwbEppNkQvSjRBOGRCNkliZm83TjNDby9aQU42V29QZEo2bTZRNmRJb2ZKZ1lQRW9sVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdW0wb3BmVlo2VGhadHpmZzZ6TkdjTU5wVG9Ed21nREgyV2UyZXk0NG9BTElUWmVyCm41SHBiK0c0UTlzMVkvYTA3ZGJKUGRhWUNNQlJqamxFNys1TGFaakRxMERtaS9rWXh1RWVMQXk3emttRWtqT3oKYlZDbFNJdWk2b2RwbTdpMTdMSExWZ3I4ZEZtY2FIN3Q0eFZFbStia0hsZVRnVFdZR2Z3MXcyV2FHNU5QenpmZQo3WGVRVi82RTRxelAyb0d3cml1U3J0ZDFPU204Ullrc0VGM1lGZDEyU1Z5eEFJSi9BbzZ3eEh6SCtMZUEwTHptCml3bkMvSjVJY0hOZkRtaUpWc0lYbmdUSEdGVkxubmozajE5cG9VdGx0aHhacEJWVFZ2ZzdOTzcydGk3YWNlSFQKSjlzaFVGeFVOcWVmR0Jzb3htRWdkK2dHckxwb05MeVVyKytPZHdJREFRQUJBb0lCQVFDdzRmMmRhV3dEMHNtZApBOUhPbC9vUDUvazAyVFp3aUtrV2FFZEdIUmJ4QzNMTjNFaWt3S2NDMXVJazBBcWFiR3hDOW5ZK3pDdTRzZkV2CnhycllBL3RkbCtxZnhKWjYzTUJiZ3lGRG5sbGJZM243MWo0NlREaG0yc2FiVEJ3eCtsSHk0TWNUUHAxR0wvZ3QKVmJHVXNvdEVlUkNja1crTk96SzBCUTlsOURNQ1VXWnd5OFhjejA2L1lSMmw4YWtnc2xSNmlFYW1CYXJqMUxCTAo2YkNBa0tkckFrN2hreGJpM0o0VjRJeTUzMFhaWkVhNm8wR0ZMVUd3Y3hhWjMvZjBHL2tiSUViQXh2YzZnY01HCjdFcktlYnh6c1ZFeVQvSHcwOWRWTUhjODBUbE4zWlBla2VGd2R4TDMwRW13NGdnckg3VmV2Tmp3Mlg5VXN6bWwKb0xwZytrblJBb0dCQU5BK08yRm8vb0M5cHVDRldBRFRRa2ZKb0RjMTJWQUgxQ3d0b2MzQmJWSjRKK1JqcFdSYQpsWlRZdno4VXBoZEJkNDNEMzE3U1hDWStOVEN5c1RQb3A0R0JqNlV2R010ZU53ek5vdkpUUXZxdFEvR3FCRndCCk5XK0dvdi9KN0t2QnlSOVgxeDRPYWNDOFVnRzNmUVRYR2JLd0t2bzB6dXZCN3ZBcEl1RFFWd0E5QW9HQkFPVXUKRm81Y2t5b0tBcy92R1VnT2phc0dHa1NscE5RWFJwT2ZqSXY5TEJZRDVyZnVIM2tGVTB5ZExEelMwdzl2a0tRZAp5MWRCUWtGVkVqUlJMeHFLRlpaeWt2TGdzTkNFaVQ4cXdNRGo3SVE5Zml5V09JZXk4SlpyVCtjcjkrYXZvZXYyCkJ4NE9MMTVUcDRKamM4ZjBuTFdWc0JIdkVjS0FOR3F0bmdBdG1lRERBb0dBRmNES2R5eCttWGcyUEhIeGRYU2MKRWQrd0N0Um5OUUw4V1BrVFUra0d3SHlvdmlUaW5BOFNYUkFmT2ptdjkvSHd5VEJTM3hiZVpXaE9LaDRnZ0ttbgpCZEpBN1NKMFZwb3E2ZjE1TGhNZ2hnc2ZjOWtzeDJlZzYxdUhIMjA2ZWQ2dVljWE5ST2lBaDQra2lXVmZlK2YwCldVUXAyU1UzRUUyYjVoNkY5RElQUklrQ2dZQllBdXNHb09hakxsVUdKWHcvLzliSDV3QVBmTTFmNWRCa3dORG8Kc2NGTWN4TVhiNHNHbnRUdWh5MkY3akMvck1pUFBpSG8wRmJ4WUFvdHJUKzVvVzNJNlJjbHpzUVlUODJsUkhYaApuYVltNHJhaHlNN016aHMycU45Mmw1d01LMGF6cHpmMGd0NE9DM0k0MEppV0lZcE04Yjg4SStOZUhuNEN1Y29ECnAzK0FvUUtCZ0M3Y0c3SklDc3RPUXZra2hndE9KdWwyaFdtOFR5
cXhZYzlpQ2Y4VE5lZXNYSVR6Y1VUSlQ3WEQKWUN1Nmo4ZHVoZFQrTzJ3aUFoeDNnaEFMb0hGZWh2aVpuZkFTckp5VVdjVE5sSk1QeGRRRi9SK3M1b1lRVXNiYQprMEVlS0VlZ2JyOU0xU0Q4TlpTR2U0ZnFlUlNWVVNRR05TWnMvYUxNdDNaUDJSWW82eFVYCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +``` + + +```sh +[node1 ~]$ ls .kube/cache/discovery/192.168.0.18_6443/ +apiextensions.k8s.io apps authorization.k8s.io batch extensions policy servergroups.json v1 +apiregistration.k8s.io authentication.k8s.io autoscaling certificates.k8s.io networking.k8s.io rbac.authorization.k8s.io storage.k8s.io +``` + +```sh +[node1 ~]$ ls .kube/http-cache/ +01cb950acba014a4088e8a81c554dd8d 59febcd5519751707bb880efa7b5ab20 8f7690db01fca87b663335de1b691cb2 df07a45be9279c34ed1417f79177b3b5 +17530063965cc4ea5d0b24eaa93d8233 694c50f725e48086aa2ae8c445c8737f 94af4d7a881c3dc002f098f99009838e ee23796d54ebcb59e0757145a2d45a21 +17d77745f1ec84e741ed19bf35e70589 6aadc9fb9366f4f02333ba58d545c56f a6298e87761566fa642e4c5b9ebd219b f5568ac4afc5913a5aa680e0fdae9929 +1ce5725e1e762b409d26af04bd9bbe83 6eae4a12a007abb3497d5e68922d3f92 a947b24676ad161ece629d937378c394 f7940f510850679616c87ecb93c96f01 +401f1eb169da5cd85a7b53ce1dcbaeca 6f041d2f11810aa00fe275a12c409ed8 ad616d73eff52198b12896bf7d75e928 +411e0fa7a743fd7dfb804991c3d10b2e 87d2e1091e6edb0e75b8a9b1d9ee82f3 be2540e4ac2e0bc1d1337cf83af25776 +542415af432272c6a50f82ea9306cff5 8ec6ed70f8c8e8fd1cddceaf81454b6d cb2419a70261f4a92781246cf1ca392a +``` + +> Refrence + +[尝试搭建一个k8s集群体验环境](https://labs.play-with-k8s.com/) + +[katacoda k8s模拟环境](https://www.katacoda.com/courses/kubernetes/playground) \ No newline at end of file diff --git "a/\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/Kubernetes.md" "b/DevOps/Kubernetes/Kubernetes \346\246\202\345\277\265.md" similarity index 53% rename from "\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/Kubernetes.md" rename to "DevOps/Kubernetes/Kubernetes \346\246\202\345\277\265.md" index cfe4256..63e89b6 100644 --- 
"a/\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/Kubernetes.md" +++ "b/DevOps/Kubernetes/Kubernetes \346\246\202\345\277\265.md" @@ -1,27 +1,5 @@ # Kubernetes -## 安装kubeadm -```sh -cat < /etc/yum.repos.d/kubernetes.repo -[kubernetes] -name=Kubernetes -baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 -enabled=1 -gpgcheck=0 -repo_gpgcheck=0 -gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg - http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg -EOF - -# 将 SELinux 设置为 permissive 模式(将其禁用) -setenforce 0 -sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config - -yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes - -systemctl enable kubelet && systemctl start kubelet -``` - ## Service 服务式分布式集群架构的核心, 一个Service对象拥有如下关键特征: @@ -36,6 +14,6 @@ Pod里运行着一个被称之为Pause的容器,其它容器则为业务容器, 并不是每个Pod和它里面运行的容器都能"映射"到一个Service上,只有那些提供服务的一组Pod才会被"映射"成一个服务. 参考: -https://kubernetes.io/zh/ -[kubernetes 阿里云镜像](http://ljchen.net/2018/10/23/%E5%9F%BA%E4%BA%8E%E9%98%BF%E9%87%8C%E4%BA%91%E9%95%9C%E5%83%8F%E7%AB%99%E5%AE%89%E8%A3%85kubernetes/) \ No newline at end of file +[kubernetes io](https://kubernetes.io/zh/) + diff --git "a/DevOps/Kubernetes/Kubernetes-\345\216\237\347\220\206.md" "b/DevOps/Kubernetes/Kubernetes-\345\216\237\347\220\206.md" new file mode 100644 index 0000000..d33252c --- /dev/null +++ "b/DevOps/Kubernetes/Kubernetes-\345\216\237\347\220\206.md" @@ -0,0 +1,197 @@ +# kubernetes + +## kubernetes 主要功能 + +Kubernetes 是谷歌开源的容器集群管理系统,是 Google 多年大规模容器管理技术 Borg 的开源版本,也是 CNCF 最重要的项目之一,主要功能包括: +- 基于容器的应用部署、维护和滚动升级 +- 负载均衡和服务发现 +- 跨机器和跨地区的集群调度 +- 自动伸缩 +- 无状态服务和有状态服务 +- 广泛的 Volume 支持 +- 插件机制保证扩展性 + +## Kubernetes 的核心组件 + +- etcd 保存了整个集群的状态; + +- apiserver提供了资源操作的唯一入口,并提供认证,授权,访问控制,API注册和发现等机制. 
+ +- controller manager负责维护集群的状态,比如故障检测,自动扩展,滚动更新等; + +- scheduler负责资源的调度,按照预定的调度策略将Pod调度到相应的机器上; + +- kubelet负责维护容器的生命周期,同时也负责Volume(CVI)和网络(CNI)的管理 + +- Container runtime负责镜像管理以及Pod和容器的真正运行(CRI)、 + +- kube-proxy负责为Service提供cluster内部的服务发现和负载均衡 + +## Master节点上运行着以下关键进程 + +- Kubernetes API Server(kube-apiserver), 提供了HTTP Rest接口的关键服务进程,是k8s里所有资源增,删,改,查等操作的唯一入口,也是集群控制的入口进程. + +- Kubernetes Controller Manager(kube-controller-manager),k8s里所有资源对象的自动化控制中心,可以理解为资源对象的"大总管" + +- Kubernetes Scheduler(kube-scheduler),负责资源调度(Pod调度)的进程,相当于公交公司的"调度室" + +- etcd Server,保存k8s里所有资源对象. + + +## Node节点上运行着以下关键进程 + +- kubelet: 负责Pod对应的容器的创建,启停等任务,同时与Master节点密切协作,实现集群管理的基础功能. + +- kube-proxy: 实现Kubernetes Service的通信与负载均衡机制的重要组件. + +- Docker Engine: Docker引擎,负责本机的容器创建和管理工作. + +## Kubernetes 基本概念 + +### Container + +Container(容器)是一种便携式、轻量级的操作系统级虚拟化技术。它使用 namespace 隔离不同的软件运行环境,并通过镜像自包含软件的运行环境,从而使得容器可以很方便的在任何地方运行. + +容器同样比虚拟机更加透明,这有助于监测和管理。尤其是容器进程的生命周期由基础设施管理,而不是被进程管理器隐藏在容器内部。最后,每个应用程序用容器封装,管理容器部署就等同于管理应用程序部署. + +### Pod + +Kubernetes 使用 Pod 来管理容器,每个 Pod 可以包含一个或多个紧密关联的容器。 + +Pod 是一组紧密关联的容器集合,它们共享 PID、IPC、Network 和 UTS namespace,是 Kubernetes 调度的基本单位。Pod 内的多个容器共享网络和文件系统,可以通过进程间通信和文件共享这种简单高效的方式组合完成服务。 + +### Node + +Node 是 Pod 真正运行的主机,可以是物理机,也可以是虚拟机。为了管理 Pod,每个 Node 节点上至少要运行 container runtime(比如 docker 或者 rkt)、`kubelet` 和 `kube-proxy` 服务。 + +### Namespace + +Namespace 是对一组资源和对象的抽象集合,比如可以用来将系统内部的对象划分为不同的项目组或用户组。常见的 pods, services, replication controllers 和 deployments 等都是属于某一个 namespace 的(默认是 default),而 node, persistentVolumes 等则不属于任何 namespace。 + +### Service + +Service 是应用服务的抽象,通过 labels 为应用提供负载均衡和服务发现。匹配 labels 的 Pod IP 和端口列表组成 endpoints,由 kube-proxy 负责将服务 IP 负载均衡到这些 endpoints 上 + +每个 Service 都会自动分配一个 cluster IP(仅在集群内部可访问的虚拟地址)和 DNS 名,其他容器可以通过该地址或 DNS 来访问服务,而不需要了解后端容器的运行。 + +### Label + +Label 是识别 Kubernetes 对象的标签,以 key/value 的方式附加到对象上(key 最长不能超过 63 字节,value 可以为空,也可以是不超过 253 字节的字符串)。 + +## Kubernetes 架构原理 + +Kubernetes 最初源于谷歌内部的 Borg,提供了面向应用的容器集群部署和管理系统。Kubernetes 的目标旨在消除编排物理 / 
虚拟计算,网络和存储基础设施的负担,并使应用程序运营商和开发人员完全将重点放在以容器为中心的原语上进行自助运营。Kubernetes 也提供稳定、兼容的基础(平台),用于构建定制化的 workflows 和更高级的自动化任务。 Kubernetes 具备完善的集群管理能力,包括多层次的安全防护和准入机制、多租户应用支撑能力、透明的服务注册和服务发现机制、内建负载均衡器、故障发现和自我修复能力、服务滚动升级和在线扩容、可扩展的资源自动调度机制、多粒度的资源配额管理能力。 Kubernetes 还提供完善的管理工具,涵盖开发、部署测试、运维监控等各个环节。 + +### Borg 简介 + +Borg 是谷歌内部的大规模集群管理系统,负责对谷歌内部很多核心服务的调度和管理。Borg 的目的是让用户能够不必操心资源管理的问题,让他们专注于自己的核心业务,并且做到跨多个数据中心的资源利用率最大化. + +Borg 主要是由以下部分组成: + +- BorgMaster + BorgMaster 是整个集群的大脑,负责维护整个集群的状态,并将数据持久化到 Paxos 存储中; +- Borglet + Borglet 负责真正运行任务(在容器中); +- borgcfg + borgcfg 是 Borg 的命令行工具,用于跟 Borg 系统交互,一般通过一个配置文件来提交任务。 +- Scheduler + Scheduler 负责任务的调度,根据应用的特点将其调度到具体的机器上去; + + +### 分层架构 + +- 核心层 + Kubernetes 最核心的功能,对外提供 API 构建高层的应用,对内提供插件式应用执行环境 +- 应用层 + 部署(无状态应用、有状态应用、批处理任务、集群应用等)和路由(服务发现、DNS 解析等) +- 管理层 + 系统度量(如基础设施、容器和网络的度量),自动化(如自动扩展、动态 Provision 等)以及策略管理(RBAC、Quota、PSP、NetworkPolicy 等) +- 接口层 + kubectl 命令行工具、客户端 SDK 以及集群联邦 +- 生态系统 + 在接口层之上的庞大容器集群管理调度的生态系统,可以划分为两个范畴 + Kubernetes 外部:日志、监控、配置管理、CI、CD、Workflow、FaaS、OTS 应用、ChatOps 等 + Kubernetes 内部:CRI、CNI、CVI、镜像仓库、Cloud Provider、集群自身的配置和管理等 + +## 设计理念 + +核心技术概念和API对象 +--- + +API对象是K8s集群中的管理操作单元。K8s集群系统每支持一项新功能,引入一项新技术,一定会新引入对应的API对象,支持对该功能的管理操作。例如副本集Replica Set对应的API对象是RS。 + +每个API对象都有3大类属性: +- 元数据metadata + +用来标识API对象的,每个对象至少有3个元数据: namespace, name, uid; +还有各种标签labels用来标识和匹配不同的对象. + +- 规范spec + +描述了用户期望k8s集群中的分布式系统达到的理想状态。比如用户可以通过复制控制器Replication Controller设置期望值的Pod副本数为3. + +- 状态status. + +描述了系统实际当前达到的状态. + +### Pod + +微服务Pod是在K8s集群中运行部署应用或服务的最小单元,它是可以支持多容器的在一个Pod中共享网络地址和文件系统,可以通过进程间通信和文件共享这种简单高效的方式组合完成服务. + +Pod是K8s集群中所有业务类型的基础,目前K8s中的业务主要可以分为: +- 长期伺服型(long-running) --- Deployment +- 批处理型(batch) --- Job +- 节点后台支撑型(node-daemon) --- DaemonSet +- 有状态应用型(stateful application) --- StatefulSet + +### 复制控制器 (Replication Controller, RC) + +RC是K8s集群中最早的保证Pod高可用的API对象。通过监控运行中的Pod来保证集群中运行指定数目的Pod副本. 
+作用: +- 指定的数目可以是多个也可以是1个;即使在指定数目为1的情况下,通过RC运行Pod也比直接运行Pod更明智,因为RC也可以发挥它高可用的能力,保证永远有1个Pod在运行。 +- 少于指定数目,RC就会启动运行新的Pod副本; +- 多于指定数目,RC就会杀死多余的Pod副本。 + +### 副本集 (Replica Set, RS) + +RS是新一代RC, 提供同样高可用的能力,区别主要在于RS后来居上,能支撑更多种类的匹配模式. + +### 部署(Deployment) + +部署表示用户对k8s集群的一次更新操作.部署是一个比RS应用模式更广的API对象,可以是创建一个新的服务,更新一个新的服务,也可以是滚动升级一个服务. + +### 服务(Service) + +RC、RS和Deployment只是保证了支撑服务的微服务Pod的数量,但是没有解决如何访问这些服务的问题。要稳定地提供服务需要服务发现和负载均衡能力。 + +服务发现: + +客户端需要访问的服务就是Service对象。每个Service会对应一个集群内部有效的虚拟IP,集群内部通过虚拟IP访问一个服务。 + +负载均衡: + +Kube-proxy是K8s集群内部的负载均衡器。它是一个分布式代理服务器,在K8s的每个节点上都有一个; + +### 任务(Job) + +Job是K8s用来控制批处理型任务的API对象。批处理业务与长期伺服业务的主要区别是批处理业务的运行有头有尾,而长期伺服业务在用户不停止的情况下永远运行。 + +### 后台支撑服务集 (DaemonSet) + +长期伺服型和批处理型服务的核心在业务应用,可能有些节点运行多个同类业务的Pod,有些节点上又没有这类Pod运行;而后台支撑型服务的核心关注点在K8s集群中的节点(物理机或虚拟机),要保证每个节点上都有一个此类Pod运行 + +### 存储卷(Volume) + +K8s集群中的存储卷跟Docker的存储卷有些类似,只不过Docker的存储卷作用范围为一个容器,而K8s的存储卷的生命周期和作用范围是一个Pod。每个Pod中声明的存储卷由Pod中的所有容器共享。 + +### 持久存储卷(Persistent Volume, PV)和持久存储卷声明(Persistent Volume Claim, PVC) + +PV和PVC使得K8s集群具备了存储的逻辑抽象能力,使得在配置Pod的逻辑里可以忽略对实际后台存储技术的配置,而把这项配置的工作交给PV的配置者,即集群的管理者。 + +参考: + +[kubernetes 最佳实践](https://kubernetes.feisky.xyz/zh/) + +[architecture roadmap](https://github.com/kubernetes/community/tree/master/sig-architecture) + +[kubernetes 设计理念](https://kubernetes.feisky.xyz/zh/architecture/concepts.html) \ No newline at end of file diff --git "a/DevOps/Kubernetes/Q&A/kubeadm\344\273\243\347\220\206.md" "b/DevOps/Kubernetes/Q&A/kubeadm\344\273\243\347\220\206.md" new file mode 100644 index 0000000..10668d6 --- /dev/null +++ "b/DevOps/Kubernetes/Q&A/kubeadm\344\273\243\347\220\206.md" @@ -0,0 +1,38 @@ +```sh +[root@master ~]# proxychains4 kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.13.5 --pod-network-cidr=192.168.0.0/16 +[proxychains] config file found: /root/proxychains.conf +[proxychains] preloading /usr/local/lib/libproxychains4.so +[init] Using Kubernetes version: v1.13.5 +[preflight] Running pre-flight 
checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ +[preflight] The system verification failed. Printing the output from the verification: +KERNEL_VERSION: [proxychains] DLL init +3.10.0-327.28.3.el7.x86_64 +DOCKER_VERSION: 18.09.6 +DOCKER_GRAPH_DRIVER: overlay2 +OS: [proxychains] DLL init +Linux +CGROUPS_CPU: enabled +CGROUPS_CPUACCT: enabled +CGROUPS_CPUSET: enabled +CGROUPS_DEVICES: enabled +CGROUPS_FREEZER: enabled +CGROUPS_MEMORY: enabled +error execution phase preflight: [preflight] Some fatal errors occurred: + [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1 + [ERROR Swap]: running with swap on is not supported. Please disable swap + [ERROR SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "[proxychains] DLL init\nmodprobe: FATAL: Module configs not found.\n", err: exit status 1 + [ERROR SystemVerification]: unsupported operating system: [proxychains] DLL init +Linux + [ERROR KubeletVersion]: couldn't get kubelet version: Unable to parse output from Kubelet: "[proxychains] DLL init\nKubernetes v1.14.1" +[root@master ~]# proxychains4 kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.13.5 --pod-network-cidr=192.168.0.0/16^C +[root@master ~]# echo 1 /proc/sys/net/bridge/bridge-nf-call-iptables +1 /proc/sys/net/bridge/bridge-nf-call-iptables +[root@master ~]# cat /proc/sys/net/bridge/bridge-nf-call-iptables +0 +[root@master ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables +[root@master ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables +``` + + +[remove swap](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux) \ No newline at end of file diff --git a/DevOps/Kubernetes/kubeadm-playround.md 
b/DevOps/Kubernetes/kubeadm-playround.md new file mode 100644 index 0000000..3a5f9d0 --- /dev/null +++ b/DevOps/Kubernetes/kubeadm-playround.md @@ -0,0 +1,132 @@ +# kubeadm 练习 + +## 内网机器通过能上网的机器联网设置 +156 +157 +158 +为内网ip;都通过10.255.175.96这个(docker hub)能上网的机器进行联网. + +在各个内网机器设置96为默认网关. +在96这个机器上添加NAT规则 +iptables -t nat -A POSTROUTING -s 10.255.175.0/24 -o eth0 -j SNAT --to 10.255.175.96 + +再在各个内网机器systemctl restart network ;便可联网 + +在三台机器都安装kubeadm,docker: +156 master +157 slave01 +158 slave02 + +96 DockerHub + +0. 检测docker版本并检验对应兼容的k8s版本 +[root@node1 ~]# docker -v +Docker version 18.09.6, build 481bc77156 + +选用 v1.13.6 k8s + +1. 配置HOSTS和安装 kubeadm + +```sh +[root@localhost ~]# echo "node1.com" > /etc/hostname +[root@localhost ~]# hostname +node1.com +[root@localhost ~]# cat /etc/hosts +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +127.0.0.1 localhost +10.255.175.156 master.com +10.255.175.157 node1.com +10.255.175.158 node2.com + +# 此处由于该宿主机需要代理才能上网 +proxychains4 yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes +# 之前未docker设为自启动 +systemctl enable docker && systemctl start docker +systemctl enable kubelet && systemctl start kubelet +``` + +2. 关闭SELinux +```sh +[root@master ~]# cat /etc/selinux/config + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +SELINUX=disabled +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted + +[root@master ~]# sestatus +SELinux status: disabled +``` + +3. 
配置各台机器上的hub和镜像加速地址 +```sh +[root@localhost ~]# cat /etc/docker/daemon.json +{ + "registry-mirrors": ["https://q0i1jkif.mirror.aliyuncs.com"], # 阿里云加速地址 + "insecure-registries":["10.255.175.96:5000"], + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "storage-driver": "overlay2", + "storage-opts": [ + "overlay2.override_kernel_check=true" + ] +} + +systemctl daemon-reload && +systemctl restart docker.service && +systemctl status docker -l +``` + + +4. 安装Master节点 + +K8s的控制面板组件运行在Master节点上,包括etcd和API server(Kubectl便是通过API server与k8s通信)。 + +在执行初始化之前,我们还有一下3点需要注意: + +1.选择一个网络插件,并检查它是否需要在初始化Master时指定一些参数,比如我们可能需要根据选择的插件来设置--pod-network-cidr参数。参考:Installing a pod network add-on。 + +2.kubeadm使用eth0的默认网络接口(通常是内网IP)做为Master节点的advertise address,如果我们想使用不同的网络接口,可以使用--apiserver-advertise-address=参数来设置。如果适应IPv6,则必须使用IPv6d的地址,如:--apiserver-advertise-address=fd00::101。 + +3.使用kubeadm config images pull来预先拉取初始化需要用到的镜像,用来检查是否能连接到Kubenetes的Registries。 + +Kubenetes默认Registries地址是k8s.gcr.io,很明显,在国内并不能访问gcr.io,因此在kubeadm v1.13之前的版本,安装起来非常麻烦,但是在1.13版本中终于解决了国内的痛点,其增加了一个--image-repository参数,默认值是k8s.gcr.io,我们将其指定为国内镜像地址:registry.aliyuncs.com/google_containers,其它的就可以完全按照官方文档来愉快的玩耍了。 + +其次,我们还需要指定--kubernetes-version参数,因为它的默认值是stable-1,会导致从https://dl.k8s.io/release/stable-1.txt下载最新的版本号,我们可以将其指定为固定版本(最新版:v1.13.5)来跳过网络请求。 + +```sh +proxychains4 kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.14.1 --pod-network-cidr=192.168.0.0/16 +``` + +参考: + +[kubeadm 创建集群](https://www.cnblogs.com/RainingNight/p/using-kubeadm-to-create-a-cluster-1-13.html) + +[kubeadm 搭建集群](https://mritd.me/2016/10/29/set-up-kubernetes-cluster-by-kubeadm/) + +[kubeadm安装 kubernets](https://www.cnblogs.com/cocowool/p/kubeadm_install_kubernetes.html) + +[disabled selinux](https://howto.lintel.in/enable-disable-selinux-centos/) + +[daemon.json作用](https://blog.csdn.net/u013948858/article/details/79974796) + 
+[docker daemon.json 配置各台机器上的hub和镜像加速地址](https://blog.csdn.net/kozazyh/article/details/79795559)
+
+[docker 私有仓库登录](https://segmentfault.com/a/1190000012175537)
+
+[docker 和k8s兼容版本](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#downloads-for-v1136)
+
+[kubeadm version constant](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/constants)
+
+[kubeadm constants](https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go)
diff --git a/DevOps/Kubernetes/kubernetes.md b/DevOps/Kubernetes/kubernetes.md
new file mode 100644
index 0000000..4acba1a
--- /dev/null
+++ b/DevOps/Kubernetes/kubernetes.md
@@ -0,0 +1,26 @@
+# kubernetes
+
+## Docker 和K8s的兼容性
+
+Kubernetes 1.9 <--Docker 1.11.2 to 1.13.1 and 17.03.x
+
+Kubernetes 1.8 <--Docker 1.11.2 to 1.13.1 and 17.03.x
+
+Kubernetes 1.7 <--Docker 1.10.3, 1.11.2, 1.12.6
+
+Kubernetes 1.6 <--Docker 1.10.3, 1.11.2, 1.12.6
+
+Kubernetes 1.5 <--Docker 1.10.3, 1.11.2, 1.12.3
+
+
+参考:
+
+[k8s wiki](https://zh.wikipedia.org/wiki/Kubernetes)
+
+[gitlabrunner](https://docs.gitlab.com/runner/)
+
+[k8s cn](https://kubernetes.io/cn/docs/tutorials/kubernetes-basics/)
+
+[k8s 对docker的兼容性](https://blog.csdn.net/CSDN_duomaomao/article/details/79171027)
+
+[k8s本地开发](https://blog.qikqiak.com/post/skaffold-simple-local-develop-k8s-app-tools/)
\ No newline at end of file
diff --git a/DevOps/Kubernetes/setup.md b/DevOps/Kubernetes/setup.md
new file mode 100644
index 0000000..06a8ae0
--- /dev/null
+++ b/DevOps/Kubernetes/setup.md
@@ -0,0 +1,75 @@
+# Minikube
+
+安装Minikube分以下几步:
+- BIOS开启虚拟化
+- 安装Hypervisor虚拟化工具
+- 安装kubectl
+- 安装 Minikube
+
+## 使用阿里镜像源安装
+```sh
+cat <<EOF > /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=0
+repo_gpgcheck=0
+gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
+EOF
+yum install -y 
kubectl
+```
+
+## kubectl
+
+它是user-commands工具.目前kubectl还未配置成功
+
+```sh
+(base) [root@bogon home]# kubectl cluster-info
+Kubernetes master is running at http://localhost:8080
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+The connection to the server localhost:8080 was refused - did you specify the right host or port?
+```
+
+## 安装kubeadm
+```sh
+function addKubRep() {
+cat <<EOF > /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=0
+repo_gpgcheck=0
+gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
+ http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
+EOF
+}
+
+function disabledSELinux() {
+    # 将 SELinux 设置为 permissive 模式(将其禁用)
+    setenforce 0
+    sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+}
+
+function installKubctl() {
+    yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
+
+    systemctl enable kubelet && systemctl start kubelet
+}
+
+```
+
+
+参考:
+
+[选择最佳的实践策略](https://kubernetes.io/docs/setup/pick-right-solution/)
+
+[kubernetes 最佳实践](https://kubernetes.feisky.xyz/zh/)
+
+[安装 kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+[尝试搭建一个k8s集群体验环境](https://labs.play-with-k8s.com/)
+
+[kubernetes 阿里云镜像](http://ljchen.net/2018/10/23/%E5%9F%BA%E4%BA%8E%E9%98%BF%E9%87%8C%E4%BA%91%E9%95%9C%E5%83%8F%E7%AB%99%E5%AE%89%E8%A3%85kubernetes/)
\ No newline at end of file
diff --git "a/\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/docker.md" "b/DevOps/docker\345\274\200\345\217\221/docker.md"
similarity index 94%
rename from "\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/docker.md"
rename to "DevOps/docker\345\274\200\345\217\221/docker.md"
index 9951a02..6b8193c 100644
--- "a/\345\244\247\346\225\260\346\215\256/docker\345\274\200\345\217\221/docker.md"
+++ 
"b/DevOps/docker\345\274\200\345\217\221/docker.md" @@ -1,7 +1,25 @@ ## Docker 搭建数据采集环境 +## Docker Hub +```sh +/etc/init/docker.conf + +# 拉取hub +docker run -d -p 5000:5000 --restart=always -v /opt/data/registry:/var/lib/registry --name registry registry + +# 标记本地镜像 +docker tag nginx:latest hubip:5000/nginx:latest + +# 推送 +docker push hubip:5000/nginx:latest + +# 拉取 +docker pull hubip:5000/nginx:latest +``` + +[]() ## Kafka diff --git "a/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/CALDERA.md" "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/CALDERA.md" new file mode 100644 index 0000000..8f2b382 --- /dev/null +++ "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/CALDERA.md" @@ -0,0 +1,28 @@ +### CALDERA + +CALDERA专注于后渗透阶段.它包含一个逻辑编码,用于描述该技术的要求(前置条件)和技术的效果(后置条件). +对手仿真的核心是red team,但是对手仿真不是使用攻击者的一般心态,而是采用特定现实世界对手的方法,参与的重点是让仿真团队和防御者共同努力改进系统,网络和防御过程,以更好地检测对手生命周期中使用的技术. + +限制: 1.决定不模仿C2,原因是已经存在几种模拟C2网络流量的工具.通过关注仿真的其他方面产生更大的影响. + - 实用角度: CALDERA最初是为测试基于主机的防御和传感器而创建的.基于主机的防御主要使用主机上的活动而不是网络上的。 + - 哲学角度: C2协议很容易改变,且由很多变化,不同的差异会很大. + +```sh +git clone https://github.com/mitre/caldera.git --recursive +docker-compose up +``` + +默认账户密码 +username: admin +password: caldera + +Logic是CALDERA能够自动运行的核心部分.每一个敌手动作,称为步骤在CALDERA包含的步骤的要求和影响的逻辑描述. 
+CALDERA解析这些逻辑描述,以告知何时可以运行Step并预测Ste的结果.这让CALDERA通过迭代检测给定当前状态的那些步骤是可以执行的,选择步骤,然后根据逻辑规则生成该步骤的输出状态来生成[计划][planning] + +[caldera](https://caldera.readthedocs.io/en/latest/) + +[docker-compose install](https://docs.docker.com/compose/install/) + +[caldera install](https://caldera.readthedocs.io/en/latest/installation.html) + +[AI planning](https://www.isi.edu/~blythe/cs541/) diff --git "a/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/HIDS.md" "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/HIDS.md" new file mode 100644 index 0000000..c7e8411 --- /dev/null +++ "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\345\256\211\345\205\250\345\267\245\345\205\267/HIDS.md" @@ -0,0 +1,24 @@ +## HIDS + +OSSEC分为三部分: +Manager(or Server): Agents通过1514/udp端口连接Server,agents可以通过这个端口和server沟通. 
+Agents + +Systemd 中的service分为 agent和server: +- server + +agentless.service +analysisd.service +csyslog.service +dbd.service +execd.service +logcollector.service +maild.service +monitord.service +remoted.service + +参考: + +[ossec-hids github](https://github.com/ossec/ossec-hids) + +[ossec doc](http://www.ossec.net/docs/index.html) \ No newline at end of file diff --git "a/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\351\273\221\345\256\242\345\267\245\345\205\267\350\265\204\346\272\220.md" "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\351\273\221\345\256\242\345\267\245\345\205\267\350\265\204\346\272\220.md" index 17f3f8e..841c054 100644 --- "a/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\351\273\221\345\256\242\345\267\245\345\205\267\350\265\204\346\272\220.md" +++ "b/\345\256\211\345\205\250\346\212\200\350\203\275\346\240\221/\345\256\211\345\205\250\344\277\241\346\201\257\350\265\204\346\272\220/\351\273\221\345\256\242\345\267\245\345\205\267\350\265\204\346\272\220.md" @@ -11,49 +11,6 @@ adversary emulation, 模拟攻击提供了一种用来测试网络在应对高 [攻击模拟](https://www.4hou.com/web/11241.html) -### CALDERA - -CALDERA专注于后渗透阶段.它包含一个逻辑编码,用于描述该技术的要求(前置条件)和技术的效果(后置条件). -对手仿真的核心是red team,但是对手仿真不是使用攻击者的一般心态,而是采用特定现实世界对手的方法,参与的重点是让仿真团队和防御者共同努力改进系统,网络和防御过程,以更好地检测对手生命周期中使用的技术. - -限制: 1.决定不模仿C2,原因是已经存在几种模拟C2网络流量的工具.通过关注仿真的其他方面产生更大的影响. - - 实用角度: CALDERA最初是为测试基于主机的防御和传感器而创建的.基于主机的防御主要使用主机上的活动而不是网络上的。 - - 哲学角度: C2协议很容易改变,且由很多变化,不同的差异会很大. - -```sh -git clone https://github.com/mitre/caldera.git --recursive -docker-compose up -``` - -默认账户密码 -username: admin -password: caldera - -Logic是CALDERA能够自动运行的核心部分.每一个敌手动作,称为步骤在CALDERA包含的步骤的要求和影响的逻辑描述. 
-CALDERA解析这些逻辑描述,以告知何时可以运行Step并预测Ste的结果.这让CALDERA通过迭代检测给定当前状态的那些步骤是可以执行的,选择步骤,然后根据逻辑规则生成该步骤的输出状态来生成[计划][planning] - -[caldera](https://caldera.readthedocs.io/en/latest/) - -[docker-compose install](https://docs.docker.com/compose/install/) - -[caldera install](https://caldera.readthedocs.io/en/latest/installation.html) - -[AI planning](https://www.isi.edu/~blythe/cs541/) - - -## HIDS - -OSSEC分为三部分: -Manager(or Server): Agents通过1514/udp端口连接Server,agents可以通过这个端口和server沟通. -Agents - - -参考: - -[ossec-hids github](https://github.com/ossec/ossec-hids) - -[ossec doc](http://www.ossec.net/docs/index.html) - ## CTF 夺旗赛 [CTF 夺旗赛资源](https://ctftime.org/) diff --git "a/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/Linux\345\270\270\347\224\250\345\221\275\344\273\244.md" "b/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/Linux\345\270\270\347\224\250\345\221\275\344\273\244.md" index 1c42578..1e9d48a 100644 --- "a/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/Linux\345\270\270\347\224\250\345\221\275\344\273\244.md" +++ "b/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/Linux\345\270\270\347\224\250\345\221\275\344\273\244.md" @@ -11,10 +11,79 @@ iptables -F chainname # 添加NAT规则 iptables -A nat -s 10.255.175.76 -p tcp --dport 8880 -j ACCEPT + +# 内网虚拟机机器通过`10.255.175.96`宿主机能访问外网的机器上网 +iptables -t nat -A POSTROUTING -s 10.255.175.0/24 -o eth0 -j SNAT --to 10.255.175.96 + +iptables -t nat -D POSTROUTING -s 10.255.175.0/24 -j SNAT --to 10.255.175.96 + +# 再在内网机器上将GATEWAY设置为`10.255.175.96` ``` + +````sh +# 开启转发并持久化 +sysctl -p /etc/sysctl.conf +[root@slave02 ~]# cat /etc/sysctl.conf +# System default settings live in /usr/lib/sysctl.d/00-system.conf. 
+# To override those settings, enter new settings here, or in an /etc/sysctl.d/.conf file +# +# For more information, see sysctl.conf(5) and sysctl.d(5). +vm.max_map_count=262144 +vm.swappiness=10 +net.ipv4.ip_forward = 1 + + +# 删掉后原来内网ip由能ping 通 baidu.com转为不能ping通 +iptables -t nat -A POSTROUTING -s 10.255.175.0/24 -o eth0 -j SNAT --to 10.255.175.96 +# 开机自启保存 +iptables-save > /etc/sysconfig/iptables + + +# 能够上网的机器的nat表配置 +[root@slave02 ~]# iptables -t nat -L +Chain PREROUTING (policy ACCEPT) +target prot opt source destination +DOCKER all -- anywhere anywhere ADDRTYPE match dst-type LOCAL + +Chain INPUT (policy ACCEPT) +target prot opt source destination + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +DOCKER all -- anywhere !loopback/8 ADDRTYPE match dst-type LOCAL + +Chain POSTROUTING (policy ACCEPT) +target prot opt source destination +MASQUERADE all -- 172.17.0.0/16 anywhere +MASQUERADE tcp -- 172.17.0.2 172.17.0.2 tcp dpt:commplex-main + +SNAT all -- 10.255.175.0/24 anywhere to:10.255.175.96 + +Chain DOCKER (2 references) +target prot opt source destination +RETURN all -- anywhere anywhere +DNAT tcp -- anywhere anywhere tcp dpt:commplex-main to:172.17.0.2:5000 + + +更改完成之后在内网机器(配置默认网关为'能上网的机器ip 96')上执行 +systemctl restart network +后就可以通过能上网的机器连接baidu.com了 +```` + + [iptables](http://www.zsythink.net/archives/1199) + +## 修改网卡名称 + +```sh +/sbin/ip link set eth1 down +/sbin/ip link set eth1 name eth123 +/sbin/ip link set eth123 up +``` + +[change network interface withou reboot](https://unix.stackexchange.com/questions/205010/centos-7-rename-network-interface-without-rebooting) ## 访问socket ```json diff --git "a/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/iptables.md" "b/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/iptables.md" index e05849d..d2edb46 100644 --- 
"a/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/iptables.md" +++ "b/\345\270\270\347\224\250\345\221\275\344\273\244/\347\263\273\347\273\237\345\270\270\347\224\250\345\221\275\344\273\244/Linux/iptables.md" @@ -52,4 +52,6 @@ DNAt: (PREROUTING,修改得到是即将到来的数据包.修改的是目的IP, [iptables详解](https://blog.csdn.net/reyleon/article/details/12976341) -[iptables 分析](http://www.zsythink.net/archives/1199) \ No newline at end of file +[iptables 分析](http://www.zsythink.net/archives/1199) + +[内网上网](http://xstarcd.github.io/wiki/Linux/iptables_forward_internetshare.html) \ No newline at end of file diff --git "a/\345\270\270\350\247\201\345\215\217\350\256\256/DNS/DNS.md" "b/\345\270\270\350\247\201\345\215\217\350\256\256/DNS/DNS.md" new file mode 100644 index 0000000..1ac927a --- /dev/null +++ "b/\345\270\270\350\247\201\345\215\217\350\256\256/DNS/DNS.md" @@ -0,0 +1,5 @@ +# DNS + +参考: + +[DNS 协议](https://luodichen.com/blog/2013/03/03/dns-protocol/) \ No newline at end of file diff --git "a/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/Golang.md" "b/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/Golang.md" index 97223eb..7e9669c 100644 --- "a/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/Golang.md" +++ "b/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/Golang.md" @@ -223,4 +223,5 @@ or [golang环境配置](https://zhuanlan.zhihu.com/p/52517506) -[vscode golang环境搭建](https://maiyang.me/post/2018-09-14-tips-vscode/) \ No newline at end of file +[vscode golang环境搭建](https://maiyang.me/post/2018-09-14-tips-vscode/) + diff --git "a/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/GolangTips.md" "b/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/GolangTips.md" new file mode 100644 index 0000000..4049150 --- /dev/null +++ "b/\347\274\226\347\250\213\350\257\255\350\250\200/Golang/GolangTips.md" @@ -0,0 +1,33 @@ +# Golang Tips + +参考: + 
+[跨包调用函数](https://www.jianshu.com/p/70d780e2716a) + +[反射调用](https://jinfagang.github.io/2017/06/15/Golang%E4%B8%80%E9%97%A8%E7%A5%9E%E5%A5%87%E7%9A%84%E8%AF%AD%E8%A8%80%E4%B9%8B%E4%B8%80-%E5%8F%8D%E5%B0%84%E5%AE%9E%E7%8E%B0%E5%87%BD%E6%95%B0%E8%B0%83%E7%94%A8%EF%BC%8C%E5%90%84%E7%A7%8D%E8%B0%83/) + +[检查空结构体](https://stackoverflow.com/questions/28447297/how-to-check-for-an-empty-struct) + +[gdb 调试golang](http://blog.studygolang.com/2012/12/gdb%E8%B0%83%E8%AF%95go%E7%A8%8B%E5%BA%8F/) + +[golang json marshal 和unmarshal](https://www.restapiexample.com/golang-tutorial/marshal-and-unmarshal-of-struct-data-using-golang/) + +[golang 接口作为参数传递](https://stackoverflow.com/questions/20314604/go-syntax-and-interface-as-parameter-to-function) + +[接口传参](https://stackoverflow.com/questions/49246719/reflect-on-struct-passed-into-interface-in-function-parameter-golang) + +[golang 接口反射](http://legendtkl.com/2015/11/28/go-interface-reflect/) + +[](https://juejin.im/post/5a75a4fb5188257a82110544) + +[Golang 错误处理](https://ethancai.github.io/2017/12/29/Error-Handling-in-Go/) + +[打印interface 数组](https://stackoverflow.com/questions/39649446/dump-an-array-of-interfaces-in-golang) + +[字符串数组类型断言](http://www.cnblogs.com/superfat/p/7376330.html) + +[golang init 函数何时运行](https://stackoverflow.com/questions/24790175/when-is-the-init-function-run) + +[golang new 函数](http://www.cnblogs.com/hustcat/p/4004889.html) + +[new 和make的区别](https://stackoverflow.com/questions/9320862/why-would-i-make-or-new) \ No newline at end of file diff --git "a/\350\277\220\347\273\264/tools/CentOS7/change_yum_and_kernel.sh" "b/\350\277\220\347\273\264/tools/CentOS7/change_yum_and_kernel.sh" index 11e85e8..55a7f0b 100644 --- "a/\350\277\220\347\273\264/tools/CentOS7/change_yum_and_kernel.sh" +++ "b/\350\277\220\347\273\264/tools/CentOS7/change_yum_and_kernel.sh" @@ -3,7 +3,6 @@ function ChangeYum() { cd /etc/yum.repos.d && - echo "curr_path:"${pwd} && mv CentOS-Base.repo CentOS-Base.repo.bk && curl -O 
http://mirrors.aliyun.com/repo/Centos-7.repo && mv Centos-7.repo CentOS-Base.repo && @@ -31,4 +30,3 @@ echo "== ChangeYum! ==" ChangeYum UpdateKernel - diff --git "a/\350\277\220\347\273\264/tools/CentOS7/install_pkg.sh" "b/\350\277\220\347\273\264/tools/CentOS7/install_pkg.sh" index 953b6fc..b4f5376 100644 --- "a/\350\277\220\347\273\264/tools/CentOS7/install_pkg.sh" +++ "b/\350\277\220\347\273\264/tools/CentOS7/install_pkg.sh" @@ -41,6 +41,4 @@ function InstallDocker() InstallWget InstallGo InstallGit -InstallDocker - - +InstallDocker \ No newline at end of file diff --git "a/\350\277\220\347\273\264/\350\231\232\346\213\237\345\214\226\345\267\245\345\205\267/ProxmoxVE.md" "b/\350\277\220\347\273\264/\350\231\232\346\213\237\345\214\226\345\267\245\345\205\267/ProxmoxVE.md" new file mode 100644 index 0000000..4cea4e3 --- /dev/null +++ "b/\350\277\220\347\273\264/\350\231\232\346\213\237\345\214\226\345\267\245\345\205\267/ProxmoxVE.md" @@ -0,0 +1,7 @@ +# Proxmox VE + +https://服务器IP:8006 + +参考: + +[Proxmox VE](http://www.cnblogs.com/sweetWinne/p/6526911.html) \ No newline at end of file