removed submodules

2023-12-13 18:03:49 +01:00
parent 2e3bb35f86
commit 757ab5a092
41 changed files with 0 additions and 2607 deletions

.gitmodules
View File

@@ -1,9 +1,6 @@
[submodule "kube-prometheus"]
path = kube-prometheus
url = https://github.com/coreos/kube-prometheus.git
[submodule "cluster-monitoring"]
path = cluster-monitoring
url = git@git.lan:chaos/k8s-cluster-monitoring.git
[submodule "gluster-kubernetes"]
path = gluster-kubernetes
url = https://github.com/jayflory/gluster-kubernetes.git
@@ -28,15 +25,6 @@
[submodule "csi-s3/storage-csi-s3"]
path = csi-s3/storage-csi-s3
url = https://github.com/ctrox/csi-s3.git
[submodule "csi-s3/external-attacher"]
path = csi-s3/external-attacher
url = https://github.com/kubernetes-csi/external-attacher.git
[submodule "csi-s3/external-provisioner"]
path = csi-s3/external-provisioner
url = https://github.com/kubernetes-csi/external-provisioner.git
[submodule "csi-s3/node-driver-registrar"]
path = csi-s3/node-driver-registrar
url = https://github.com/kubernetes-csi/node-driver-registrar.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git

View File

@@ -1,12 +0,0 @@
COMMON:
** git tag -l
** V=GIT_TAG git checkout -b branch=$V $V
** run: build.sh dir-name
external-provisioner:
external-attacher:
node-driver-registrar:
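The three COMMON steps above are terse; expanded into runnable shell (the tag value is illustrative, not taken from the notes), they amount to something like:
```bash
# Hypothetical expansion of the notes; v1.2.0 stands in for a real GIT_TAG.
cd external-provisioner          # same flow for external-attacher and node-driver-registrar
git tag -l                       # list release tags and pick one
V=v1.2.0
git checkout -b branch=$V $V     # work on a branch pinned to that tag
cd ..
./build.sh external-provisioner  # build, tag and optionally push the arm64 image
```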

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Build an arm64 image for the given sidecar directory and optionally push it.
set -e
APP=$1
cd "$APP"
VERSION=arm64 make -j8 GOARCH=arm64
docker build -t "${APP}:arm64" --platform linux/arm64 .
docker tag "${APP}:arm64" "docker-registry.lan/${APP}:arm64"
echo "=============================================="
while true; do
read -p "Push it real good? " yn
case $yn in
[Yy]* )
docker push docker-registry.lan/${APP}:arm64;
echo "-> Cheers";
echo;
break;;
[Nn]* )
echo "x> Cheers!";
echo;
exit;;
* ) echo "Please answer [y]es or [n]o.";;
esac
done
cd -

View File

@@ -1,12 +0,0 @@
# This is where the result of the go build goes
/output*/
/_output*/
/_output
# Go test binaries
*.test
# Godeps or dep workspace
/Godeps/_workspace
vendor
vendor.*

View File

@@ -1,24 +0,0 @@
image:
name: ctrox/csi-s3:test
entrypoint: [""]
variables:
DOCKER_HOST: tcp://docker:2375
DOCKER_DRIVER: overlay2
stages:
- build
- test
build:
stage: build
script:
- make build
test:
stage: test
image: docker:stable
services:
- docker:dind
script:
- docker run --rm --privileged -v $(pwd):/app --device /dev/fuse ctrox/csi-s3:test

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,37 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: test build container push clean
PROJECT_DIR=/app
REGISTRY_NAME=docker-registry.lan
IMAGE_NAME=csi-s3
VERSION ?= dev
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
FULL_IMAGE_TAG=$(IMAGE_TAG)-full
TEST_IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):test
build:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -ldflags '-extldflags "-static"' -o _output/s3driver ./cmd/s3driver
test:
docker build -t $(TEST_IMAGE_TAG) -f test/Dockerfile .
docker run --rm --privileged -v $(PWD):$(PROJECT_DIR) --device /dev/fuse $(TEST_IMAGE_TAG)
container: build
docker build --platform linux/arm64 -t $(IMAGE_TAG) -f cmd/s3driver/Dockerfile .
docker build --platform linux/arm64 -t $(FULL_IMAGE_TAG) --build-arg VERSION=$(VERSION) -f cmd/s3driver/Dockerfile.full .
push: container
docker push $(IMAGE_TAG)
docker push $(FULL_IMAGE_TAG)
clean:
go clean -r -x
-rm -rf _output

View File

@@ -1,173 +0,0 @@
# CSI for S3
This is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec/blob/master/spec.md)) driver for S3 (or S3-compatible) storage. It can dynamically allocate buckets and mount them into any container via a FUSE mount.
## Status
This is still very experimental and should not be used in any production environment. Unexpected data loss could occur depending on which mounter and S3 storage backend are being used.
## Kubernetes installation
### Requirements
* Kubernetes 1.13+ (CSI v1.0.0 compatibility)
* Kubernetes has to allow privileged containers
* The Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
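A rough way to check the shared-mounts requirement on a node (a sketch assuming systemd and util-linux, not part of the original docs):
```bash
# MountFlags should be empty or "shared"; "slave" or "private" breaks mount propagation.
systemctl show docker --property=MountFlags
# The filesystem holding the kubelet dirs should use shared propagation.
findmnt --target /var/lib/kubelet -o TARGET,PROPAGATION
```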
### 1. Create a secret with your S3 credentials
```yaml
apiVersion: v1
kind: Secret
metadata:
name: csi-s3-secret
stringData:
accessKeyID: <YOUR_ACCESS_KEY_ID>
secretAccessKey: <YOUR_SECRET_ACCESS_KEY>
# For AWS set it to "https://s3.<region>.amazonaws.com"
endpoint: <S3_ENDPOINT_URL>
# If not on S3, set it to ""
region: <S3_REGION>
```
The region can be empty if you are using some other S3 compatible storage.
### 2. Deploy the driver
```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f attacher.yaml
kubectl create -f csi-s3.yaml
```
### 3. Create the storage class
```bash
kubectl create -f storageclass.yaml
```
### 4. Test the S3 driver
1. Create a PVC using the new storage class:
```bash
kubectl create -f pvc.yaml
```
2. Check if the PVC has been bound:
```bash
$ kubectl get pvc csi-s3-pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
csi-s3-pvc Bound pvc-c5d4634f-8507-11e8-9f33-0e243832354b 5Gi RWO csi-s3 9s
```
3. Create a test pod which mounts your volume:
```bash
kubectl create -f poc.yaml
```
If the pod can start, everything should be working.
4. Test the mount
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
$ touch /var/lib/www/html/hello_world
```
If something does not work as expected, check the troubleshooting section below.
## Additional configuration
### Mounter
As S3 is not a real file system, there are some limitations to consider. Depending on which mounter you are using, you will get different levels of POSIX compatibility. The S3 storage backend you use also determines whether there are [consistency guarantees](https://github.com/gaul/are-we-consistent-yet#observed-consistency).
The driver can be configured to use one of these mounters to mount buckets:
* [rclone](https://rclone.org/commands/rclone_mount)
* [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
* [goofys](https://github.com/kahing/goofys)
* [s3backer](https://github.com/archiecobbs/s3backer)
The mounter can be set as a parameter in the storage class. You can also create multiple storage classes, one for each mounter, if you like.
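For example, a second class that switches the mounter to s3fs could be created like this (a hypothetical sketch: the class name is illustrative and the secret parameters are abbreviated from storageclass.yaml further down):
```bash
kubectl apply -f - <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3-s3fs
provisioner: ch.ctrox.csi.s3-driver
parameters:
  mounter: s3fs
  csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret
  csi.storage.k8s.io/provisioner-secret-namespace: kube-system
EOF
```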
All mounters have different strengths and weaknesses depending on your use case. Here are some characteristics which should help you choose a mounter:
#### rclone
* Almost full POSIX compatibility (depends on caching mode)
* Files can be viewed normally with any S3 client
#### s3fs
* Large subset of POSIX
* Files can be viewed normally with any S3 client
* Does not support appends or random writes
#### goofys
* Weak POSIX compatibility
* Performance first
* Files can be viewed normally with any S3 client
* Does not support appends or random writes
#### s3backer (experimental*)
* Represents a block device stored on S3
* Allows using a real filesystem
* Files are not readable with other S3 clients
* Supports appends
* Supports compression before upload (Not yet implemented in this driver)
* Supports encryption before upload (Not yet implemented in this driver)
*s3backer is experimental at this point because volume corruption can occur pretty quickly in case of an unexpected shutdown of a Kubernetes node or CSI pod.
The s3backer binary is not bundled with the normal Docker image in order to keep it as small as possible. Use the `<version>-full` image tag for testing s3backer.
For more detailed limitations, consult the documentation of the different projects.
## Troubleshooting
### Issues while creating PVC
Check the logs of the provisioner:
```bash
kubectl logs -l app=csi-provisioner-s3 -c csi-s3
```
### Issues creating containers
1. Ensure the `MountPropagation` feature gate is not set to `false` (a quick check is sketched after this list)
2. Check the logs of the s3-driver:
```bash
kubectl logs -l app=csi-s3 -c csi-s3
```
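If the driver pods start but mounts never appear, it may also help to confirm the csi-s3 container really got Bidirectional propagation on /var/lib/kubelet/pods (a sketch assuming the DaemonSet from deploy/kubernetes/csi-s3.yaml):
```bash
kubectl -n kube-system get ds csi-s3 \
  -o jsonpath='{.spec.template.spec.containers[?(@.name=="csi-s3")].volumeMounts}'
```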
## Development
This project can be built like any other Go application.
```bash
go get -u github.com/ctrox/csi-s3
```
### Build executable
```bash
make build
```
### Tests
Currently the driver is tested by the [CSI Sanity Tester](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). As end-to-end tests require S3 storage and a mounter like s3fs, this is best done in a Docker container. A Dockerfile and the test script are in the `test` directory. The easiest way to run the tests is to use the make command:
```bash
make test
```

View File

@@ -1,14 +0,0 @@
FROM debian:buster-slim
LABEL maintainers="Cyrill Troxler <cyrilltroxler@gmail.com>"
LABEL description="csi-s3 slim image"
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
# s3fs and some other dependencies
RUN apt-get update && \
apt-get install -y \
s3fs curl unzip rclone && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*
COPY ./s3driver /s3driver
ENTRYPOINT ["/s3driver"]

View File

@@ -1,43 +0,0 @@
FROM debian:buster-slim as s3backer
ARG S3BACKER_VERSION=1.5.4
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
build-essential \
autoconf \
libcurl4-openssl-dev \
libfuse-dev libfuse3-dev \
libexpat1-dev \
libssl-dev \
zlib1g-dev \
psmisc \
pkg-config \
git && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*
# Compile & install s3backer
RUN git clone https://github.com/archiecobbs/s3backer.git /src/s3backer
WORKDIR /src/s3backer
RUN git checkout tags/${S3BACKER_VERSION}
RUN ./autogen.sh && \
./configure && \
make -j8 && \
make install
FROM debian:buster-slim
LABEL maintainers="Cyrill Troxler <cyrilltroxler@gmail.com>"
LABEL description="csi-s3 image"
COPY --from=s3backer /usr/bin/s3backer /usr/bin/s3backer
# s3fs and some other dependencies
RUN apt-get update && \
apt-get install -y \
libfuse3-3 gcc sqlite3 libsqlite3-dev \
s3fs psmisc procps libcurl4 xfsprogs curl unzip rclone && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
COPY ./_output/s3driver /s3driver
ENTRYPOINT ["/s3driver"]

View File

@@ -1,45 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"log"
"os"
"github.com/ctrox/csi-s3/pkg/s3"
)
func init() {
flag.Set("logtostderr", "true")
}
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id")
)
func main() {
flag.Parse()
driver, err := s3.NewS3(*nodeID, *endpoint)
if err != nil {
log.Fatal(err)
}
driver.Run()
os.Exit(0)
}

View File

@@ -1,90 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update","patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: kube-system
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: kube-system
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
containers:
- name: csi-attacher
image: docker-registry.lan/csi-attacher:arm64
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver
type: DirectoryOrCreate

View File

@@ -1,121 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-s3
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-s3
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-s3
subjects:
- kind: ServiceAccount
name: csi-s3
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-s3
apiGroup: rbac.authorization.k8s.io
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-s3
namespace: kube-system
spec:
selector:
matchLabels:
app: csi-s3
template:
metadata:
labels:
app: csi-s3
spec:
serviceAccount: csi-s3
hostNetwork: true
containers:
- name: driver-registrar
image: docker-registry.lan/node-driver-registrar:arm64
args:
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration/
- name: csi-s3
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: docker-registry.lan/csi-s3:arm64
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(NODE_ID)"
- "--v=4"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "Always"
#imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: fuse-device
mountPath: /dev/fuse
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: fuse-device
hostPath:
path: /dev/fuse

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: csi-s3-test-nginx
namespace: test
spec:
containers:
- name: csi-s3-test-nginx
image: nginx
volumeMounts:
- mountPath: /usr/share/nginx/html
name: webroot
volumes:
- name: webroot
persistentVolumeClaim:
claimName: csi-s3-pvc
readOnly: false

View File

@@ -1,105 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: csi-provisioner-s3
namespace: kube-system
labels:
app: csi-provisioner-s3
spec:
selector:
app: csi-provisioner-s3
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-provisioner-s3
namespace: kube-system
spec:
serviceName: "csi-provisioner-s3"
replicas: 1
selector:
matchLabels:
app: csi-provisioner-s3
template:
metadata:
labels:
app: csi-provisioner-s3
spec:
serviceAccount: csi-provisioner-sa
containers:
- name: csi-provisioner
image: docker-registry.lan/csi-provisioner:arm64
args:
- "--provisioner=ch.ctrox.csi.s3-driver"
- "--csi-address=$(ADDRESS)"
- "--v=4"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver
- name: csi-s3
image: docker-registry.lan/csi-s3:arm64
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(NODE_ID)"
- "--v=4"
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver/csi.sock
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ch.ctrox.csi.s3-driver
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: csi-s3-pvc
namespace: test
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: csi-s3-slow

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: csi-s3-secret
# needs to be in ALL namespaces
namespace: kube-system
stringData:
accessKeyID: minio
secretAccessKey: minio2020
endpoint: http://ebin02:9000
# If not on S3, set it to ""
region: ""

View File

@@ -1,21 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: csi-s3-slow
provisioner: ch.ctrox.csi.s3-driver
reclaimPolicy: Retain
allowVolumeExpansion: true
parameters:
# specify which mounter to use
# can be set to rclone, s3fs, goofys or s3backer
# https://github.com/CTrox/csi-s3
mounter: rclone
csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret
csi.storage.k8s.io/provisioner-secret-namespace: kube-system
csi.storage.k8s.io/controller-publish-secret-name: csi-s3-secret
csi.storage.k8s.io/controller-publish-secret-namespace: kube-system
csi.storage.k8s.io/node-stage-secret-name: csi-s3-secret
csi.storage.k8s.io/node-stage-secret-namespace: kube-system
csi.storage.k8s.io/node-publish-secret-name: csi-s3-secret
csi.storage.k8s.io/node-publish-secret-namespace: kube-system

View File

@@ -1,44 +0,0 @@
module github.com/ctrox/csi-s3
go 1.14
require (
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/aws/aws-sdk-go v1.14.27 // indirect
github.com/container-storage-interface/spec v1.1.0
github.com/go-ini/ini v1.38.1 // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/protobuf v1.1.0 // indirect
github.com/jacobsa/fuse v0.0.0-20180417054321-cd3959611bcb // indirect
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 // indirect
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
github.com/kahing/go-xattr v1.1.1 // indirect
github.com/kahing/goofys v0.19.0
github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect
github.com/kubernetes-csi/csi-test v2.0.0+incompatible
github.com/kubernetes-csi/drivers v1.0.2
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 // indirect
github.com/minio/minio-go v0.0.0-20190430232750-10b3660b8f09
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
github.com/onsi/ginkgo v1.5.0
github.com/onsi/gomega v1.4.0
github.com/shirou/gopsutil v0.0.0-20180625081143-4a180b209f5f // indirect
github.com/sirupsen/logrus v1.0.5 // indirect
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa // indirect
github.com/spf13/afero v1.2.1 // indirect
github.com/stretchr/testify v1.3.0 // indirect
github.com/urfave/cli v1.20.0 // indirect
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 // indirect
google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b // indirect
google.golang.org/grpc v1.13.0
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
gopkg.in/ini.v1 v1.41.0
gopkg.in/yaml.v2 v2.2.1 // indirect
k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f // indirect
k8s.io/klog v0.2.0 // indirect
k8s.io/kubernetes v1.13.4
k8s.io/utils v0.0.0-20180703210027-ab9069044f32 // indirect
)

View File

@@ -1,131 +0,0 @@
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/aws/aws-sdk-go v1.14.27 h1:fRVME5X3sxZnctdCcabNTWZq7ZGrpVgUAYk4OA5EG0A=
github.com/aws/aws-sdk-go v1.14.27/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
github.com/container-storage-interface/spec v1.0.0 h1:3DyXuJgf9MU6kyULESegQUmozsSxhpyrrv9u5bfwA3E=
github.com/container-storage-interface/spec v1.0.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/ctrox/csi-test v0.0.0-20190311173153-80a2484bf798 h1:nfii2XdBGLaje6HWjtMCKaUBRv86HLg9uiOtAW9NRJA=
github.com/ctrox/csi-test v0.0.0-20190311173153-80a2484bf798/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/ctrox/csi-test v1.1.0 h1:YwOvPrlZw6/qgG+G8EQMkMniPt2HJmTOYVBiawgfiQ8=
github.com/ctrox/csi-test v1.1.0/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/ctrox/csi-test v1.1.1-0.20190310103436-e50382dcb47f h1:FLD1xv7Vwv7+JZizABfim+tR8Ctj68B2mnS529kHBPg=
github.com/ctrox/csi-test v1.1.1-0.20190310103436-e50382dcb47f/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/ctrox/csi-test v1.1.1-0.20190311173153-80a2484bf798/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/ctrox/csi-test v1.1.2-0.20190310094942-e965dacfef26 h1:KbZ3qIvoWP0CD7ZnUULipd5QGg0gmNLCfxikgAYnKwQ=
github.com/ctrox/csi-test v1.1.2-0.20190310094942-e965dacfef26/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/ctrox/csi-test v1.1.2-0.20190310103005-3f3cc7817699 h1:bQ82DNERrJuin7/+sRCoeBz7FV8/HNS6LpIe48XUWCo=
github.com/ctrox/csi-test v1.1.2-0.20190310103005-3f3cc7817699/go.mod h1:Sdb3sQ5DaEikqpKZNzj+abr8x/OCMXB0KTaxIAXP1RI=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-ini/ini v1.38.1 h1:hbtfM8emWUVo9GnXSloXYyFbXxZ+tG6sbepSStoe1FY=
github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jacobsa/fuse v0.0.0-20180417054321-cd3959611bcb h1:TRAjtOoio6kvnrIMLeXesGT9IydfO+zQoioKWrv40nI=
github.com/jacobsa/fuse v0.0.0-20180417054321-cd3959611bcb/go.mod h1:9Aml1MG17JVeXrN4D2mtJvYHtHklJH5bESjCKNzVjFU=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kahing/go-xattr v1.1.1 h1:7Ft/P9Gc6iqRVzBRLVw/yLL/dbtzL6FsZzGQj3T9ZY8=
github.com/kahing/go-xattr v1.1.1/go.mod h1:DXZs3JwPmH2DnyFxWjLZWb65lq8pOPtsf9LD+2Gbbpw=
github.com/kahing/goofys v0.19.0 h1:jcuffrnpvZq+LjXtRODo0pvNOglw32ClzBZ1XLShFnk=
github.com/kahing/goofys v0.19.0/go.mod h1:erC9E45nY5m8v6FE+tYIGRVjIC2N8viMlJrgrsXB2Q4=
github.com/kubernetes-csi/csi-lib-utils v0.6.1 h1:+AZ58SRSRWh2vmMoWAAGcv7x6fIyBMpyCXAgIc9kT28=
github.com/kubernetes-csi/csi-lib-utils v0.6.1/go.mod h1:GVmlUmxZ+SUjVLXicRFjqWUUvWez0g0Y78zNV9t7KfQ=
github.com/kubernetes-csi/csi-test v1.1.0 h1:a7CfGqhGDs0h7AZt1f6LTIUzBazcRf6eBdTUBXB4xE4=
github.com/kubernetes-csi/csi-test v1.1.0/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0=
github.com/kubernetes-csi/csi-test v1.1.1 h1:L4RPre34ICeoQW7ez4X5t0PnFKaKs8K5q0c1XOrvXEM=
github.com/kubernetes-csi/csi-test v1.1.1/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0=
github.com/kubernetes-csi/csi-test v2.0.0+incompatible h1:ia04uVFUM/J9n/v3LEMn3rEG6FmKV5BH9QLw7H68h44=
github.com/kubernetes-csi/csi-test v2.0.0+incompatible/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0=
github.com/kubernetes-csi/drivers v0.0.0-20181207022357-c1e71bdcce6e h1:BkkRJIF329ps8digiMWthYzDPl9KB8PwkDwvVWDwM4A=
github.com/kubernetes-csi/drivers v0.0.0-20181207022357-c1e71bdcce6e/go.mod h1:V6rHbbSLCZGaQoIZ8MkyDtoXtcKXZM0F7N3bkloDCOY=
github.com/kubernetes-csi/drivers v1.0.2 h1:kaEAMfo+W5YFr23yedBIY+NGnNjr6/PbPzx7N4GYgiQ=
github.com/kubernetes-csi/drivers v1.0.2/go.mod h1:V6rHbbSLCZGaQoIZ8MkyDtoXtcKXZM0F7N3bkloDCOY=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/minio/minio-go v0.0.0-20190430232750-10b3660b8f09 h1:c64QOQYYVNo2a9kaHCgwyUyllGDYZVMcRGwzBUQMUao=
github.com/minio/minio-go v0.0.0-20190430232750-10b3660b8f09/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM=
github.com/minio/minio-go v6.0.5+incompatible h1:qxQQW40lV2vuE9i6yYmt90GSJlT1YrMenWrjM6nZh0Q=
github.com/minio/minio-go v6.0.5+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9 h1:Y94YB7jrsihrbGSqRNMwRWJ2/dCxr0hdC2oPRohkx0A=
github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 h1:kw1v0NlnN+GZcU8Ma8CLF2Zzgjfx95gs3/GN3vYAPpo=
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/onsi/ginkgo v1.5.0 h1:uZr+v/TFDdYkdA+j02sPO1kA5owrfjBGCJAogfIyThE=
github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.0 h1:p/ZBjQI9G/VwoPrslo/sqS6R5vHU9Od60+axIiP6WuQ=
github.com/onsi/gomega v1.4.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/shirou/gopsutil v0.0.0-20180625081143-4a180b209f5f h1:lv02BiKkf3A85oirJHx0feXbKV4xrq5Nf7QbrNyILoo=
github.com/shirou/gopsutil v0.0.0-20180625081143-4a180b209f5f/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA=
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8 h1:h7zdf0RiEvWbYBKIx4b+q41xoUVnMmvsGZnIVE5syG8=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b h1:Ib/yptP38nXZFMwqWSip+OKuMP9OkyDe3p+DssP8n9w=
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180712202826-d0887baf81f4 h1:KDF3PK6A+dkI7c4O8QbMtJqcXE3LdNJFGZECIlifQOg=
golang.org/x/net v0.0.0-20180712202826-d0887baf81f4/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180715085529-ac767d655b30 h1:4bYUqrXBoiI7UFQeibUwFhvcHfaEeL75O3lOcZa964o=
golang.org/x/sys v0.0.0-20180715085529-ac767d655b30/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e h1:3GIlrlVLfkoipSReOMNAgApI0ajnalyLa/EZHHca/XI=
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b h1:mXqBiicV0B+k8wzFNkKeNBRL7LyRV5xG0s+S6ffLb/E=
google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/ini.v1 v1.38.1 h1:8E3nEICVJ6kxl6aTXYp77xYyObhw7YG9/avdj0r3vME=
gopkg.in/ini.v1 v1.38.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f h1:mjXiDUfs+4mhzRTLNTkAfQS9lqJCXQN/fIcMysNGW/Y=
k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kubernetes v1.13.4 h1:gQqFv/pH8hlbznLXQUsi8s5zqYnv0slmUDl/yVA0EWc=
k8s.io/kubernetes v1.13.4/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20180703210027-ab9069044f32 h1:Bhn4kFG8fxBouj05v9Y7bOYXKGYfnpmqiTXGgPCmPr8=
k8s.io/utils v0.0.0-20180703210027-ab9069044f32/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=

View File

@@ -1,10 +0,0 @@
package s3
// Config holds values to configure the driver
type Config struct {
AccessKeyID string
SecretAccessKey string
Region string
Endpoint string
Mounter string
}

View File

@@ -1,201 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s3
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"strings"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type controllerServer struct {
*csicommon.DefaultControllerServer
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
volumeID := sanitizeVolumeID(req.GetName())
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
capacityBytes := int64(req.GetCapacityRange().GetRequiredBytes())
params := req.GetParameters()
mounter := params[mounterTypeKey]
glog.V(4).Infof("Got a request to create volume %s", volumeID)
s3, err := newS3ClientFromSecrets(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
exists, err := s3.bucketExists(volumeID)
if err != nil {
return nil, fmt.Errorf("failed to check if bucket %s exists: %v", volumeID, err)
}
if exists {
var b *bucket
b, err = s3.getBucket(volumeID)
if err != nil {
return nil, fmt.Errorf("failed to get bucket metadata of bucket %s: %v", volumeID, err)
}
// Check if volume capacity requested is bigger than the already existing capacity
if capacityBytes > b.CapacityBytes {
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Volume with the same name: %s but with smaller size already exist", volumeID))
}
} else {
if err = s3.createBucket(volumeID); err != nil {
return nil, fmt.Errorf("failed to create volume %s: %v", volumeID, err)
}
if err = s3.createPrefix(volumeID, fsPrefix); err != nil {
return nil, fmt.Errorf("failed to create prefix %s: %v", fsPrefix, err)
}
}
b := &bucket{
Name: volumeID,
Mounter: mounter,
CapacityBytes: capacityBytes,
FSPath: fsPrefix,
}
if err := s3.setBucket(b); err != nil {
return nil, fmt.Errorf("Error setting bucket metadata: %v", err)
}
glog.V(4).Infof("create volume %s", volumeID)
s3Vol := s3Volume{}
s3Vol.VolName = volumeID
s3Vol.VolID = volumeID
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: capacityBytes,
VolumeContext: req.GetParameters(),
},
}, nil
}
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
volumeID := req.GetVolumeId()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("Invalid delete volume req: %v", req)
return nil, err
}
glog.V(4).Infof("Deleting volume %s", volumeID)
s3, err := newS3ClientFromSecrets(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
exists, err := s3.bucketExists(volumeID)
if err != nil {
return nil, err
}
if exists {
if err := s3.removeBucket(volumeID); err != nil {
glog.V(3).Infof("Failed to remove volume %s: %v", volumeID, err)
return nil, err
}
} else {
glog.V(5).Infof("Bucket %s does not exist, ignoring request", volumeID)
}
return &csi.DeleteVolumeResponse{}, nil
}
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
s3, err := newS3ClientFromSecrets(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
exists, err := s3.bucketExists(req.GetVolumeId())
if err != nil {
return nil, err
}
if !exists {
// return an error if the volume requested does not exist
return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume with id %s does not exist", req.GetVolumeId()))
}
// We currently only support RWO
supportedAccessMode := &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
}
for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != supportedAccessMode.GetMode() {
return &csi.ValidateVolumeCapabilitiesResponse{Message: "Only single node writer is supported"}, nil
}
}
return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessMode: supportedAccessMode,
},
},
},
}, nil
}
func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return &csi.ControllerExpandVolumeResponse{}, status.Error(codes.Unimplemented, "ControllerExpandVolume is not implemented")
}
func sanitizeVolumeID(volumeID string) string {
volumeID = strings.ToLower(volumeID)
if len(volumeID) > 63 {
h := sha1.New()
io.WriteString(h, volumeID)
volumeID = hex.EncodeToString(h.Sum(nil))
}
return volumeID
}

View File

@@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s3
import (
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type identityServer struct {
*csicommon.DefaultIdentityServer
}

View File

@@ -1,82 +0,0 @@
package s3
import (
"fmt"
"os/exec"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// Mounter interface which can be implemented
// by the different mounter types
type Mounter interface {
Stage(stagePath string) error
Unstage(stagePath string) error
Mount(source string, target string) error
}
const (
s3fsMounterType = "s3fs"
goofysMounterType = "goofys"
s3backerMounterType = "s3backer"
rcloneMounterType = "rclone"
mounterTypeKey = "mounter"
)
// newMounter returns a new mounter depending on the mounterType parameter
func newMounter(bucket *bucket, cfg *Config) (Mounter, error) {
mounter := bucket.Mounter
// Fall back to mounterType in cfg
if len(bucket.Mounter) == 0 {
mounter = cfg.Mounter
}
switch mounter {
case s3fsMounterType:
return newS3fsMounter(bucket, cfg)
case goofysMounterType:
return newGoofysMounter(bucket, cfg)
case s3backerMounterType:
return newS3backerMounter(bucket, cfg)
case rcloneMounterType:
return newRcloneMounter(bucket, cfg)
default:
// default to s3backer
return newS3backerMounter(bucket, cfg)
}
}
func fuseMount(path string, command string, args []string) error {
cmd := exec.Command(command, args...)
glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error fuseMount command: %s\nargs: %s\noutput: %s", command, args, out)
}
return waitForMount(path, 10*time.Second)
}
func fuseUnmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
// as fuse quits immediately, we will try to wait until the process is done
process, err := findFuseMountProcess(path)
if err != nil {
glog.Errorf("Error getting PID of fuse mount: %s", err)
return nil
}
if process == nil {
glog.Warningf("Unable to find PID of fuse mount %s, it must have finished already", path)
return nil
}
glog.Infof("Found fuse pid %v of mount %s, checking if it still runs", process.Pid, path)
return waitForProcess(process, 1)
}

View File

@@ -1,71 +0,0 @@
package s3
import (
"fmt"
"os"
"context"
goofysApi "github.com/kahing/goofys/api"
)
const (
goofysCmd = "goofys"
defaultRegion = "us-east-1"
)
// Implements Mounter
type goofysMounter struct {
bucket *bucket
endpoint string
region string
accessKeyID string
secretAccessKey string
}
func newGoofysMounter(b *bucket, cfg *Config) (Mounter, error) {
region := cfg.Region
// if endpoint is set we need a default region
if region == "" && cfg.Endpoint != "" {
region = defaultRegion
}
return &goofysMounter{
bucket: b,
endpoint: cfg.Endpoint,
region: region,
accessKeyID: cfg.AccessKeyID,
secretAccessKey: cfg.SecretAccessKey,
}, nil
}
func (goofys *goofysMounter) Stage(stageTarget string) error {
return nil
}
func (goofys *goofysMounter) Unstage(stageTarget string) error {
return nil
}
func (goofys *goofysMounter) Mount(source string, target string) error {
goofysCfg := &goofysApi.Config{
MountPoint: target,
Endpoint: goofys.endpoint,
Region: goofys.region,
DirMode: 0755,
FileMode: 0644,
MountOptions: map[string]string{
"allow_other": "",
},
}
os.Setenv("AWS_ACCESS_KEY_ID", goofys.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", goofys.secretAccessKey)
fullPath := fmt.Sprintf("%s:%s", goofys.bucket.Name, goofys.bucket.FSPath)
_, _, err := goofysApi.Mount(context.Background(), fullPath, goofysCfg)
if err != nil {
return fmt.Errorf("Error mounting via goofys: %s", err)
}
return nil
}

View File

@@ -1,56 +0,0 @@
package s3
import (
"fmt"
"os"
)
// Implements Mounter
type rcloneMounter struct {
bucket *bucket
url string
region string
accessKeyID string
secretAccessKey string
}
const (
rcloneCmd = "rclone"
)
func newRcloneMounter(b *bucket, cfg *Config) (Mounter, error) {
return &rcloneMounter{
bucket: b,
url: cfg.Endpoint,
region: cfg.Region,
accessKeyID: cfg.AccessKeyID,
secretAccessKey: cfg.SecretAccessKey,
}, nil
}
func (rclone *rcloneMounter) Stage(stageTarget string) error {
return nil
}
func (rclone *rcloneMounter) Unstage(stageTarget string) error {
return nil
}
func (rclone *rcloneMounter) Mount(source string, target string) error {
args := []string{
"mount",
fmt.Sprintf(":s3:%s/%s", rclone.bucket.Name, rclone.bucket.FSPath),
fmt.Sprintf("%s", target),
"--daemon",
"--s3-provider=AWS",
"--s3-env-auth=true",
fmt.Sprintf("--s3-region=%s", rclone.region),
fmt.Sprintf("--s3-endpoint=%s", rclone.url),
"--allow-other",
// TODO: make this configurable
"--vfs-cache-mode=writes",
}
os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
return fuseMount(target, rcloneCmd, args)
}

View File

@@ -1,154 +0,0 @@
package s3
import (
"fmt"
"net/url"
"os"
"os/exec"
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// Implements Mounter
type s3backerMounter struct {
bucket *bucket
url string
region string
accessKeyID string
secretAccessKey string
ssl bool
}
const (
s3backerCmd = "s3backer"
s3backerFsType = "xfs"
s3backerDevice = "file"
// blockSize to use in k
s3backerBlockSize = "128k"
s3backerDefaultSize = 1024 * 1024 * 1024 // 1GiB
// S3backerLoopDevice the loop device required by s3backer
S3backerLoopDevice = "/dev/loop0"
)
func newS3backerMounter(bucket *bucket, cfg *Config) (Mounter, error) {
url, err := url.Parse(cfg.Endpoint)
if err != nil {
return nil, err
}
url.Path = path.Join(url.Path, bucket.Name, bucket.FSPath)
// s3backer cannot work with 0 size volumes
if bucket.CapacityBytes == 0 {
bucket.CapacityBytes = s3backerDefaultSize
}
s3backer := &s3backerMounter{
bucket: bucket,
url: cfg.Endpoint,
region: cfg.Region,
accessKeyID: cfg.AccessKeyID,
secretAccessKey: cfg.SecretAccessKey,
ssl: url.Scheme == "https",
}
return s3backer, s3backer.writePasswd()
}
func (s3backer *s3backerMounter) String() string {
return s3backer.bucket.Name
}
func (s3backer *s3backerMounter) Stage(stageTarget string) error {
// s3backer uses the loop device
if err := createLoopDevice(S3backerLoopDevice); err != nil {
return err
}
// s3backer requires two mounts
// first mount will fuse mount the bucket to a single 'file'
if err := s3backer.mountInit(stageTarget); err != nil {
return err
}
// ensure 'file' device is formatted
err := formatFs(s3backerFsType, path.Join(stageTarget, s3backerDevice))
if err != nil {
fuseUnmount(stageTarget)
}
return err
}
func (s3backer *s3backerMounter) Unstage(stageTarget string) error {
// Unmount the s3backer fuse mount
return fuseUnmount(stageTarget)
}
func (s3backer *s3backerMounter) Mount(source string, target string) error {
device := path.Join(source, s3backerDevice)
// second mount will mount the 'file' as a filesystem
err := mount.New("").Mount(device, target, s3backerFsType, []string{})
if err != nil {
// cleanup fuse mount
fuseUnmount(target)
return err
}
return nil
}
func (s3backer *s3backerMounter) mountInit(path string) error {
args := []string{
fmt.Sprintf("--blockSize=%s", s3backerBlockSize),
fmt.Sprintf("--size=%v", s3backer.bucket.CapacityBytes),
fmt.Sprintf("--prefix=%s/", s3backer.bucket.FSPath),
"--listBlocks",
s3backer.bucket.Name,
path,
}
if s3backer.region != "" {
args = append(args, fmt.Sprintf("--region=%s", s3backer.region))
} else {
// only set baseURL if not on AWS (region is not set)
// baseURL must end with /
args = append(args, fmt.Sprintf("--baseURL=%s/", s3backer.url))
}
if s3backer.ssl {
args = append(args, "--ssl")
}
return fuseMount(path, s3backerCmd, args)
}
func (s3backer *s3backerMounter) writePasswd() error {
pwFileName := fmt.Sprintf("%s/.s3backer_passwd", os.Getenv("HOME"))
// O_TRUNC ensures a shorter credential pair does not leave stale bytes behind
pwFile, err := os.OpenFile(pwFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
_, err = pwFile.WriteString(s3backer.accessKeyID + ":" + s3backer.secretAccessKey)
if err != nil {
return err
}
pwFile.Close()
return nil
}
func formatFs(fsType string, device string) error {
diskMounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}
format, err := diskMounter.GetDiskFormat(device)
if err != nil {
return err
}
if format != "" {
glog.Infof("Disk %s is already formatted with format %s", device, format)
return nil
}
args := []string{
device,
}
cmd := exec.Command("mkfs."+fsType, args...)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error formatting disk: %s", out)
}
glog.Infof("Formatting fs with type %s", fsType)
return nil
}
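
s3backer differs from the other mounters in that it exposes the bucket as a block device: Stage fuse-mounts the bucket so it shows up as a single backing 'file' and formats it with xfs on first use, and Mount then mounts that file as a regular filesystem. A rough sketch of the two-step sequence the node server drives; the paths are illustrative and error handling is trimmed.

// Illustrative sequence only; paths are made up.
func stageThenPublish(m Mounter) error {
staging := "/var/lib/kubelet/plugins/staging/vol-1"
target := "/var/lib/kubelet/pods/pod-1/volumes/vol-1"
// Stage: fuse-mount the bucket and mkfs.xfs the backing 'file' if needed
if err := m.Stage(staging); err != nil {
return err
}
// Mount: mount <staging>/file as an xfs filesystem at the target
return m.Mount(staging, target)
}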

View File

@@ -1,65 +0,0 @@
package s3
import (
"fmt"
"os"
)
// Implements Mounter
type s3fsMounter struct {
bucket *bucket
url string
region string
pwFileContent string
}
const (
s3fsCmd = "s3fs"
)
func newS3fsMounter(b *bucket, cfg *Config) (Mounter, error) {
return &s3fsMounter{
bucket: b,
url: cfg.Endpoint,
region: cfg.Region,
pwFileContent: cfg.AccessKeyID + ":" + cfg.SecretAccessKey,
}, nil
}
func (s3fs *s3fsMounter) Stage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Mount(source string, target string) error {
if err := writes3fsPass(s3fs.pwFileContent); err != nil {
return err
}
args := []string{
fmt.Sprintf("%s:/%s", s3fs.bucket.Name, s3fs.bucket.FSPath),
fmt.Sprintf("%s", target),
"-o", "use_path_request_style",
"-o", fmt.Sprintf("url=%s", s3fs.url),
"-o", fmt.Sprintf("endpoint=%s", s3fs.region),
"-o", "allow_other",
"-o", "mp_umask=000",
}
return fuseMount(target, s3fsCmd, args)
}
func writes3fsPass(pwFileContent string) error {
pwFileName := fmt.Sprintf("%s/.passwd-s3fs", os.Getenv("HOME"))
// O_TRUNC ensures a shorter credential pair does not leave stale bytes behind
pwFile, err := os.OpenFile(pwFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
_, err = pwFile.WriteString(pwFileContent)
if err != nil {
return err
}
pwFile.Close()
return nil
}

View File

@@ -1,214 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s3
import (
"fmt"
"os"
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/mount"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type nodeServer struct {
*csicommon.DefaultNodeServer
}
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath()
stagingTargetPath := req.GetStagingTargetPath()
// Check arguments
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
}
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Staging Target path missing in request")
}
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := checkMount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
deviceID := ""
if req.GetPublishContext() != nil {
deviceID = req.GetPublishContext()["deviceID"]
}
// TODO: Implement readOnly & mountFlags
readOnly := req.GetReadonly()
// TODO: check if attrib is correct with context.
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v\ndevice %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
targetPath, deviceID, readOnly, volumeID, attrib, mountFlags)
s3, err := newS3ClientFromSecrets(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
b, err := s3.getBucket(volumeID)
if err != nil {
return nil, err
}
mounter, err := newMounter(b, s3.cfg)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
return nil, err
}
glog.V(4).Infof("s3: bucket %s successfuly mounted to %s", b.Name, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if err := fuseUnmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("s3: bucket %s has been unmounted.", volumeID)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
volumeID := req.GetVolumeId()
stagingTargetPath := req.GetStagingTargetPath()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if req.VolumeCapability == nil {
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
}
notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notMnt {
return &csi.NodeStageVolumeResponse{}, nil
}
s3, err := newS3ClientFromSecrets(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
b, err := s3.getBucket(volumeID)
if err != nil {
return nil, err
}
mounter, err := newMounter(b, s3.cfg)
if err != nil {
return nil, err
}
if err := mounter.Stage(stagingTargetPath); err != nil {
return nil, err
}
return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
volumeID := req.GetVolumeId()
stagingTargetPath := req.GetStagingTargetPath()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
// currently there is a single NodeServer capability according to the spec
nscap := &csi.NodeServiceCapability{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
}
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
nscap,
},
}, nil
}
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
return &csi.NodeExpandVolumeResponse{}, status.Error(codes.Unimplemented, "NodeExpandVolume is not implemented")
}
func checkMount(targetPath string) (bool, error) {
notMnt, err := mount.New("").IsLikelyNotMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return false, err
}
notMnt = true
} else {
return false, err
}
}
return notMnt, nil
}
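
These handlers are served on the CSI gRPC endpoint that s3-driver.go below binds to a unix socket. As a point of reference, a minimal client exercising the node service might look like this; the socket path and dial options are assumptions, since in a cluster the kubelet is the actual caller.

// Minimal sketch of a CSI client poking the node service over its unix socket.
package main

import (
"context"
"log"

"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc"
)

func main() {
// assumed socket path; the driver is started with the matching --endpoint
conn, err := grpc.Dial("unix:///tmp/csi-s3.sock", grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
defer conn.Close()
node := csi.NewNodeClient(conn)
caps, err := node.NodeGetCapabilities(context.Background(), &csi.NodeGetCapabilitiesRequest{})
if err != nil {
log.Fatal(err)
}
log.Printf("node capabilities: %v", caps.GetCapabilities())
}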

View File

@@ -1,153 +0,0 @@
package s3
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/url"
"github.com/golang/glog"
"github.com/minio/minio-go"
)
const (
metadataName = ".metadata.json"
fsPrefix = "csi-fs"
)
type s3Client struct {
cfg *Config
minio *minio.Client
}
type bucket struct {
Name string
Mounter string
FSPath string
CapacityBytes int64
}
func newS3Client(cfg *Config) (*s3Client, error) {
var client = &s3Client{}
client.cfg = cfg
u, err := url.Parse(client.cfg.Endpoint)
if err != nil {
return nil, err
}
ssl := u.Scheme == "https"
endpoint := u.Hostname()
if u.Port() != "" {
endpoint = u.Hostname() + ":" + u.Port()
}
minioClient, err := minio.NewWithRegion(endpoint, client.cfg.AccessKeyID, client.cfg.SecretAccessKey, ssl, client.cfg.Region)
if err != nil {
return nil, err
}
client.minio = minioClient
return client, nil
}
func newS3ClientFromSecrets(secrets map[string]string) (*s3Client, error) {
return newS3Client(&Config{
AccessKeyID: secrets["accessKeyID"],
SecretAccessKey: secrets["secretAccessKey"],
Region: secrets["region"],
Endpoint: secrets["endpoint"],
// Mounter is set in the volume preferences, not secrets
Mounter: "",
})
}
func (client *s3Client) bucketExists(bucketName string) (bool, error) {
return client.minio.BucketExists(bucketName)
}
func (client *s3Client) createBucket(bucketName string) error {
return client.minio.MakeBucket(bucketName, client.cfg.Region)
}
func (client *s3Client) createPrefix(bucketName string, prefix string) error {
_, err := client.minio.PutObject(bucketName, prefix+"/", bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
if err != nil {
return err
}
return nil
}
func (client *s3Client) removeBucket(bucketName string) error {
if err := client.emptyBucket(bucketName); err != nil {
return err
}
return client.minio.RemoveBucket(bucketName)
}
func (client *s3Client) emptyBucket(bucketName string) error {
objectsCh := make(chan string)
// listErrCh reports whether listing succeeded; checking a plain variable here
// would race with the goroutine and always observe its zero value
listErrCh := make(chan error, 1)
go func() {
defer close(objectsCh)
doneCh := make(chan struct{})
defer close(doneCh)
for object := range client.minio.ListObjects(bucketName, "", true, doneCh) {
if object.Err != nil {
listErrCh <- object.Err
return
}
objectsCh <- object.Key
}
listErrCh <- nil
}()
removeFailed := false
for e := range client.minio.RemoveObjects(bucketName, objectsCh) {
glog.Errorf("Failed to remove object %s, error: %s", e.ObjectName, e.Err)
removeFailed = true
}
if err := <-listErrCh; err != nil {
glog.Error("Error listing objects", err)
return err
}
if removeFailed {
return fmt.Errorf("Failed to remove all objects of bucket %s", bucketName)
}
// ensure our prefix is also removed
return client.minio.RemoveObject(bucketName, fsPrefix)
}
func (client *s3Client) setBucket(bucket *bucket) error {
b := new(bytes.Buffer)
if err := json.NewEncoder(b).Encode(bucket); err != nil {
return err
}
opts := minio.PutObjectOptions{ContentType: "application/json"}
_, err := client.minio.PutObject(bucket.Name, metadataName, b, int64(b.Len()), opts)
return err
}
func (client *s3Client) getBucket(bucketName string) (*bucket, error) {
opts := minio.GetObjectOptions{}
obj, err := client.minio.GetObject(bucketName, metadataName, opts)
if err != nil {
return &bucket{}, err
}
objInfo, err := obj.Stat()
if err != nil {
return &bucket{}, err
}
b := make([]byte, objInfo.Size)
// a single Read is not guaranteed to fill the buffer, so read the whole object
if _, err := io.ReadFull(obj, b); err != nil {
return &bucket{}, err
}
var meta bucket
err = json.Unmarshal(b, &meta)
return &meta, err
}
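
The controller server (not part of this section of the diff) presumably provisions volumes through this client: check whether the bucket exists, create it if needed, create the csi-fs prefix, and persist the volume metadata as .metadata.json so the node server can read it back via getBucket. A sketch of that flow under those assumptions, with error handling kept minimal:

// Sketch of a CreateVolume-style flow using s3Client; not the controller's exact code.
func provisionSketch(cfg *Config, volumeID string, capacity int64) error {
client, err := newS3Client(cfg)
if err != nil {
return err
}
exists, err := client.bucketExists(volumeID)
if err != nil {
return err
}
if !exists {
if err := client.createBucket(volumeID); err != nil {
return err
}
}
if err := client.createPrefix(volumeID, fsPrefix); err != nil {
return err
}
// record the mounter choice and capacity so the node server can pick them up later
return client.setBucket(&bucket{
Name: volumeID,
Mounter: cfg.Mounter,
FSPath: fsPrefix,
CapacityBytes: capacity,
})
}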

View File

@@ -1,95 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s3
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type s3 struct {
driver *csicommon.CSIDriver
endpoint string
ids *identityServer
ns *nodeServer
cs *controllerServer
}
type s3Volume struct {
VolName string `json:"volName"`
VolID string `json:"volID"`
VolSize int64 `json:"volSize"`
VolPath string `json:"volPath"`
}
var (
vendorVersion = "v1.1.1"
driverName = "ch.ctrox.csi.s3-driver"
)
// NewS3 initializes the driver
func NewS3(nodeID string, endpoint string) (*s3, error) {
driver := csicommon.NewCSIDriver(driverName, vendorVersion, nodeID)
if driver == nil {
glog.Fatalln("Failed to initialize CSI Driver.")
}
s3Driver := &s3{
endpoint: endpoint,
driver: driver,
}
return s3Driver, nil
}
func (s3 *s3) newIdentityServer(d *csicommon.CSIDriver) *identityServer {
return &identityServer{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
func (s3 *s3) newControllerServer(d *csicommon.CSIDriver) *controllerServer {
return &controllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
}
}
func (s3 *s3) newNodeServer(d *csicommon.CSIDriver) *nodeServer {
return &nodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
}
}
func (s3 *s3) Run() {
glog.Infof("Driver: %v ", driverName)
glog.Infof("Version: %v ", vendorVersion)
// Initialize default library driver
s3.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME})
s3.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
// Create GRPC servers
s3.ids = s3.newIdentityServer(s3.driver)
s3.ns = s3.newNodeServer(s3.driver)
s3.cs = s3.newControllerServer(s3.driver)
s := csicommon.NewNonBlockingGRPCServer()
s.Start(s3.endpoint, s3.ids, s3.cs, s3.ns)
s.Wait()
}
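
Run is invoked from a small main package elsewhere in the repository. For context, a minimal entrypoint consistent with NewS3's signature would look roughly like this; the flag names and default endpoint are illustrative.

// Sketch of a main package that runs the driver; flag names are made up here.
package main

import (
"flag"

"github.com/ctrox/csi-s3/pkg/s3"
"github.com/golang/glog"
)

func main() {
endpoint := flag.String("endpoint", "unix:///tmp/csi-s3.sock", "CSI endpoint")
nodeID := flag.String("nodeid", "", "node id")
flag.Parse()

driver, err := s3.NewS3(*nodeID, *endpoint)
if err != nil {
glog.Fatal(err)
}
driver.Run()
}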

View File

@@ -1,123 +0,0 @@
package s3_test
import (
"log"
"os"
"github.com/ctrox/csi-s3/pkg/s3"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/kubernetes-csi/csi-test/pkg/sanity"
)
var _ = Describe("S3Driver", func() {
Context("goofys", func() {
socket := "/tmp/csi-goofys.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/goofys-target",
StagingPath: os.TempDir() + "/goofys-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "goofys",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("s3fs", func() {
socket := "/tmp/csi-s3fs.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/s3fs-target",
StagingPath: os.TempDir() + "/s3fs-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "s3fs",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("s3backer", func() {
socket := "/tmp/csi-s3backer.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
// Clear loop device so we cover the creation of it
os.Remove(s3.S3backerLoopDevice)
driver, err := s3.NewS3("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/s3backer-target",
StagingPath: os.TempDir() + "/s3backer-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "s3backer",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("rclone", func() {
socket := "/tmp/csi-rclone.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/rclone-target",
StagingPath: os.TempDir() + "/rclone-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "rclone",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
})

View File

@@ -1,29 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s3
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestS3Driver(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "S3Driver")
}

View File

@@ -1,106 +0,0 @@
package s3
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"syscall"
"time"
"github.com/mitchellh/go-ps"
"k8s.io/kubernetes/pkg/util/mount"
"github.com/golang/glog"
)
func waitForProcess(p *os.Process, backoff int) error {
if backoff == 20 {
return fmt.Errorf("Timeout waiting for PID %v to end", p.Pid)
}
cmdLine, err := getCmdLine(p.Pid)
if err != nil {
glog.Warningf("Error checking cmdline of PID %v, assuming it is dead: %s", p.Pid, err)
return nil
}
if cmdLine == "" {
// ignore defunct processes
// TODO: debug why this happens in the first place
// seems to only happen on k8s, not on local docker
glog.Warning("Fuse process seems dead, returning")
return nil
}
if err := p.Signal(syscall.Signal(0)); err != nil {
glog.Warningf("Fuse process does not seem active or we are unprivileged: %s", err)
return nil
}
glog.Infof("Fuse process with PID %v still active, waiting...", p.Pid)
time.Sleep(time.Duration(backoff*100) * time.Millisecond)
return waitForProcess(p, backoff+1)
}
func waitForMount(path string, timeout time.Duration) error {
var elapsed time.Duration
var interval = 10 * time.Millisecond
for {
notMount, err := mount.New("").IsNotMountPoint(path)
if err != nil {
return err
}
if !notMount {
return nil
}
time.Sleep(interval)
elapsed = elapsed + interval
if elapsed >= timeout {
return errors.New("Timeout waiting for mount")
}
}
}
func findFuseMountProcess(path string) (*os.Process, error) {
processes, err := ps.Processes()
if err != nil {
return nil, err
}
for _, p := range processes {
cmdLine, err := getCmdLine(p.Pid())
if err != nil {
glog.Errorf("Unable to get cmdline of PID %v: %s", p.Pid(), err)
continue
}
if strings.Contains(cmdLine, path) {
glog.Infof("Found matching pid %v on path %s", p.Pid(), path)
return os.FindProcess(p.Pid())
}
}
return nil, nil
}
func getCmdLine(pid int) (string, error) {
cmdLineFile := fmt.Sprintf("/proc/%v/cmdline", pid)
cmdLine, err := ioutil.ReadFile(cmdLineFile)
if err != nil {
return "", err
}
return string(cmdLine), nil
}
func createLoopDevice(device string) error {
if _, err := os.Stat(device); !os.IsNotExist(err) {
return nil
}
args := []string{
device,
"b", "7", "0",
}
cmd := exec.Command("mknod", args...)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error configuring loop device: %s", out)
}
return nil
}

View File

@@ -1,32 +0,0 @@
FROM ctrox/csi-s3:dev-full
LABEL maintainers="Cyrill Troxler <cyrilltroxler@gmail.com>"
LABEL description="csi-s3 testing image"
RUN apt-get update && \
apt-get install -y \
git wget make && \
rm -rf /var/lib/apt/lists/*
RUN wget -q https://dl.google.com/go/go1.12.5.linux-amd64.tar.gz && \
tar -xf go1.12.5.linux-amd64.tar.gz && \
rm go1.12.5.linux-amd64.tar.gz && \
mv go /usr/local
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH
RUN wget -q https://dl.min.io/server/minio/release/linux-amd64/minio && \
chmod +x minio &&\
mv minio /usr/local/bin
WORKDIR /app
# prewarm go mod cache
COPY go.mod .
COPY go.sum .
RUN go mod download
ADD test/test.sh /usr/local/bin
ENTRYPOINT ["/usr/local/bin/test.sh"]

View File

@@ -1,25 +0,0 @@
CreateVolumeSecret:
  accessKeyID: FJDSJ
  secretAccessKey: DSG643HGDS
  endpoint: http://127.0.0.1:9000
  region: ""
DeleteVolumeSecret:
  accessKeyID: FJDSJ
  secretAccessKey: DSG643HGDS
  endpoint: http://127.0.0.1:9000
  region: ""
NodeStageVolumeSecret:
  accessKeyID: FJDSJ
  secretAccessKey: DSG643HGDS
  endpoint: http://127.0.0.1:9000
  region: ""
NodePublishVolumeSecret:
  accessKeyID: FJDSJ
  secretAccessKey: DSG643HGDS
  endpoint: http://127.0.0.1:9000
  region: ""
ControllerValidateVolumeCapabilitiesSecret:
  accessKeyID: FJDSJ
  secretAccessKey: DSG643HGDS
  endpoint: http://127.0.0.1:9000
  region: ""

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
export MINIO_ACCESS_KEY=FJDSJ
export MINIO_SECRET_KEY=DSG643HGDS
mkdir -p /tmp/minio
minio server /tmp/minio &>/dev/null &
sleep 5
go test github.com/ctrox/csi-s3/pkg/s3 -cover