51 changes: 23 additions & 28 deletions s3-backed-ftp/Dockerfile
@@ -1,10 +1,11 @@
FROM factual/docker-base

# Install needed packages and cleanup after
RUN apt-get -y update && apt-get -y install --no-install-recommends \
automake \
autotools-dev \
g++ \
git \
g++ \
git \
libcurl4-gnutls-dev \
libfuse-dev \
libssl-dev \
@@ -16,34 +17,28 @@ RUN apt-get -y update && apt-get -y install --no-install-recommends \
openssh-server \
supervisor \
&& rm -rf /var/lib/apt/lists/*

RUN pip3 install awscli

RUN git clone https://github.com/s3fs-fuse/s3fs-fuse.git && \
cd s3fs-fuse && \
./autogen.sh && \
./configure && \
make && \
sudo make install
Review comment: sudo isn't required for `make install`. What do you think?


RUN mkdir -p /home/aws/s3bucket/

ADD s3-fuse.sh /usr/local/

ADD vsftpd.conf /etc/vsftpd.conf

RUN chown root:root /etc/vsftpd.conf

ADD sshd_config /etc/ssh/sshd_config

ADD users.sh /usr/local/

ADD add_users_in_container.sh /usr/local/

RUN echo "/usr/sbin/nologin" >> /etc/shells

# Run commands to set up everything
RUN pip3 install awscli && \
git clone https://github.com/s3fs-fuse/s3fs-fuse.git && \
cd s3fs-fuse && \
./autogen.sh && \
./configure && \
make && \
sudo make install && \
mkdir -p /home/aws/s3bucket/ && \
echo "/usr/sbin/nologin" >> /etc/shells

# Copy scripts to /usr/local
COPY ["s3-fuse.sh", "users.sh", "add_users_in_container.sh", "/usr/local/"]

# Copy needed config files to their destinations
COPY vsftpd.conf /etc/vsftpd.conf
COPY sshd_config /etc/ssh/sshd_config
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf

EXPOSE 21 22
# Expose ftp and sftp ports
EXPOSE 21 22

# Run supervisord at container start
CMD ["/usr/bin/supervisord"]
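
For a quick local check of the consolidated image, a build sketch (the image tag is a placeholder and the path assumes the repository root as the working directory):

```sh
# Build the image from the repository root; pick any tag you like
docker build -t local/s3-backed-ftp s3-backed-ftp/
```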
8 changes: 4 additions & 4 deletions s3-backed-ftp/README.md
@@ -22,13 +22,13 @@ To run:

3. Then after building the container (if necessary), run using:

- `docker run --rm -p 21:21 -p 222:22 -p 1024-1048:1024-1048 --name <name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <docker/tag>`
- `docker run --rm -p 21-22:21-22 -p 30000-30100:30000-30100 --name <name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <docker/tag>`
- If you would like the docker to restart after reboot then use:
* `docker run --restart=always -p 21:21 -p 222:22 -p 1024-1048:1024-1048 --name <name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <docker/tag>`
* `docker run --restart=always -p 21-22:21-22 -p 30000-30100:30000-30100 --name <name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <docker/tag>`
- If the `env.list` file is named differently, change accordingly.
- If you don't want to use the cap-add and device options you could also just use the privileged option instead:
* `docker run --restart=always -p 21:21 -p 222:22 -p 1024-1024:1024-1048 --privileged --env-file env.list <docker/tag>`
* `docker run --restart=always -p 21-22:21-22 -p 30000-30100:30000-30100 --privileged --env-file env.list <docker/tag>`

## Environment Variables

1. ` USERS ` = List of users to add to the ftp/sftp server. Listed in the form username:hashedpassword, each separated by a space.
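
For illustration only, a sketch of generating a hashed password and writing an `env.list` for `--env-file`; the bucket names, role, address, and hashes below are placeholders, and `openssl passwd -1` produces the crypt-format hash that `chpasswd -e` expects:

```sh
# Produce a crypt-format hash to paste into the USERS value
openssl passwd -1 'alices-password'

# Example env.list (all values are placeholders)
cat > env.list <<'EOF'
USERS=alice:$1$examplesalt$xxxxxxxxxxxxxxxxxxxxxx bob:$1$examplesalt$yyyyyyyyyyyyyyyyyyyyyy
FTP_BUCKET=my-ftp-bucket
CONFIG_BUCKET=my-config-bucket
IAM_ROLE=my-ftp-iam-role
PASV_ADDRESS=203.0.113.10
EOF
```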
54 changes: 29 additions & 25 deletions s3-backed-ftp/add_users_in_container.sh
@@ -1,14 +1,13 @@
#!/bin/bash
# This script will update the env.list file (the file containing the USERS environment variable) and add any new users.
# Will check for new users at a given time interval (adjust SLEEP_DURATION below)

FTP_DIRECTORY="/home/aws/s3bucket/ftp-users"
CONFIG_FILE="env.list" # May need to modify config file name to reflect future changes in env file location/name
SLEEP_DURATION=60
# Change these next two variables to set different permissions for files/directories
# These were the vsftpd defaults, so change accordingly if necessary
FILE_PERMISSIONS=644
DIRECTORY_PERMISSIONS=755
DIRECTORY_PERMISSIONS=750

add_users() {
aws s3 cp s3://$CONFIG_BUCKET/$CONFIG_FILE ~/$CONFIG_FILE
@@ -17,7 +16,7 @@ add_users() {
for u in $USERS; do
read username passwd <<< $(echo $u | sed 's/:/ /g')

# If account exists set password again
# If account exists set password again
# In cases where password changes in env file
if getent passwd "$username" >/dev/null 2>&1; then
echo $u | chpasswd -e
@@ -26,36 +25,41 @@
# Permissions when uploaded directly through S3 Web client were set as:
# 000 root:root
# This would not allow ftp users to read the files

# Search for files and directories not owned correctly
find "$FTP_DIRECTORY"/"$username"/files/* \( \! -user "$username" \! -group "$username" \) -print0 | xargs -0 chown "$username:$username"
find "$FTP_DIRECTORY/$username/files/" -mindepth 1 \( \! -user "$username" \! -group "$username" \) -print0 | xargs -0 -r chown "$username:$username"

# Search for files with incorrect permissions
find "$FTP_DIRECTORY"/"$username"/files/* -type f \! -perm "$FILE_PERMISSIONS" -print0 | xargs -0 chmod "$FILE_PERMISSIONS"
find "$FTP_DIRECTORY/$username/files/" -mindepth 1 -type f \! -perm "$FILE_PERMISSIONS" -print0 | xargs -0 -r chmod "$FILE_PERMISSIONS"

# Search for directories with incorrect permissions
find "$FTP_DIRECTORY"/"$username"/files/* -type d \! -perm "$DIRECTORY_PERMISSIONS" -print0 | xargs -0 chmod "$DIRECTORY_PERMISSIONS"
find "$FTP_DIRECTORY/$username/files/" -mindepth 1 -type d \! -perm "$DIRECTORY_PERMISSIONS" -print0 | xargs -0 -r chmod "$DIRECTORY_PERMISSIONS"

# Search for .ssh folders and authorized_keys files with incorrect permissions/ownership
find "$FTP_DIRECTORY/$username/.ssh" -mindepth 1 -type d \! -perm 700 -print0 | xargs -0 -r chmod 700
find "$FTP_DIRECTORY/$username/.ssh" -mindepth 1 -type d \! -user "$username" -print0 | xargs -0 -r chown "$username"

find "$FTP_DIRECTORY/$username/.ssh/authorized_keys" -mindepth 1 -type f \! -perm 600 -print0 | xargs -0 -r chmod 600
find "$FTP_DIRECTORY/$username/.ssh/authorized_keys" -mindepth 1 -type f \! -user "$username" -print0 | xargs -0 -r chown "$username"
fi

# If user account doesn't exist create it
# As well as their home directory
# If user account doesn't exist create it
if ! getent passwd "$username" >/dev/null 2>&1; then
useradd -d "$FTP_DIRECTORY/$username" -s /usr/sbin/nologin $username
usermod -G ftpaccess $username

mkdir -p "$FTP_DIRECTORY/$username"
chown root:ftpaccess "$FTP_DIRECTORY/$username"
chmod 750 "$FTP_DIRECTORY/$username"

mkdir -p "$FTP_DIRECTORY/$username/files"
chown $username:ftpaccess "$FTP_DIRECTORY/$username/files"
chmod 750 "$FTP_DIRECTORY/$username/files"
fi
done
useradd -d "$FTP_DIRECTORY/$username" -s /usr/sbin/nologin $username
usermod -G ftpaccess $username

mkdir -p "$FTP_DIRECTORY/$username"
chown root:ftpaccess "$FTP_DIRECTORY/$username"
chmod 750 "$FTP_DIRECTORY/$username"

mkdir -p "$FTP_DIRECTORY/$username/files"
chown $username:ftpaccess "$FTP_DIRECTORY/$username/files"
chmod 750 "$FTP_DIRECTORY/$username/files"
fi
done
}

while true; do
add_users
sleep $SLEEP_DURATION
done
while true; do
add_users
sleep $SLEEP_DURATION
done
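
Because the loop above re-downloads `env.list` from `$CONFIG_BUCKET` on every pass, later user additions only require updating the file in S3; a sketch, with a hypothetical bucket name:

```sh
# After editing the USERS line in env.list locally, push it to the config bucket;
# the polling loop picks up the change within SLEEP_DURATION (60s by default).
aws s3 cp env.list s3://my-config-bucket/env.list
```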
22 changes: 16 additions & 6 deletions s3-backed-ftp/s3-fuse.sh
@@ -17,7 +17,7 @@ if [ -z $IAM_ROLE ] && [ -z $AWS_ACCESS_KEY_ID ]; then
exit 1
fi

# Abort if neither an IAM_ROLE nor an AWS_SECRET_ACCESS_KEY was provided.
# Abort if neither an IAM_ROLE nor an AWS_SECRET_ACCESS_KEY was provided.
if [ -z $IAM_ROLE ] && [ -z $AWS_SECRET_ACCESS_KEY ]; then
echo "You need to set AWS_SECRET_ACCESS_KEY environment variable. Aborting!"
exit 1
@@ -30,16 +30,26 @@ if [ -z $IAM_ROLE ] && [ ! -z $AWS_ACCESS_KEY_ID ] && [ ! -z $AWS_SECRET_ACCESS_
chmod 600 ~/.passwd-s3fs
fi

# Update the vsftpd.conf file to include the IP address if running on an EC2 instance
if curl -s http://instance-data.ec2.internal > /dev/null ; then
IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
if [ ! -z $PASV_ADDRESS ]; then
sed -i "s/^pasv_address=/pasv_address=$PASV_ADDRESS/" /etc/vsftpd.conf
elif curl -s http://instance-data > /dev/null ; then
IP=$(curl -s http://instance-data/latest/meta-data/public-ipv4)
sed -i "s/^pasv_address=/pasv_address=$IP/" /etc/vsftpd.conf
else
echo "You need to set PASV_ADDRESS environment variable, or run in an EC2 instance. Aborting!"
exit 1
fi

# Update the vsftpd.conf file to include the IP address if running on an EC2 instance
# if curl -s http://instance-data.ec2.internal > /dev/null ; then
# IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
# sed -i "s/^pasv_address=/pasv_address=$IP/" /etc/vsftpd.conf
# else
# echo "Skipping"
# fi

# start s3 fuse
# Code above is not needed if the IAM role is attached to the EC2 instance
# Code above is not needed if the IAM role is attached to the EC2 instance
# s3fs provides the iam_role option to grab those credentials automatically
/usr/local/bin/s3fs $FTP_BUCKET /home/aws/s3bucket -o allow_other -o mp_umask="0022" -o iam_role="$IAM_ROLE" #-d -d -f -o f2 -o curldbg
/usr/local/bin/s3fs $FTP_BUCKET /home/aws/s3bucket -o allow_other -o mp_umask="0022" -o iam_role="$IAM_ROLE" -o stat_cache_expire=600 #-d -d -f -o f2 -o curldbg
/usr/local/users.sh
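
If the mount or user setup misbehaves, a quick way to verify the s3fs mount from the host is a couple of `docker exec` checks (the container name is a placeholder):

```sh
# Confirm the bucket is mounted via s3fs and its contents are visible
docker exec ftp grep s3fs /proc/mounts
docker exec ftp ls /home/aws/s3bucket
```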
1 change: 1 addition & 0 deletions s3-backed-ftp/sshd_config
@@ -1,5 +1,6 @@
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
PubkeyAuthentication yes

#Port 22

9 changes: 8 additions & 1 deletion s3-backed-ftp/supervisord.conf
@@ -6,10 +6,14 @@ logfile=/var/log/supervisord.log
command=/usr/local/s3-fuse.sh
autorestart=false
priority=1
stdout_logfile=/var/log/s3-fuse-startup.log
stderr_logfile=/var/log/s3-fuse-startup.log

[program:sshd]
command=/usr/sbin/sshd -D
command=/usr/sbin/sshd -D -e
autorestart=true
stdout_logfile=/var/log/sshd.log
stderr_logfile=/var/log/sshd.log

[program:vsftpd]
command=/usr/sbin/vsftpd
@@ -18,3 +22,6 @@ autorestart=true
[program:add_users_in_container]
command=/usr/local/add_users_in_container.sh
autorestart=true
stdout_logfile=/var/log/add-users.log
stderr_logfile=/var/log/add-users.log
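
With the per-program log files added above, startup problems can also be inspected from the host; a sketch, assuming the container was started with `--name ftp`:

```sh
# Follow the mount and user-provisioning logs inside the running container
docker exec ftp tail -f /var/log/s3-fuse-startup.log /var/log/add-users.log
```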

19 changes: 14 additions & 5 deletions s3-backed-ftp/users.sh
@@ -13,7 +13,7 @@ chmod 755 $FTP_DIRECTORY

# Expecting an environment variable called USERS to look like "bob:hashedbobspassword steve:hashedstevespassword"
for u in $USERS; do

read username passwd <<< $(echo $u | sed 's/:/ /g')

# User needs to be created every time since stopping the docker container gets rid of users.
@@ -22,7 +22,7 @@ for u in $USERS; do

# set the users password
echo $u | chpasswd -e

if [ -z "$username" ] || [ -z "$passwd" ]; then
echo "Invalid username:password combination '$u': please fix to create '$username'"
continue
@@ -34,18 +34,27 @@ chmod 750 "$FTP_DIRECTORY/$username"
chmod 750 "$FTP_DIRECTORY/$username"
chown $username:ftpaccess "$FTP_DIRECTORY/$username/files"
chmod 750 "$FTP_DIRECTORY/$username/files"

# Create .ssh folder and authorized_keys file, for ssh-key sftp access
mkdir -p "$FTP_DIRECTORY/$username/.ssh"
chmod 700 "$FTP_DIRECTORY/$username/.ssh"
chown $username "$FTP_DIRECTORY/$username/.ssh"
touch "$FTP_DIRECTORY/$username/.ssh/authorized_keys"
chmod 600 "$FTP_DIRECTORY/$username/.ssh/authorized_keys"
chown $username "$FTP_DIRECTORY/$username/.ssh/authorized_keys"

else
echo "Creating '$username' directory..."

# Root must own all directories leading up to and including users home directory
mkdir -p "$FTP_DIRECTORY/$username"
chown root:ftpaccess "$FTP_DIRECTORY/$username"
chmod 750 "$FTP_DIRECTORY/$username"

# Need files sub-directory for SFTP chroot
mkdir -p "$FTP_DIRECTORY/$username/files"
chown $username:ftpaccess "$FTP_DIRECTORY/$username/files"
chmod 750 "$FTP_DIRECTORY/$username/files"
fi

done
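
The `.ssh/authorized_keys` handling above enables key-based SFTP logins; a sketch of installing a public key for a hypothetical user `alice`, using the same `FTP_DIRECTORY` layout the script creates:

```sh
# Run after the user has been created by users.sh; permissions are re-asserted
# by add_users_in_container.sh, but setting them here keeps the key usable right away.
cat alice_id_rsa.pub >> /home/aws/s3bucket/ftp-users/alice/.ssh/authorized_keys
chmod 600 /home/aws/s3bucket/ftp-users/alice/.ssh/authorized_keys
chown alice /home/aws/s3bucket/ftp-users/alice/.ssh/authorized_keys
```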
2 changes: 1 addition & 1 deletion s3-backed-ftp/vsftpd.conf
@@ -58,7 +58,7 @@ connect_from_port_20=YES
#idle_session_timeout=600

# You may change the default value for timing out a data connection.
#data_connection_timeout=120
data_connection_timeout=900

# It is recommended that you define on your system a unique user which the
# ftp server can use as a totally isolated and unprivileged user.