# Load shared shell helpers (these define the errno constants, e.g. EIO,
# used throughout this script).
# NOTE: if sourcing fails, EIO itself is likely unset — default to 5
# (the conventional EIO value) so we always exit with a meaningful,
# deterministic non-zero status instead of an empty expansion.
. /usr/lib/tool/bash-utils || exit "${EIO:-5}"
# Force UTF-8 on Python stdio — presumably for the (Python-based) aws cli
# invoked below, so object names survive non-ASCII bytes; confirm.
PYTHONIOENCODING=utf8
export PYTHONIOENCODING
# Root of the on-disk listing/attribute cache, one subdirectory per cache key.
cache_basedir="$HOME/.cache/execfuse-s3"
31 local cachekey
=${x//%/%25}
32 cachekey
=${cachekey//./%2E}
33 cachekey
=${cachekey//\//%2F\/}
# Map the preceding aws invocation's status to an errno for execfuse:
# success -> 0, any failure -> EIO.
if [ "$?" = 0 ]; then
	errno=0
else
	errno=$EIO
fi
# aws cli's exit code is not distinctive enough.
56 if [[ $stderr =~ Not\ Found$
]]
60 # TODO gather more error messages/codes
63 if [[ $stderr =~ NoSuchKey|NoSuchBucket
]]
66 elif [[ $stderr =~ AccessDenied
]]
69 elif [[ $stderr =~ BucketAlreadyExists|BucketAlreadyOwnedByYou
]]
72 elif [[ $stderr =~ BucketNotEmpty
]]
75 elif [[ $stderr =~ InvalidArgument|InvalidBucketName
]]
78 elif [[ $stderr =~ InlineDataTooLarge|MaxMessageLengthExceeded|MaxPostPreDataLengthExceededError
]]
81 elif [[ $stderr =~ InvalidRange
]]
84 elif [[ $stderr =~ MethodNotAllowed
]]
87 elif [[ $stderr =~ ServiceUnavailable|SlowDown
]]
90 elif [[ $stderr =~ TooManyBuckets
]]
109 cachekey
=`mk_cache_key "$s3uri"`
110 cachedir
=$cache_basedir/$cachekey
112 cachefile
=$cachedir/data
114 if [ -f "$cachefile" ]
116 stat
-c %Y
"$cachefile"
120 fixerrno-aws s3
ls "$s3uri" |
tee "$cachefile"
121 ps
=(${PIPESTATUS[@]})
125 mv -f "$cachefile" "$cachedir/data.error"
126 echo -n $ec > "$cachedir/err"
# Append one listing line to the cached directory listing of this prefix.
# NOTE(review): assumes the cache directory for this key already exists —
# confirm against the callers.
# Declaration split from the command substitution so the helper's exit
# status is not masked by `local` (SC2155); $(…) preferred over backticks.
local cachekey
cachekey=$(mk_cache_key "$s3_pfx_uri")
echo "$entry" >> "$cache_basedir/$cachekey/data"
146 local cachekey
=`mk_cache_key "$s3_pfx_uri"`
147 entry
=$entry perl
-i -ne 'if(/^(\s*PRE|\S+\s+\S+\s+\S+) (.+)/ and $2 eq $ENV{entry}){}else{print}' "$cache_basedir/$cachekey/data"
161 print_base_dir_attr
()
163 echo -ne "ino=1 mode=$dmode nlink=2 uid=$UID gid=$GID rdev=0 size=0 blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0${1:+ $1\0}"
169 path
=${1:1} # strip leading slash
191 print_base_dir_attr .
196 while read -r date time bucket
198 mtime
=`date +%s -d "$date $time"`
199 echo -ne "ino=1 mode=$dmode nlink=2 uid=$UID gid=$GID rdev=0 size=0 blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0 $bucket\0"
204 set_Bucket_and_Key
"$1"
207 cache-s3-ls
"s3://$Bucket/$Key${Key:+/}" |\
208 sed -e 's/^\s\+PRE/0 0 PRE/'
209 exit ${PIPESTATUS[0]}
214 while read -r date time size fname
221 # NOTE: mtime is 0 here
222 print_base_dir_attr
"$fname"
228 if [ -n "${dirs[$fname]}" ]
230 # this is an object which is also a keyprefix
231 # suffix it with '#' sign which is unlikely in real object names
234 mtime
=`date +%s -d "$date $time"`
235 echo -ne "ino=1 mode=$fmode nlink=1 uid=$UID gid=$GID rdev=0 size=$size blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0 $fname\0"
240 return_to_fuse
${PIPESTATUS[@]}
251 set_Bucket_and_Key
"$1"
255 # viewing a bucket folder
256 # ensure the bucket exists based on the default bucket list
257 # TODO: support cross-account buckets
262 while read -r date time this_bucket
264 if [ "$this_bucket" = "$Bucket" ]
266 mtime
=`date +%s -d "$date $time"`
273 return_to_fuse
${PIPESTATUS[@]}
275 # viewing an s3 object
276 # find its type: keyprefix (dir) or object key (file)
277 # lookup the parent dir
281 key_dirname
=${Key%/*}/
285 key_basename
=${Key##*/}
288 cache-s3-ls
"s3://$Bucket/$key_dirname" |\
289 sed -e 's/^\s\+PRE/0 0 PRE/'
290 exit ${PIPESTATUS[0]}
295 while read -r date time size fname
305 if [ -n "${dirs[$fname]}" ]
307 # a keyprefix exists with this object basename, pretend it has a '#' suffix
311 if [ "$fname" = "$key_basename" ]
315 # it's a key prefix, show there is a dir
316 # get mtime from the listing of this current keyprefix, only if it's cached
317 cachekey
=`mk_cache_key "s3://$Bucket/$Key/"`
318 cachefile
=$cache_basedir/$cachekey/data
319 if [ -f "$cachefile" ]
321 datetime
=`sed -ne '/^\S\+ \S\+\s\+0 $/{s/\s\+0 $//p;q}' "$cachefile"`
322 if [ -n "$datetime" ]
324 mtime
=`date +%s -d "$datetime"`
329 # it's an object key, show there is a file
330 mtime
=`date +%s -d "$date $time"`
331 echo -ne "ino=1 mode=$fmode nlink=1 uid=$UID gid=$GID rdev=0 size=$size blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0"
338 return_to_fuse
${PIPESTATUS[@]}
347 # trim trailing '#' sign which is there on object key which is also a keyprefix
348 bucket_and_key
=${bucket_and_key%#}
349 fixerrno-aws s3
cp "s3:/$bucket_and_key" -
355 # trim trailing '#' sign which is there on object key which is also a keyprefix
356 bucket_and_key
=${bucket_and_key%#}
357 fixerrno-aws s3
cp - "s3:/$bucket_and_key"
363 bucket_and_key
=${1:1}
364 cachekey
=`mk_cache_key "s3://$bucket_and_key${bucket_and_key:+/}"`
369 # clear cache recursively from within this directory.
371 x
=`find "$cache_basedir/$cachekey" -type f -name "data" -printf x -delete`
379 # clear cache of this directory.
381 cachefile
="$cache_basedir/$cachekey/data"
382 if [ -e "$cachefile" ]
395 # it was either a single file (which does not have own cachefile)
396 # or the corresponding cache file (of a directory) was missing.
398 # the formal case is more likely (because userspace apps often calls
399 # stat(2) before chmod, so the dir cache will populated anyway),
400 # so we indicate that we do not support clearig cache on files.
406 set_Bucket_and_Key
"$1"
410 fixerrno-aws s3api put-object
--bucket "$Bucket" --key "$Key"
414 parent
=`dirname "$Bucket/$Key"`
415 leaf
=`basename "$Bucket/$Key"`
416 cache-add-entry
"s3://$parent/" "0 0 PRE $leaf/"
425 set_Bucket_and_Key
"$1"
428 if [ "$fuseop" = rmdir ]
433 fixerrno-aws s3api delete-object
--bucket "$Bucket" --key "$Key"
437 parent
=`dirname "$Bucket/$Key"`
438 leaf
=`basename "$Bucket/$Key"`
439 if [ "$fuseop" = rmdir ]
443 cache-del-entry
"s3://$parent/" "$leaf"