make getpeername() return the original socket address which before it was intercepted
[hband-tools.git] / execfuse / s3 / readdir
blob485e494b2877ef894ececb6719cc3f2d7ee3601b
#!/bin/bash

# errno values returned to execfuse to report errors to the kernel.
# Numbers match Linux <asm-generic/errno.h>.
EPERM=1
ENOENT=2
EIO=5
EAGAIN=11
EACCESS=13	# NOTE: canonical C name is EACCES; this spelling is used consistently below
EBUSY=16
EEXIST=17
EINVAL=22
EFBIG=27
ENOSPC=28
ESPIPE=29
ENOSYS=38
ENOTEMPTY=39
EOPNOTSUPP=95

# bash-utils provides the pipe/close helpers used by fixerrno-aws
. /usr/lib/tool/bash-utils || exit $EIO

export PYTHONIOENCODING=utf8
fmode=-rw-r--r--	# mode string reported for S3 objects (files)
dmode=drwxr-xr-x	# mode string reported for buckets/keyprefixes (directories)
#GID=`id -g`
GID=0
cache_basedir=~/.cache/execfuse-s3
mk_cache_key()
{
	# Turn an s3:// URI ($1) into a filesystem-safe cache path component:
	# percent-encode '%' (first, to keep the encoding reversible) and '.',
	# then turn each '/' into '%2F/' so the cache mirrors the S3 key
	# hierarchy as nested directories under $cache_basedir.
	local x=$1
	local cachekey=${x//%/%25}
	cachekey=${cachekey//./%2E}
	cachekey=${cachekey//\//%2F\/}
	echo "$cachekey"
}
fixerrno-aws()
{
	# Run `aws "$@"`, pass its stderr through to our stderr, and translate
	# well-known AWS error strings into errno values, because the aws
	# cli's exit code is not distinctive enough.
	# Returns: 0 on success, a specific errno when the message is
	# recognized, $EIO otherwise.
	local errno
	local stderr
	local fd2r fd2w

	# capture aws' stderr through a pipe (pipe/close come from bash-utils)
	pipe fd2r fd2w
	aws "$@" 2>&$fd2w
	[ $? = 0 ] && errno=0 || errno=$EIO
	close $fd2w
	stderr=$(cat <&$fd2r)
	close $fd2r

	echo "$stderr" >&2

	case "$1" in
	s3)
		if [[ $stderr =~ Not\ Found$ ]]
		then
			errno=$ENOENT
		fi
		# TODO gather more error messages/codes
		;;
	s3api)
		if [[ $stderr =~ NoSuchKey|NoSuchBucket ]]
		then
			errno=$ENOENT
		elif [[ $stderr =~ AccessDenied ]]
		then
			errno=$EACCESS
		elif [[ $stderr =~ BucketAlreadyExists|BucketAlreadyOwnedByYou ]]
		then
			errno=$EEXIST
		elif [[ $stderr =~ BucketNotEmpty ]]
		then
			errno=$ENOTEMPTY
		elif [[ $stderr =~ InvalidArgument|InvalidBucketName ]]
		then
			errno=$EINVAL
		elif [[ $stderr =~ InlineDataTooLarge|MaxMessageLengthExceeded|MaxPostPreDataLengthExceededError ]]
		then
			errno=$EFBIG
		elif [[ $stderr =~ InvalidRange ]]
		then
			errno=$ESPIPE
		elif [[ $stderr =~ MethodNotAllowed ]]
		then
			errno=$EPERM
		elif [[ $stderr =~ ServiceUnavailable|SlowDown ]]
		then
			errno=$EBUSY
		elif [[ $stderr =~ TooManyBuckets ]]
		then
			errno=$ENOSPC
		fi
		;;
	esac
	return $errno
}
cache-s3-ls()
{
	# List the s3 uri in $1 with read-through caching.
	# Output: first line is the cache timestamp (mtime of the cache file,
	# or 0 on a cache miss), followed by the `aws s3 ls` listing lines.
	# On a listing error the bad cache file is set aside as data.error and
	# the translated errno is both stored in the err file and returned.
	local s3uri
	local cachekey
	local cachedir
	local cachefile
	declare -a ps
	local ec

	s3uri=$1
	cachekey=`mk_cache_key "$s3uri"`
	cachedir=$cache_basedir/$cachekey
	mkdir -p "$cachedir"
	cachefile=$cachedir/data

	if [ -f "$cachefile" ]
	then
		stat -c %Y "$cachefile"
		cat "$cachefile"
	else
		echo 0
		fixerrno-aws s3 ls "$s3uri" | tee "$cachefile"
		ps=(${PIPESTATUS[@]})
		ec=${ps[0]}
		if [ $ec != 0 ]
		then
			# do not keep a broken listing as a valid cache entry
			mv -f "$cachefile" "$cachedir/data.error"
			echo -n $ec > "$cachedir/err"
		fi
		return $ec
	fi
}
cache-add-entry()
{
	# Append one listing line ($2) to the cached listing of keyprefix $1,
	# keeping the readdir cache consistent after a successful mkdir.
	local s3_pfx_uri=$1
	local entry=$2

	local cachekey=`mk_cache_key "$s3_pfx_uri"`
	echo "$entry" >> "$cache_basedir/$cachekey/data"
}
cache-del-entry()
{
	# Remove the listing line whose name field equals $2 from the cached
	# listing of keyprefix $1; matches both "  PRE name/" and
	# "date time size name" line formats. The name is passed via the
	# environment so perl compares it literally, not as a regex.
	local s3_pfx_uri=$1
	local entry=$2

	local cachekey=`mk_cache_key "$s3_pfx_uri"`
	entry=$entry perl -i -ne 'if(/^(\s*PRE|\S+\s+\S+\s+\S+) (.+)/ and $2 eq $ENV{entry}){}else{print}' "$cache_basedir/$cachekey/data"
}
return_to_fuse()
{
	# Exit the script with the producer's status ($1, typically
	# ${PIPESTATUS[0]}, already translated to an errno) when it is
	# non-zero, otherwise with the consumer's status ($2).
	local ps=("$@")
	if [ ${ps[0]} != 0 ]
	then
		exit ${ps[0]}
	else
		exit ${ps[1]}
	fi
}
print_base_dir_attr()
{
	# Print one execfuse attribute record for a directory entry.
	# Reads globals: dmode, UID, GID, cache_timestamp, mtime.
	# $1 (optional): entry name; when given it is appended and the record
	# is NUL-terminated (readdir format), otherwise the bare attribute
	# string is printed (getattr format).
	echo -ne "ino=1 mode=$dmode nlink=2 uid=$UID gid=$GID rdev=0 size=0 blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0${1:+ $1\0}"
}
set_Bucket_and_Key()
{
	# Split an absolute fuse path "/bucket[/key...]" ($1) into the global
	# variables Bucket and Key; Key is '' for a bare bucket path.
	local path=$1
	path=${1:1} # strip leading slash
	Bucket=${path%%/*}
	if [[ $path =~ / ]]
	then
		Key=${path#*/}
	else
		Key=''
	fi
}
fuseop=${0##*/}	# execfuse invokes this script under the fuse operation's name
cache_timestamp=0
mtime=0

case "$fuseop" in
readdir)
	case "$1" in
	/)
		# root directory: one entry per bucket
		print_base_dir_attr .

		cache-s3-ls s3:// |\
		{
		read cache_timestamp
		while read -r date time bucket
		do
			mtime=`date +%s -d "$date $time"`
			echo -ne "ino=1 mode=$dmode nlink=2 uid=$UID gid=$GID rdev=0 size=0 blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0 $bucket\0"
		done
		}
		# NOTE(review): unlike the other branches, listing errors are not
		# propagated via return_to_fuse here — confirm this is intended
		;;
	*)
		set_Bucket_and_Key "$1"

		# normalize "   PRE name/" lines to "0 0 PRE name/" so every line
		# parses uniformly as: date time size fname
		(
		cache-s3-ls "s3://$Bucket/$Key${Key:+/}" |\
		sed -e 's/^\s\+PRE/0 0 PRE/'
		exit ${PIPESTATUS[0]}
		) |\
		{
		read cache_timestamp
		declare -A dirs=()
		while read -r date time size fname
		do
			if [ "$size" = PRE ]
			then
				fname=${fname:0:-1}	# strip trailing slash of the keyprefix
				if [ -n "$fname" ]
				then
					# NOTE: mtime is 0 here
					print_base_dir_attr "$fname"
					dirs["$fname"]=1
				fi
			else
				if [ -n "$fname" ]
				then
					if [ -n "${dirs[$fname]}" ]
					then
						# this is an object which is also a keyprefix
						# suffix it with '#' sign which is unlikely in real object names
						fname="$fname#"
					fi
					mtime=`date +%s -d "$date $time"`
					echo -ne "ino=1 mode=$fmode nlink=1 uid=$UID gid=$GID rdev=0 size=$size blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0 $fname\0"
				fi
			fi
		done
		return_to_fuse ${PIPESTATUS[@]}
		}
		;;
	esac
	;; # readdir

getattr)
	case "$1" in
	/)
		print_base_dir_attr
		;;
	*)
		set_Bucket_and_Key "$1"

		if [ -z "$Key" ]
		then
			# viewing a bucket folder
			# ensure the bucket exists based on the default bucket list
			# TODO: support cross-account buckets

			cache-s3-ls s3:// |\
			{
			read cache_timestamp
			while read -r date time this_bucket
			do
				if [ "$this_bucket" = "$Bucket" ]
				then
					mtime=`date +%s -d "$date $time"`
					print_base_dir_attr
					exit 0
				fi
			done
			exit $ENOENT
			}
			return_to_fuse ${PIPESTATUS[@]}
		else
			# viewing an s3 object
			# find its type: keyprefix (dir) or object key (file)
			# lookup the parent dir

			if [[ $Key =~ / ]]
			then
				key_dirname=${Key%/*}/
			else
				key_dirname=''
			fi
			key_basename=${Key##*/}

			(
			cache-s3-ls "s3://$Bucket/$key_dirname" |\
			sed -e 's/^\s\+PRE/0 0 PRE/'
			exit ${PIPESTATUS[0]}
			) |\
			{
			read cache_timestamp
			declare -A dirs=()
			while read -r date time size fname
			do
				if [ "$size" = PRE ]
				then
					fname=${fname:0:-1}
					if [ -n "$fname" ]
					then
						dirs["$fname"]=1
					fi
				else
					if [ -n "${dirs[$fname]}" ]
					then
						# a keyprefix exists with this object basename, pretend it has a '#' suffix
						fname="$fname#"
					fi
				fi
				if [ "$fname" = "$key_basename" ]
				then
					if [ "$size" = PRE ]
					then
						# it's a key prefix, show there is a dir
						# get mtime from the listing of this current keyprefix, only if it's cached
						cachekey=`mk_cache_key "s3://$Bucket/$Key/"`
						cachefile=$cache_basedir/$cachekey/data
						if [ -f "$cachefile" ]
						then
							datetime=`sed -ne '/^\S\+ \S\+\s\+0 $/{s/\s\+0 $//p;q}' "$cachefile"`
							if [ -n "$datetime" ]
							then
								mtime=`date +%s -d "$datetime"`
							fi
						fi
						print_base_dir_attr
					else
						# it's an object key, show there is a file
						mtime=`date +%s -d "$date $time"`
						echo -ne "ino=1 mode=$fmode nlink=1 uid=$UID gid=$GID rdev=0 size=$size blksize=512 blocks=0 atime=$cache_timestamp mtime=$mtime ctime=0"
					fi
					exit 0
				fi
			done
			exit $ENOENT
			}
			return_to_fuse ${PIPESTATUS[@]}
		fi
		# notreached
		;;
	esac
	;; # getattr

read_file)
	bucket_and_key=$1
	# trim trailing '#' sign which is there on object key which is also a keyprefix
	bucket_and_key=${bucket_and_key%#}
	# $1 starts with '/', so "s3:/$1" yields "s3://bucket/key"
	fixerrno-aws s3 cp "s3:/$bucket_and_key" -
	exit $?
	;; # read_file

write_file)
	bucket_and_key=$1
	# trim trailing '#' sign which is there on object key which is also a keyprefix
	bucket_and_key=${bucket_and_key%#}
	fixerrno-aws s3 cp - "s3:/$bucket_and_key"
	exit $?
	;; # write_file

chmod)
	# chmod is repurposed as a cache-control interface, see modes below.
	bucket_and_key=$1
	bucket_and_key=${1:1}
	cachekey=`mk_cache_key "s3://$bucket_and_key${bucket_and_key:+/}"`
	found=no
	case ".$2" in
	.00000)
		# chmod 0 [<DIR>]
		# clear cache recursively from within this directory.

		x=`find "$cache_basedir/$cachekey" -type f -name "data" -printf x -delete`
		if [ -n "$x" ];
		then
			found=yes
		fi
		;;
	.00001)
		# chmod 1 [<DIR>]
		# clear cache of this directory.

		cachefile="$cache_basedir/$cachekey/data"
		if [ -e "$cachefile" ]
		then
			found=yes
			rm "$cachefile"
		fi
		;;
	*) exit $EINVAL;;
	esac

	if [ $found = yes ]
	then
		exit 0
	else
		# it was either a single file (which does not have own cachefile)
		# or the corresponding cache file (of a directory) was missing.

		# the former case is more likely (because userspace apps often call
		# stat(2) before chmod, so the dir cache will be populated anyway),
		# so we indicate that we do not support clearing cache on files.
		exit $EOPNOTSUPP
	fi
	;; # chmod

mkdir)
	set_Bucket_and_Key "$1"
	if [ -n "$Key" ]
	then
		# an s3 "directory" is a zero-byte object whose key ends in '/'
		Key=$Key/
		fixerrno-aws s3api put-object --bucket "$Bucket" --key "$Key"
		errno=$?
		if [ $errno = 0 ]
		then
			parent=`dirname "$Bucket/$Key"`
			leaf=`basename "$Bucket/$Key"`
			cache-add-entry "s3://$parent/" "0 0 PRE $leaf/"
		fi
		exit $errno
	else
		# creating buckets is not supported
		exit $EOPNOTSUPP
	fi
	;; # mkdir

rmdir|unlink)
	set_Bucket_and_Key "$1"
	if [ -n "$Key" ]
	then
		if [ "$fuseop" = rmdir ]
		then
			Key=$Key/
		fi

		fixerrno-aws s3api delete-object --bucket "$Bucket" --key "$Key"
		errno=$?
		if [ $errno = 0 ]
		then
			parent=`dirname "$Bucket/$Key"`
			leaf=`basename "$Bucket/$Key"`
			if [ "$fuseop" = rmdir ]
			then
				leaf=$leaf/
			fi
			cache-del-entry "s3://$parent/" "$leaf"
		fi
		exit $errno
	else
		exit $EOPNOTSUPP
	fi
	;; # rmdir unlink

*) exit $ENOSYS;;
esac