# This script transforms markdown files into html and (optionally) nroff. The
# output files are written into the current directory, named for the input
# file with its .md suffix replaced by .html (for the html output) or removed
# entirely (for the nroff output).
#
# If the input .md file has a section number at the end of the name (e.g.,
# rsync.1.md) a nroff file is also output (PROJ.NUM.md -> PROJ.NUM).
#
# The markdown input format has one extra extension: if a numbered list starts
# at 0, it is turned into a description list. The dl's dt tag is taken from the
# contents of the first tag inside the li, which is usually a p, code, or
# strong tag.
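#
# For example (a hypothetical input snippet), markdown like this:
#
#     0.  `--verbose`
#
#         Increase the amount of information that is output.
#
# is turned into a <dl> whose <dt> holds the `--verbose` code span and whose
# <dd> holds the descriptive paragraph.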
#
# The cmarkgfm or commonmark lib is used to transform the input file into
# html. Then, the html.parser is used as a state machine that lets us tweak
# the html and (optionally) output nroff data based on the html tags.
#
# If the string @USE_GFM_PARSER@ exists in the file, the string is removed and
# a github-flavored-markdown parser is used to parse the file.
#
# The man-page .md files also get the vars @VERSION@, @BINDIR@, and @LIBDIR@
# substituted. Some of these values depend on the Makefile $(prefix) (see the
# generated Makefile). If the maintainer wants to build files for /usr/local
# while creating release-ready man-page files for /usr, use the environment to
# set RSYNC_OVERRIDE_PREFIX=/usr.
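#
# For example (a hypothetical invocation; adjust the script name and paths to
# match your checkout), a maintainer might run:
#
#     RSYNC_OVERRIDE_PREFIX=/usr ./md-convert --dest=man-tmp rsync.1.md rsyncd.conf.5.md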
#
# Copyright (C) 2020 - 2021 Wayne Davison
#
# This program is freely redistributable.
import os, sys, re, argparse, subprocess, time, pprint
from html.parser import HTMLParser

VALID_PAGES = 'README INSTALL COPYING rsync.1 rrsync.1 rsync-ssl.1 rsyncd.conf.5'.split()

CONSUMES_TXT = set('h1 h2 h3 p li pre'.split())
<title>%TITLE%</title>
<meta charset="UTF-8"/>
<link href="https://fonts.googleapis.com/css2?family=Roboto&family=Roboto+Mono&display=swap" rel="stylesheet">
  font-family: 'Roboto', sans-serif;
a.tgt { font-face: symbol; font-weight: 400; font-size: 70%; visibility: hidden; text-decoration: none; color: #ddd; padding: 0 4px; border: 0; }
a.tgt:after { content: '🔗'; }
a.tgt:hover { color: #444; background-color: #eaeaea; }
h1:hover > a.tgt, h2:hover > a.tgt, h3:hover > a.tgt, dt:hover > a.tgt { visibility: visible; }
  font-family: 'Roboto Mono', monospace;
  margin-block-start: 0em;
  border-top: 1px solid grey;
  background-color: #f6f8fa;
  border: 1px solid #dfe2e5;
<div style="float: right"><p><i>%s</i></p></div>
.TH "%s" "%s" "%s" "%s" "User Commands"
# Pairs of (in-band placeholder char, nroff escape); manify() turns the
# placeholder chars into the escapes when generating the man page.
NORM_FONT = ('\1', r"\fP")
BOLD_FONT = ('\2', r"\fB")
UNDR_FONT = ('\3', r"\fI")
NBR_DASH = ('\4', r"\-")
NBR_SPACE = ('\xa0', r"\ ")
FILENAME_RE = re.compile(r'^(?P<fn>(?P<srcdir>.+/)?(?P<name>(?P<prog>[^/]+?)(\.(?P<sect>\d+))?)\.md)$')
ASSIGNMENT_RE = re.compile(r'^(\w+)=(.+)')
VER_RE = re.compile(r'^#define\s+RSYNC_VERSION\s+"(\d.+?)"', re.M)
TZ_RE = re.compile(r'^#define\s+MAINTAINER_TZ_OFFSET\s+(-?\d+(\.\d+)?)', re.M)
VAR_REF_RE = re.compile(r'\$\{(\w+)\}')
VERSION_RE = re.compile(r' (\d[.\d]+)[, ]')
BIN_CHARS_RE = re.compile(r'[\1-\7]+') # a run of the placeholder control chars above
SPACE_DOUBLE_DASH_RE = re.compile(r'\s--(\s)')
NON_SPACE_SINGLE_DASH_RE = re.compile(r'(^|\W)-')
WHITESPACE_RE = re.compile(r'\s')
CODE_BLOCK_RE = re.compile(r'[%s]([^=%s]+)[=%s]' % (BOLD_FONT[0], NORM_FONT[0], NORM_FONT[0])) # the text of an inline code span
NBR_DASH_RE = re.compile(r'[%s]' % NBR_DASH[0])
INVALID_TARGET_CHARS_RE = re.compile(r'[^-A-Za-z0-9._]')
INVALID_START_CHAR_RE = re.compile(r'^([^A-Za-z0-9])')
MANIFY_LINESTART_RE = re.compile(r"^(['.])", flags=re.M) # a leading ' or . must be escaped for nroff

    for mdfn in args.mdfiles:

        print("The test was successful.")


def parse_md_file(mdfn):
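    """Convert one .md file, writing NAME.html (and, for man pages, a NAME nroff file) unless --test was given."""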
    fi = FILENAME_RE.match(mdfn)
        die('Failed to parse a md input file name:', mdfn)
    fi = argparse.Namespace(**fi.groupdict())
    fi.want_manpage = not not fi.sect
        fi.title = fi.prog + '(' + fi.sect + ') manpage'
        fi.title = fi.prog + ' for rsync'

            find_man_substitutions()
        prog_ver = 'rsync ' + env_subs['VERSION']
        if fi.prog != 'rsync':
            prog_ver = fi.prog + ' from ' + prog_ver
        fi.man_headings = (fi.prog, fi.sect, env_subs['date'], prog_ver, env_subs['prefix'])

    with open(mdfn, 'r', encoding='utf-8') as fh:

    use_gfm_parser = '@USE_GFM_PARSER@' in txt
        txt = txt.replace('@USE_GFM_PARSER@', '')

        txt = (txt.replace('@VERSION@', env_subs['VERSION'])
                  .replace('@BINDIR@', env_subs['bindir'])
                  .replace('@LIBDIR@', env_subs['libdir']))

            die('Input file requires cmarkgfm parser:', mdfn)
        fi.html_in = gfm_parser(txt)
        fi.html_in = md_parser(txt)

    output_list = [ (fi.name + '.html', fi.html_out) ]
        output_list += [ (fi.name, fi.man_out) ]
    for fn, txt in output_list:
        if args.dest and args.dest != '.':
            fn = os.path.join(args.dest, fn)
        if os.path.lexists(fn):
        with open(fn, 'w', encoding='utf-8') as fh:


def find_man_substitutions():
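    """Fill in env_subs (prefix, VERSION, bindir, libdir, date) from version.h and the generated Makefile, or from fixed values when --test is used."""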
    srcdir = os.path.dirname(sys.argv[0]) + '/'

    git_dir = srcdir + '.git'
    if os.path.lexists(git_dir):
        mtime = int(subprocess.check_output(['git', '--git-dir', git_dir, 'log', '-1', '--format=%at']))

    # Allow "prefix" to be overridden via the environment:
    env_subs['prefix'] = os.environ.get('RSYNC_OVERRIDE_PREFIX', None)

        env_subs['VERSION'] = '1.0.0'
        env_subs['bindir'] = '/usr/bin'
        env_subs['libdir'] = '/usr/lib/rsync'
        for fn in (srcdir + 'version.h', 'Makefile'):
                die('Failed to find', srcdir + fn)

        with open(srcdir + 'version.h', 'r', encoding='utf-8') as fh:
        m = VER_RE.search(txt)
        env_subs['VERSION'] = m.group(1)
        m = TZ_RE.search(txt) # the tzdata lib may not be installed, so we use a simple hour offset
        tz_offset = float(m.group(1)) * 60 * 60

        with open('Makefile', 'r', encoding='utf-8') as fh:
                m = ASSIGNMENT_RE.match(line)
                var, val = (m.group(1), m.group(2))
                if var == 'prefix' and env_subs[var] is not None:
                while VAR_REF_RE.search(val):
                    val = VAR_REF_RE.sub(lambda m: env_subs[m.group(1)], val)

    env_subs['date'] = time.strftime('%d %b %Y', time.gmtime(mtime + tz_offset)).lstrip('0')


def html_via_commonmark(txt):
    return commonmark.HtmlRenderer().render(commonmark.Parser().parse(txt))


class TransformHtml(HTMLParser):
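    """A state-machine html parser that tweaks the generated html and (optionally) produces the equivalent nroff output."""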
    def __init__(self, fi):
        HTMLParser.__init__(self, convert_charrefs=True)
        st = self.state = argparse.Namespace(
                at_first_tag_in_li = False,
                at_first_tag_in_dd = False,
                html_out = [ HTML_START.replace('%TITLE%', fi.title) ],
                want_manpage = fi.want_manpage,
                created_hashtags = set(),
                derived_hashtags = set(),
                referenced_hashtags = set(),
                bad_hashtags = set(),
                latest_targets = [ ],

            st.man_out.append(MAN_START % fi.man_headings)

        if '</table>' in fi.html_in:
            st.html_out[0] = st.html_out[0].replace('</style>', TABLE_STYLE + '</style>')

        self.feed(fi.html_in)

            st.html_out.append(MAN_HTML_END % env_subs['date'])
        st.html_out.append(HTML_END)
        st.man_out.append(MAN_END)

        fi.html_out = ''.join(st.html_out)

        fi.man_out = ''.join(st.man_out)

        for tgt, txt in st.derived_hashtags:
            derived = txt2target(txt, tgt)
            if derived not in st.created_hashtags:
                txt = BIN_CHARS_RE.sub('', txt.replace(NBR_DASH[0], '-').replace(NBR_SPACE[0], ' '))
                warn('Unknown derived hashtag link in', self.fn, 'based on:', (tgt, txt))

        for bad in st.bad_hashtags:
            if bad in st.created_hashtags:
                warn('Missing "#" in hashtag link in', self.fn + ':', bad)
                warn('Unknown non-hashtag link in', self.fn + ':', bad)

        for bad in st.referenced_hashtags - st.created_hashtags:
            warn('Unknown hashtag link in', self.fn + ':', '#' + bad)

    def handle_starttag(self, tag, attrs_list):
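        """Open the html/nroff constructs for a tag, tracking list state and the dl translation of 0-numbered lists."""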
            self.output_debug('START', (tag, attrs_list))
        if st.at_first_tag_in_li:
            if st.list_state[-1] == 'dl':
                    st.html_out.append('<dt>')
                st.at_first_tag_in_dd = True # Kluge to suppress a .P at the start of an li.
            st.at_first_tag_in_li = False
            if not st.at_first_tag_in_dd:
                st.man_out.append(st.p_macro)
            st.at_first_tag_in_li = True
            lstate = st.list_state[-1]
                st.man_out.append(".IP o\n")
                st.man_out.append(".IP " + str(lstate) + ".\n")
                st.list_state[-1] += 1
        elif tag == 'blockquote':
            st.man_out.append(".RS 4\n")
            st.man_out.append(st.p_macro + ".nf\n")
        elif tag == 'code' and not st.in_pre:
            st.txt += BOLD_FONT[0]
        elif tag == 'strong' or tag == 'b':
            st.txt += BOLD_FONT[0]
        elif tag == 'em' or tag == 'i':
                tag = 'u' # Change it into underline to be more like the manpage
            st.txt += UNDR_FONT[0]
            for var, val in attrs_list:
                    start = int(val) # We only support integers.
                st.man_out.append(".RS\n")
                st.list_state.append('dl')
                st.list_state.append(start)
            st.man_out.append(st.p_macro)
            st.man_out.append(st.p_macro)
                st.man_out.append(".RS\n")
            st.list_state.append('o')
            st.man_out.append(".l\n")
            st.html_out.append("<hr />")
            for var, val in attrs_list:
                    if val.startswith(('https://', 'http://', 'mailto:', 'ftp:')):
                        pass # nothing to check
                        pg, tgt = val.split('#', 1)
                        if pg and pg not in VALID_PAGES or '#' in tgt:
                            st.bad_hashtags.add(val)
                        elif tgt in ('', 'opt', 'dopt'):
                            st.referenced_hashtags.add(tgt)
                            if tgt in st.latest_targets:
                                warn('Found link to the current section in', self.fn + ':', val)
                    elif val not in VALID_PAGES:
                        st.bad_hashtags.add(val)
            st.a_txt_start = len(st.txt)
        st.html_out.append('<' + tag + ''.join(' ' + var + '="' + htmlify(val) + '"' for var, val in attrs_list) + '>')
        st.at_first_tag_in_dd = False

    def handle_endtag(self, tag):
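        """Flush the accumulated text as manified nroff for the closed tag, add link targets for headings, and rewrite derived hashtag links."""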
            self.output_debug('END', (tag,))
        if tag in CONSUMES_TXT or st.dt_from == tag:
            if tgt.startswith('NEWS for '):
                m = VERSION_RE.search(tgt)
                    st.target_suf = '-' + tgt
            self.add_targets(tag, tgt)
            st.man_out.append(st.p_macro + '.SH "' + manify(txt) + '"\n')
            self.add_targets(tag, txt, st.target_suf)
            st.opt_prefix = 'dopt' if txt == 'DAEMON OPTIONS' else 'opt'
            st.man_out.append(st.p_macro + '.SS "' + manify(txt) + '"\n')
            self.add_targets(tag, txt, st.target_suf)
            if st.dt_from == 'p':
                st.man_out.append('.IP "' + manify(txt) + '"\n')
                if txt.startswith(BOLD_FONT[0]):
                    self.add_targets(tag, txt)
                st.man_out.append(manify(txt) + "\n")
            if st.list_state[-1] == 'dl':
                if st.at_first_tag_in_li:
                    die("Invalid 0. -> td translation")
                st.man_out.append(manify(txt) + "\n")
            st.at_first_tag_in_li = False
        elif tag == 'blockquote':
            st.man_out.append(".RE\n")
            st.man_out.append(manify(txt) + "\n.fi\n")
        elif (tag == 'code' and not st.in_pre):
            add_to_txt = NORM_FONT[0]
        elif tag == 'strong' or tag == 'b':
            add_to_txt = NORM_FONT[0]
        elif tag == 'em' or tag == 'i':
                tag = 'u' # Change it into underline to be more like the manpage
            add_to_txt = NORM_FONT[0]
        elif tag == 'ol' or tag == 'ul':
            if st.list_state.pop() == 'dl':
                st.man_out.append(".RE\n")
            st.at_first_tag_in_dd = False
                atxt = st.txt[st.a_txt_start:]
                find = 'href="' + st.a_href + '"'
                for j in range(len(st.html_out)-1, 0, -1):
                    if find in st.html_out[j]:
                        pg, tgt = st.a_href.split('#', 1)
                        derived = txt2target(atxt, tgt)
                        if derived in st.latest_targets:
                            warn('Found link to the current section in', self.fn + ':', st.a_href)
                        st.derived_hashtags.add((tgt, atxt))
                        st.html_out[j] = st.html_out[j].replace(find, 'href="' + pg + '#' + derived + '"')
                    die('INTERNAL ERROR: failed to find href in html data:', find)
        st.html_out.append('</' + tag + '>')
        if st.dt_from == tag:
            st.man_out.append('.IP "' + manify(txt) + '"\n')
            st.html_out.append('</dt><dd>')
            st.at_first_tag_in_dd = True
            st.html_out.append('<dd>')
            st.at_first_tag_in_dd = True

    def handle_data(self, txt):
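        """Htmlify and accumulate the text of the current tag, converting spaces and dashes in options into non-breaking placeholder chars."""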
            warn('Malformed link in', self.fn + ':', txt)
            self.output_debug('DATA', (txt,))
            txt = SPACE_DOUBLE_DASH_RE.sub(NBR_SPACE[0] + r'--\1', txt).replace('--', NBR_DASH[0]*2)
            txt = NON_SPACE_SINGLE_DASH_RE.sub(r'\1' + NBR_DASH[0], txt)
            txt = WHITESPACE_RE.sub(NBR_SPACE[0], txt)
            html = html.replace(NBR_DASH[0], '-').replace(NBR_SPACE[0], ' ') # <code> is non-breaking in CSS
        st.html_out.append(html.replace(NBR_SPACE[0], '&nbsp;').replace(NBR_DASH[0], '-&#8288;'))

    def add_targets(self, tag, txt, suf=None):
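        """Create link-target ids in the html for the code spans in the heading/dt text, making any duplicate target names unique."""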
        tag = '<' + tag + '>'
        targets = CODE_BLOCK_RE.findall(txt)
            txt = txt2target(txt, st.opt_prefix)
            if txt in st.created_hashtags:
                for j in range(2, 1000):
                    chk = txt + '-' + str(j)
                    if chk not in st.created_hashtags:
                        print('Made link target unique:', chk)
                while st.html_out[tag_pos] != tag:
                st.html_out[tag_pos] = tag[:-1] + ' id="' + txt + '">'
                st.html_out.append('<a href="#' + txt + '" class="tgt"></a>')
                tag_pos -= 1 # take into account the append
                st.html_out[tag_pos] = '<span id="' + txt + '"></span>' + st.html_out[tag_pos]
            st.created_hashtags.add(txt)
        st.latest_targets = targets

    def output_debug(self, event, extra):
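        """Dump a (truncated) copy of the parser state for --debug runs."""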
            st = argparse.Namespace(**vars(st))
            if len(st.html_out) > 2:
                st.html_out = ['...'] + st.html_out[-2:]
            if len(st.man_out) > 2:
                st.man_out = ['...'] + st.man_out[-2:]
        pprint.PrettyPrinter(indent=2).pprint(vars(st))


def txt2target(txt, opt_prefix):
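    """Convert heading/option/link text into a sanitized hashtag target name, prefixing option names with opt_prefix."""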
    txt = txt.strip().rstrip(':')
    m = CODE_BLOCK_RE.search(txt)
    txt = NBR_DASH_RE.sub('-', txt)
    txt = BIN_CHARS_RE.sub('', txt)
    txt = INVALID_TARGET_CHARS_RE.sub('_', txt)
    if opt_prefix and txt.startswith('-'):
        txt = opt_prefix + txt
        txt = INVALID_START_CHAR_RE.sub(r't\1', txt)

    return MANIFY_LINESTART_RE.sub(r'\&\1', txt.replace('\\', '\\\\')
            .replace(NBR_SPACE[0], NBR_SPACE[1])
            .replace(NBR_DASH[0], NBR_DASH[1])
            .replace(NORM_FONT[0], NORM_FONT[1])
            .replace(BOLD_FONT[0], BOLD_FONT[1])
            .replace(UNDR_FONT[0], UNDR_FONT[1]))

    return txt.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;')

    print(*msg, file=sys.stderr)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Convert markdown into html and (optionally) nroff. Each input filename must have a .md suffix, which is changed to .html for the output filename. If the input filename ends with .num.md (e.g. foo.1.md) then a nroff file is also output with the input filename's .md suffix removed (e.g. foo.1).", add_help=False)
    parser.add_argument('--test', action='store_true', help="Just test the parsing without outputting any files.")
    parser.add_argument('--dest', metavar='DIR', help="Create files in DIR instead of the current directory.")
    parser.add_argument('--debug', '-D', action='count', default=0, help='Output copious info on the html parsing. Repeat for even more.')
    parser.add_argument("--help", "-h", action="help", help="Output this help message and exit.")
    parser.add_argument("mdfiles", metavar='FILE.md', nargs='+', help="One or more .md files to convert.")
    args = parser.parse_args()

        md_parser = cmarkgfm.markdown_to_html
        gfm_parser = cmarkgfm.github_flavored_markdown_to_html
            md_parser = html_via_commonmark
            die("Failed to find cmarkgfm or commonmark for python3.")