3 # This script transforms markdown files into html and (optionally) nroff. The
4 # output files are written into the current directory named for the input file
5 # without the .md suffix and either the .html suffix or no suffix.
7 # If the input .md file has a section number at the end of the name (e.g.,
8 # rsync.1.md) a nroff file is also output (PROJ.NUM.md -> PROJ.NUM).
10 # The markdown input format has one extra extension: if a numbered list starts
11 # at 0, it is turned into a description list. The dl's dt tag is taken from the
12 # contents of the first tag inside the li, which is usually a p, code, or
# The cmarkgfm or commonmark lib is used to transform the input file into
16 # html. Then, the html.parser is used as a state machine that lets us tweak
17 # the html and (optionally) output nroff data based on the html tags.
19 # If the string @USE_GFM_PARSER@ exists in the file, the string is removed and
20 # a github-flavored-markup parser is used to parse the file.
22 # The man-page .md files also get the vars @VERSION@, @BINDIR@, and @LIBDIR@
23 # substituted. Some of these values depend on the Makefile $(prefix) (see the
24 # generated Makefile). If the maintainer wants to build files for /usr/local
25 # while creating release-ready man-page files for /usr, use the environment to
26 # set RSYNC_OVERRIDE_PREFIX=/usr.
28 # Copyright (C) 2020 - 2021 Wayne Davison
30 # This program is freely redistributable.
32 import os, sys, re, argparse, subprocess, time
33 from html.parser import HTMLParser
# Basenames that a cross-page hashtag link is allowed to target.
VALID_PAGES = 'README INSTALL COPYING rsync.1 rrsync.1 rsync-ssl.1 rsyncd.conf.5'.split()
# Tags whose accumulated text gets consumed/flushed in handle_endtag().
CONSUMES_TXT = set('h1 h2 h3 p li pre'.split())
41 <title>%TITLE%</title>
42 <meta charset="UTF-8"/>
43 <link href="https://fonts.googleapis.com/css2?family=Roboto&family=Roboto+Mono&display=swap" rel="stylesheet">
50 font-family: 'Roboto', sans-serif;
52 a.tgt { font-face: symbol; font-weight: 400; font-size: 70%; visibility: hidden; text-decoration: none; color: #ddd; padding: 0 4px; border: 0; }
53 a.tgt:after { content: '🔗'; }
54 a.tgt:hover { color: #444; background-color: #eaeaea; }
55 h1:hover > a.tgt, h2:hover > a.tgt, h3:hover > a.tgt, dt:hover > a.tgt { visibility: visible; }
57 font-family: 'Roboto Mono', monospace;
69 margin-block-start: 0em;
81 border-top: 1px solid grey;
84 background-color: #f6f8fa;
87 border: 1px solid #dfe2e5;
95 <div style="float: right"><p><i>%s</i></p></div>
103 .TH "%s" "%s" "%s" "%s" "User Commands"
# Font-change and non-breaking-character placeholders. Each pair holds the
# in-band marker char stored in the accumulated text and the nroff escape
# that manify() turns it into.
NORM_FONT = ('\1', r"\fP")
BOLD_FONT = ('\2', r"\fB")
UNDR_FONT = ('\3', r"\fI")
NBR_DASH = ('\4', r"\-")
NBR_SPACE = ('\xa0', r"\ ")

# Splits an input path into srcdir + name, with prog + optional numeric sect.
FILENAME_RE = re.compile(r'^(?P<fn>(?P<srcdir>.+/)?(?P<name>(?P<prog>[^/]+?)(\.(?P<sect>\d+))?)\.md)$')
# A "var=value" line (as found in the Makefile).
ASSIGNMENT_RE = re.compile(r'^(\w+)=(.+)')
# The RSYNC_VERSION #define in version.h.
VER_RE = re.compile(r'^#define\s+RSYNC_VERSION\s+"(\d.+?)"', re.M)
# The MAINTAINER_TZ_OFFSET #define (an hour offset, possibly fractional).
TZ_RE = re.compile(r'^#define\s+MAINTAINER_TZ_OFFSET\s+(-?\d+(\.\d+)?)', re.M)
# A ${var} reference inside a Makefile value.
VAR_REF_RE = re.compile(r'\$\{(\w+)\}')
# A version number such as " 3.2.0," embedded in heading text.
VERSION_RE = re.compile(r' (\d[.\d]+)[, ]')
# Any run of the \1-\7 placeholder control chars (octal escapes in a class).
BIN_CHARS_RE = re.compile(r'[\1-\7]+')
# " -- " with surrounding whitespace (made non-breaking in handle_data()).
SPACE_DOUBLE_DASH_RE = re.compile(r'\s--(\s)')
# A dash at the start of a word (turned into a non-breaking dash).
NON_SPACE_SINGLE_DASH_RE = re.compile(r'(^|\W)-')
WHITESPACE_RE = re.compile(r'\s')
# A bold-font-marked span, e.g. a code-styled option name in a heading.
CODE_BLOCK_RE = re.compile(r'[%s]([^=%s]+)[=%s]' % (BOLD_FONT[0], NORM_FONT[0], NORM_FONT[0]))
NBR_DASH_RE = re.compile(r'[%s]' % NBR_DASH[0])
# Chars not allowed in an html target id (replaced with '_').
INVALID_TARGET_CHARS_RE = re.compile(r'[^-A-Za-z0-9._]')
# Ids must start alphanumeric; txt2target() prefixes 't' when they don't.
INVALID_START_CHAR_RE = re.compile(r'^([^A-Za-z0-9])')
# Output lines that nroff would treat as a request (leading ' or .).
MANIFY_LINESTART_RE = re.compile(r"^(['.])", flags=re.M)
138 for mdfn in args.mdfiles:
142 print("The test was successful.")
# Convert one .md input file: substitute @VARS@, run the markdown parser,
# then write NAME.html (and a NAME.SECT nroff file for manpage inputs).
# NOTE(review): this view of the file is elided -- some original statements
# between the lines below are missing, so control flow is not fully visible.
def parse_md_file(mdfn):
fi = FILENAME_RE.match(mdfn)
# An unparsable input filename is fatal.
die('Failed to parse a md input file name:', mdfn)
fi = argparse.Namespace(**fi.groupdict())
fi.want_manpage = not not fi.sect  # a numeric sect in the name => also emit nroff
fi.title = fi.prog + '(' + fi.sect + ') manpage'
fi.title = fi.prog + ' for rsync'
find_man_substitutions()
prog_ver = 'rsync ' + env_subs['VERSION']
if fi.prog != 'rsync':
prog_ver = fi.prog + ' from ' + prog_ver
fi.man_headings = (fi.prog, fi.sect, env_subs['date'], prog_ver, env_subs['prefix'])
with open(mdfn, 'r', encoding='utf-8') as fh:
use_gfm_parser = '@USE_GFM_PARSER@' in txt
txt = txt.replace('@USE_GFM_PARSER@', '')
# Substitute the build-time values into the markdown before parsing.
txt = (txt.replace('@VERSION@', env_subs['VERSION'])
.replace('@BINDIR@', env_subs['bindir'])
.replace('@LIBDIR@', env_subs['libdir']))
die('Input file requires cmarkgfm parser:', mdfn)
fi.html_in = gfm_parser(txt)
fi.html_in = md_parser(txt)
output_list = [ (fi.name + '.html', fi.html_out) ]
output_list += [ (fi.name, fi.man_out) ]
for fn, txt in output_list:
if args.dest and args.dest != '.':
fn = os.path.join(args.dest, fn)
if os.path.lexists(fn):
with open(fn, 'w', encoding='utf-8') as fh:
# Populate the env_subs dict with VERSION, bindir, libdir, prefix and a
# 'date' string used in the manpage headers. Values come from version.h
# and the generated Makefile (see the file header comment).
# NOTE(review): elided view -- some original statements are missing below.
def find_man_substitutions():
srcdir = os.path.dirname(sys.argv[0]) + '/'
git_dir = srcdir + '.git'
if os.path.lexists(git_dir):
# Use the last commit's timestamp so rebuilt pages are reproducible.
mtime = int(subprocess.check_output(['git', '--git-dir', git_dir, 'log', '-1', '--format=%at']))
# Allow "prefix" to be overridden via the environment:
env_subs['prefix'] = os.environ.get('RSYNC_OVERRIDE_PREFIX', None)
# Fallback values -- presumably used when version.h/Makefile are absent
# (the selecting condition is elided in this view; TODO confirm).
env_subs['VERSION'] = '1.0.0'
env_subs['bindir'] = '/usr/bin'
env_subs['libdir'] = '/usr/lib/rsync'
for fn in (srcdir + 'version.h', 'Makefile'):
die('Failed to find', srcdir + fn)
with open(srcdir + 'version.h', 'r', encoding='utf-8') as fh:
m = VER_RE.search(txt)
env_subs['VERSION'] = m.group(1)
m = TZ_RE.search(txt) # the tzdata lib may not be installed, so we use a simple hour offset
tz_offset = float(m.group(1)) * 60 * 60  # hours -> seconds
with open('Makefile', 'r', encoding='utf-8') as fh:
m = ASSIGNMENT_RE.match(line)
var, val = (m.group(1), m.group(2))
if var == 'prefix' and env_subs[var] is not None:
# Expand ${var} references using the values gathered so far.
while VAR_REF_RE.search(val):
val = VAR_REF_RE.sub(lambda m: env_subs[m.group(1)], val)
env_subs['date'] = time.strftime('%d %b %Y', time.gmtime(mtime + tz_offset)).lstrip('0')
def html_via_commonmark(txt):
    """Render markdown *txt* into an html string using the commonmark lib."""
    ast = commonmark.Parser().parse(txt)
    return commonmark.HtmlRenderer().render(ast)
# State machine over the parsed html: tweaks the html output and (for
# manpage inputs) emits nroff at the same time via HTMLParser callbacks.
class TransformHtml(HTMLParser):
# NOTE(review): elided view -- some original statements are missing in the
# method bodies below.
def __init__(self, fi):
HTMLParser.__init__(self, convert_charrefs=True)
# All mutable parsing state lives in a single namespace object.
st = self.state = argparse.Namespace(
at_first_tag_in_li = False,
at_first_tag_in_dd = False,
html_out = [ HTML_START.replace('%TITLE%', fi.title) ],
want_manpage = fi.want_manpage,
created_hashtags = set(),
derived_hashtags = set(),
referenced_hashtags = set(),
bad_hashtags = set(),
latest_targets = [ ],
a_href_external = False,
st.man_out.append(MAN_START % fi.man_headings)
# Only pull in the table CSS when the page actually contains a table.
if '</table>' in fi.html_in:
st.html_out[0] = st.html_out[0].replace('</style>', TABLE_STYLE + '</style>')
self.feed(fi.html_in)
st.html_out.append(MAN_HTML_END % env_subs['date'])
st.html_out.append(HTML_END)
st.man_out.append(MAN_END)
fi.html_out = ''.join(st.html_out)
fi.man_out = ''.join(st.man_out)
# Post-parse validation: warn about links whose target was never created.
for tgt, txt in st.derived_hashtags:
derived = txt2target(txt, tgt)
if derived not in st.created_hashtags:
txt = BIN_CHARS_RE.sub('', txt.replace(NBR_DASH[0], '-').replace(NBR_SPACE[0], ' '))
warn('Unknown derived hashtag link in', self.fn, 'based on:', (tgt, txt))
for bad in st.bad_hashtags:
if bad in st.created_hashtags:
warn('Missing "#" in hashtag link in', self.fn + ':', bad)
warn('Unknown non-hashtag link in', self.fn + ':', bad)
for bad in st.referenced_hashtags - st.created_hashtags:
warn('Unknown hashtag link in', self.fn + ':', '#' + bad)
# NOTE(review): the lines below appear to belong to a handle_UE()-style
# helper (see the ".UE" comment in handle_endtag) whose def line is elided.
# They move trailing punctuation onto the .UE macro line.
if st.txt.startswith(('.', ',', '!', '?', ';', ':')):
st.man_out[-1] = ".UE " + st.txt[0] + "\n"
st.after_a_tag = False
# Dispatch on an opening tag, mirroring each html construct with the
# equivalent nroff macros in st.man_out.
# NOTE(review): elided view -- some original statements are missing below.
def handle_starttag(self, tag, attrs_list):
self.output_debug('START', (tag, attrs_list))
if st.at_first_tag_in_li:
# In a description list, the first tag inside an li supplies the <dt>.
if st.list_state[-1] == 'dl':
st.html_out.append('<dt>')
st.at_first_tag_in_dd = True # Kluge to suppress a .P at the start of an li.
st.at_first_tag_in_li = False
if not st.at_first_tag_in_dd:
st.man_out.append(st.p_macro)
st.at_first_tag_in_li = True
lstate = st.list_state[-1]
st.man_out.append(".IP o\n")
# Numbered list item: emit the current number and advance it.
st.man_out.append(".IP " + str(lstate) + ".\n")
st.list_state[-1] += 1
elif tag == 'blockquote':
st.man_out.append(".RS 4\n")
st.man_out.append(st.p_macro + ".nf\n")
elif tag == 'code' and not st.in_pre:
st.txt += BOLD_FONT[0]
elif tag == 'strong' or tag == 'b':
st.txt += BOLD_FONT[0]
elif tag == 'em' or tag == 'i':
tag = 'u' # Change it into underline to be more like the manpage
st.txt += UNDR_FONT[0]
for var, val in attrs_list:
start = int(val) # We only support integers.
st.man_out.append(".RS\n")
# A numbered list starting at 0 becomes a description list (see the
# file header comment).
st.list_state.append('dl')
st.list_state.append(start)
st.man_out.append(st.p_macro)
st.man_out.append(st.p_macro)
st.man_out.append(".RS\n")
st.list_state.append('o')
st.man_out.append(".l\n")
st.html_out.append("<hr />")
# <a href=...>: external links become .UR/.UE; hashtag links are checked
# against VALID_PAGES and the targets seen so far.
for var, val in attrs_list:
if val.startswith(('https://', 'http://', 'mailto:', 'ftp:')):
st.man_out.append(manify(st.txt.strip()) + "\n")
st.man_out.append(".UR " + val + "\n")
st.a_href_external = True
pg, tgt = val.split('#', 1)
if pg and pg not in VALID_PAGES or '#' in tgt:
st.bad_hashtags.add(val)
elif tgt in ('', 'opt', 'dopt'):
st.a_href_external = False
st.referenced_hashtags.add(tgt)
if tgt in st.latest_targets:
warn('Found link to the current section in', self.fn + ':', val)
elif val not in VALID_PAGES:
st.bad_hashtags.add(val)
st.a_txt_start = len(st.txt)  # remember where this link's text begins
st.html_out.append('<' + tag + ''.join(' ' + var + '="' + htmlify(val) + '"' for var, val in attrs_list) + '>')
st.at_first_tag_in_dd = False
# Close out a tag: flush the accumulated text to html/nroff and pop any
# list/indent state that the matching start tag pushed.
# NOTE(review): elided view -- some original statements are missing below.
def handle_endtag(self, tag):
self.output_debug('END', (tag,))
if tag in CONSUMES_TXT or st.dt_from == tag:
# Headings derive link targets; a NEWS heading gets a version-based one.
if tgt.startswith('NEWS for '):
m = VERSION_RE.search(tgt)
st.target_suf = '-' + tgt
self.add_targets(tag, tgt)
st.man_out.append(st.p_macro + '.SH "' + manify(txt) + '"\n')
self.add_targets(tag, txt, st.target_suf)
# Option targets use the 'dopt' prefix inside DAEMON OPTIONS.
st.opt_prefix = 'dopt' if txt == 'DAEMON OPTIONS' else 'opt'
st.man_out.append(st.p_macro + '.SS "' + manify(txt) + '"\n')
self.add_targets(tag, txt, st.target_suf)
if st.dt_from == 'p':
st.man_out.append('.IP "' + manify(txt) + '"\n')
if txt.startswith(BOLD_FONT[0]):
self.add_targets(tag, txt)
st.man_out.append(manify(txt) + "\n")
if st.list_state[-1] == 'dl':
if st.at_first_tag_in_li:
die("Invalid 0. -> td translation")
st.man_out.append(manify(txt) + "\n")
st.at_first_tag_in_li = False
elif tag == 'blockquote':
st.man_out.append(".RE\n")
st.man_out.append(manify(txt) + "\n.fi\n")
elif (tag == 'code' and not st.in_pre):
add_to_txt = NORM_FONT[0]
elif tag == 'strong' or tag == 'b':
add_to_txt = NORM_FONT[0]
elif tag == 'em' or tag == 'i':
tag = 'u' # Change it into underline to be more like the manpage
add_to_txt = NORM_FONT[0]
elif tag == 'ol' or tag == 'ul':
if st.list_state.pop() == 'dl':
st.man_out.append(".RE\n")
st.at_first_tag_in_dd = False
# </a>: finish an external .UR/.UE link, or rewrite an internal href to
# the target id derived from the link's text.
if st.a_href_external:
st.txt = st.txt.strip()
if args.force_link_text or st.a_href != st.txt:
st.man_out.append(manify(st.txt) + "\n")
st.man_out.append(".UE\n") # This might get replaced with a punctuation version in handle_UE()
st.after_a_tag = True
st.a_href_external = False
atxt = st.txt[st.a_txt_start:]
find = 'href="' + st.a_href + '"'
# Search backwards for the html chunk that holds this link's href.
for j in range(len(st.html_out)-1, 0, -1):
if find in st.html_out[j]:
pg, tgt = st.a_href.split('#', 1)
derived = txt2target(atxt, tgt)
if derived in st.latest_targets:
warn('Found link to the current section in', self.fn + ':', st.a_href)
st.derived_hashtags.add((tgt, atxt))
st.html_out[j] = st.html_out[j].replace(find, 'href="' + pg + '#' + derived + '"')
die('INTERNAL ERROR: failed to find href in html data:', find)
st.html_out.append('</' + tag + '>')
if st.dt_from == tag:
# Close the dt and open its dd in the description list.
st.man_out.append('.IP "' + manify(txt) + '"\n')
st.html_out.append('</dt><dd>')
st.at_first_tag_in_dd = True
st.html_out.append('<dd>')
st.at_first_tag_in_dd = True
# Accumulate text content, converting dashes and spaces in option-like
# text into the non-breaking placeholder chars.
# NOTE(review): elided view -- some original statements are missing below.
def handle_data(self, txt):
warn('Malformed link in', self.fn + ':', txt)
self.output_debug('DATA', (txt,))
# Make " -- " non-breaking and turn '--' into two non-breaking dashes.
txt = SPACE_DOUBLE_DASH_RE.sub(NBR_SPACE[0] + r'--\1', txt).replace('--', NBR_DASH[0]*2)
txt = NON_SPACE_SINGLE_DASH_RE.sub(r'\1' + NBR_DASH[0], txt)
txt = WHITESPACE_RE.sub(NBR_SPACE[0], txt)
html = html.replace(NBR_DASH[0], '-').replace(NBR_SPACE[0], ' ') # <code> is non-breaking in CSS
st.html_out.append(html.replace(NBR_SPACE[0], ' ').replace(NBR_DASH[0], '-⁠'))
# Create html id anchor(s) for a heading/option, making each id unique
# and recording it in st.created_hashtags.
# NOTE(review): elided view -- some original statements are missing below.
def add_targets(self, tag, txt, suf=None):
tag = '<' + tag + '>'
targets = CODE_BLOCK_RE.findall(txt)
txt = txt2target(txt, st.opt_prefix)
# Id already used? Append -2, -3, ... until it is unique.
if txt in st.created_hashtags:
for j in range(2, 1000):
chk = txt + '-' + str(j)
if chk not in st.created_hashtags:
print('Made link target unique:', chk)
# Walk backwards to the start tag we emitted and attach the id to it.
while st.html_out[tag_pos] != tag:
st.html_out[tag_pos] = tag[:-1] + ' id="' + txt + '">'
st.html_out.append('<a href="#' + txt + '" class="tgt"></a>')
tag_pos -= 1 # take into account the append
st.html_out[tag_pos] = '<span id="' + txt + '"></span>' + st.html_out[tag_pos]
st.created_hashtags.add(txt)
st.latest_targets = targets
# Pretty-print a trimmed snapshot of the parser state for --debug runs.
# NOTE(review): pprint is presumably imported in elided code when --debug
# is in effect -- it is not in the visible import line; TODO confirm.
def output_debug(self, event, extra):
# Shallow-copy the namespace so the trimming below doesn't mutate state.
st = argparse.Namespace(**vars(st))
if len(st.html_out) > 2:
st.html_out = ['...'] + st.html_out[-2:]
if len(st.man_out) > 2:
st.man_out = ['...'] + st.man_out[-2:]
pprint.PrettyPrinter(indent=2).pprint(vars(st))
# Convert heading/option text into a valid, stable html anchor id.
# NOTE(review): elided view -- some original statements are missing below.
def txt2target(txt, opt_prefix):
txt = txt.strip().rstrip(':')
m = CODE_BLOCK_RE.search(txt)
txt = NBR_DASH_RE.sub('-', txt)
txt = BIN_CHARS_RE.sub('', txt)
txt = INVALID_TARGET_CHARS_RE.sub('_', txt)
# Option-looking text gets the opt/dopt prefix (chosen per section in
# handle_endtag) so option ids don't collide between sections.
if opt_prefix and txt.startswith('-'):
txt = opt_prefix + txt
# An id must start with an alphanumeric char; prefix 't' when it doesn't.
txt = INVALID_START_CHAR_RE.sub(r't\1', txt)
# manify() body (its def line is elided in this view): escape txt for
# nroff output -- double every backslash, expand the placeholder
# font/space/dash markers into their roff escapes, and protect lines
# starting with ' or . (roff requests) with a leading \&.
return MANIFY_LINESTART_RE.sub(r'\&\1', txt.replace('\\', '\\\\')
.replace(NBR_SPACE[0], NBR_SPACE[1])
.replace(NBR_DASH[0], NBR_DASH[1])
.replace(NORM_FONT[0], NORM_FONT[1])
.replace(BOLD_FONT[0], BOLD_FONT[1])
.replace(UNDR_FONT[0], UNDR_FONT[1]))
624 return txt.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
628 print(*msg, file=sys.stderr)
# Script entry point: parse the command line, then pick a markdown parser.
# NOTE(review): elided view -- the per-file loop and the try/except around
# the cmarkgfm/commonmark imports are missing from the lines below.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert markdown into html and (optionally) nroff. Each input filename must have a .md suffix, which is changed to .html for the output filename. If the input filename ends with .num.md (e.g. foo.1.md) then a nroff file is also output with the input filename's .md suffix removed (e.g. foo.1).", add_help=False)
parser.add_argument('--test', action='store_true', help="Just test the parsing without outputting any files.")
parser.add_argument('--dest', metavar='DIR', help="Create files in DIR instead of the current directory.")
parser.add_argument('--force-link-text', action='store_true', help="Don't remove the link text if it matches the link href. Useful when nroff doesn't understand .UR and .UE.")
parser.add_argument('--debug', '-D', action='count', default=0, help='Output copious info on the html parsing. Repeat for even more.')
parser.add_argument("--help", "-h", action="help", help="Output this help message and exit.")
parser.add_argument("mdfiles", metavar='FILE.md', nargs='+', help="One or more .md files to convert.")
args = parser.parse_args()
# Prefer cmarkgfm (required for @USE_GFM_PARSER@ inputs); fall back to
# the commonmark lib, and die if neither is importable.
md_parser = cmarkgfm.markdown_to_html
gfm_parser = cmarkgfm.github_flavored_markdown_to_html
md_parser = html_via_commonmark
die("Failed to find cmarkgfm or commonmark for python3.")