1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
|
#!@PYTHON@
## This is www_post.py. This script is the main stage
## of toplevel GNUmakefile local-WWW-post target.
# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS
# please call me from top of the source directory
import sys
import os
import re
import langdefs
import mirrortree
import postprocess_html
# Command-line interface: PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS.
# TARGETS is a single space-separated argument, e.g. 'offline online'.
package_name, package_version, outdir, targets = sys.argv[1:]
targets = targets.split (' ')
outdir = os.path.normpath (outdir)

# Trees scanned for documentation output.
doc_dirs = ['input', 'Documentation', outdir]
# Per-target mirror root, e.g. '<outdir>/online-root'.
target_pattern = os.path.join (outdir, '%s-root')

# these redirection pages allow to go back to the documentation index
# from HTML manuals/snippets page
static_files = {
    os.path.join (outdir, 'index.html'):
    '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/web/index.html">
<html>
<head>
<title>Redirecting...</title>
<meta name="author" content="This file was autogenerated by %s">
</head>
<body>Redirecting to the documentation index...</body>
</html>
''' % sys.argv[0],
    os.path.join (outdir, 'VERSION'):
        package_version + '\n',
    }

# Write each static file; the 'with' block guarantees the handle is
# closed (and contents flushed) even if a write fails — the original
# open(f, 'w').write(...) relied on the GC to close the file.
for f, contents in static_files.items ():
    with open (f, 'w') as static_file:
        static_file.write (contents)
sys.stderr.write ("Mirroring...\n")

# Directories excluded from the mirror: build/po scratch dirs, texi2dvi
# droppings, previously-built *-root mirrors, and the per-language
# subtrees directly under Documentation/ (handled elsewhere).
lang_codes = '|'.join ([l.code for l in langdefs.LANGUAGES])
excluded_dirs_re = ('(^|/)((' +
                    r'po|xref-maps|out|out-test|out-cov|.*?[.]t2d|\w*?-root)' +
                    '|^Documentation/(' + lang_codes + '))(/|$)')

dirs, symlinks, files = mirrortree.walk_tree (
    tree_roots = doc_dirs,
    process_dirs = outdir,
    exclude_dirs = excluded_dirs_re,
    find_files = r'.*?\.(?:midi|html|pdf|png|jpe?g|txt|i?ly|signature|css|zip|js|..\.idx|php)$|VERSION',
    exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)')

# extra files: info and tex output from lilypond-book regtests
regtest_walk = mirrortree.walk_tree (
    tree_roots = ['input/regression/lilypond-book'],
    process_dirs = outdir,
    exclude_dirs = r'(^|/)(out|out-test)(/|$)',
    find_files = r'.+\.(info|tex)$',
    exclude_files = r'lily-[0-9a-f]+.*\.tex')
files.extend (regtest_walk[2])
# actual mirrorring stuff
html_files = []
hardlinked_files = []
# These whitelisted files actually do contain the string
# 'UNTRANSLATED NODE: IGNORE ME' for documentation purposes.
whitelisted_files = [
    'Documentation/out-www/contributor-big-page.html',
    'Documentation/out-www/contributor/website-build.html',
]
for f in files:
    if f.endswith ('.html'):
        # HTML pages get postprocessed below; placeholder pages carrying
        # the untranslated-node marker are dropped entirely (they land in
        # neither list), except for the whitelisted files above.
        if f in whitelisted_files:
            html_files.append (f)
        else:
            # 'with' closes the handle promptly; the original
            # open(f).read() leaked one descriptor per HTML file.
            with open (f) as html_file:
                contents = html_file.read ()
            if 'UNTRANSLATED NODE: IGNORE ME' not in contents:
                html_files.append (f)
    else:
        # non-HTML files are mirrored verbatim via hard links
        hardlinked_files.append (f)

# Strip the build-directory component from mirrored paths; re.escape
# guards against regex metacharacters in a user-supplied outdir.
dirs = [re.sub ('/' + re.escape (outdir), '', d) for d in dirs]
# Drop the build directory itself, deduplicate, and sort for
# deterministic mkdir order (parents before children).
dirs = sorted (set (d for d in dirs if d != outdir))
strip_file_name = {}
strip_re = re.compile (outdir + '/')
for t in targets:
    out_root = target_pattern % t
    # Map a source path to its location under this target's root.
    # BUGFIX: bind the current target as a default argument (t=t).  A
    # plain closure over the loop variable would resolve 't' at call
    # time, making every dictionary entry point at whatever 't' last
    # was — the loop over targets at the bottom of this script only
    # worked by accident because it reuses the same variable name.
    strip_file_name[t] = lambda s, t=t: os.path.join (target_pattern % t, (strip_re.sub ('', s)))
    if not os.path.exists (out_root):
        os.mkdir (out_root)
    # replicate the (already sorted, parents-first) directory tree
    for d in dirs:
        new_dir = os.path.join (out_root, d)
        if not os.path.exists (new_dir):
            os.mkdir (new_dir)
    # hard-link non-HTML files into place: cheap, and all targets share
    # the same inodes
    for f in hardlinked_files:
        if not os.path.isfile (strip_file_name[t] (f)):
            os.link (f, strip_file_name[t] (f))
    # recreate symlinks, rewriting their destinations for the mirror
    for l in symlinks:
        p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re)
        dest = strip_file_name[t] (l)
        if not os.path.lexists (dest):
            os.symlink (p, dest)

# need this for content negotiation with documentation index
if 'online' in targets:
    # 'with' guarantees the .htaccess contents are flushed and the
    # handle closed before the HTML postprocessing below runs.
    with open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w') as htaccess:
        htaccess.write ('#.htaccess\nDirectoryIndex index\n')
# Index every mirrored HTML page once, then postprocess the pages for
# each requested target (the name_filter maps source paths into that
# target's mirror root).
postprocess_html.build_pages_dict (html_files)
for target in targets:
    sys.stderr.write ("Processing HTML pages for %s target...\n" % target)
    postprocess_html.process_html_files (
        package_name = package_name,
        package_version = package_version,
        target = target,
        name_filter = strip_file_name[target])
|