# -*- coding: utf-8 -*-
"""
    MoinMoin - collect parser, Version 1.1, 21.06.2015

    A simple parser that reads key:value pairs from a dictionary to combine
    pages into a collection for printing or saving as a booklet.

    Keywords

    Usage

    History

        Version 1.0 - 14.07.2014: initial version
        Version 1.1 - 21.06.2015: minor fixes to allow for no contents page

    @copyright: 2014 Ian Riley <ian@riley.asia>

    Developed with inspiration and code from: keyval.py parser -

    @copyright: 2006 by Matt Cooper <macooper@vt.edu>

    license: GNU GPL, see COPYING for details.

"""

Dependencies = ["time"]

from MoinMoin.parser import text_moin_wiki as wiki
from MoinMoin.parser import text as raw
from MoinMoin import wikiutil
32
class Parser:
    """MoinMoin 'collect' parser.

    Reads ``key:: value`` dict lines from the wrapped section body and
    emits MoinMoin wiki markup that stitches the listed pages into one
    collection (optionally preceded by a heading and a contents/list
    cover page), then renders that markup with the wiki parser — or the
    plain-text parser when ``raw`` is given as a format argument.
    """

    parsername = 'collect'

    def __init__(self, raw, request, **kw):
        # raw: unparsed body text of the parser section
        # request: current MoinMoin request object
        self.raw = raw
        self.request = request
        self.page_name = self.request.page.page_name
        # optional arguments given on the parser invocation line
        self.args = kw.get('format_args', '')

    def format(self, formatter):
        """Parse the dict lines, build the collection markup and render it."""
        eol = "\n"
        sep = ":: "
        space = " "
        vert = "|"
        hline = '------'
        # wiki markup templates
        heading1 = "= %s =" + eol
        heading2 = "== %s ==" + eol
        item_text = " 1.#%s <<AnchorLink(%s|%s)>>" + eol
        page_break = eol + "<<PageBreak>>" + eol
        anchor = "<<Anchor(%s)>>" + eol
        anchor_link = "<<AnchorLink(%s|%s)>>" + eol
        get_toc = "<<GetTOC(%s, %s)>>" + eol
        embed = "<<EmbeddedPage(%s)>>" + eol  # change href URLs to anchors
        http_link = "` `<<HttpLink(%s)>>" + eol  # insert external link
        msg_raw = "## MoinMoin markup generated by collect parser" + eol

        # option defaults
        collection_heading = ''
        paged = True
        contents = ''
        contents_depth = -1
        pages = []
        page_texts = []
        page_headings = []
        page_anchors = []  # NOTE(review): collected but never read afterwards
        page_urls = []

        args = self.args.split(space)

        results = ''
        lines = self.raw.split(eol)
        for line in lines:
            if not line:
                continue
            # dict lines look like " key:: value"
            if (line[0] == space) and (sep in line):
                # split on the first separator only, so values may
                # themselves contain ':: ' without raising ValueError
                key, val = (line.lstrip(space)).split(sep, 1)
                if key == 'heading':
                    collection_heading = val
                elif key == 'paged':
                    paged = (val.lower() == 'yes')
                    if not paged:
                        # unpaged output separates pages with a rule instead
                        page_break = hline + eol
                elif key == 'contents':
                    contents = val.lower()
                    if contents == 'contents':
                        contents_depth = 0
                elif key == 'contents-depth':
                    try:
                        contents_depth = int(val)
                    except ValueError:
                        contents_depth = -1
                elif key == 'page':
                    page_name = val
                    page_text = ''
                    if vert in val:
                        # 'PageName|Display text'
                        page_name, page_text = page_name.split(vert, 1)
                    page_name = wikiutil.AbsPageName(self.page_name, page_name)
                    pages.append(page_name)
                    page_texts.append(page_text)
                    page_headings.append(False)
                    page_urls.append(False)
                    page_anchors.append(wikiutil.anchor_name_from_text(val))
                elif key == 'page-heading':
                    # applies to the most recent 'page' entry; ignore if none yet
                    if page_headings:
                        page_headings[-1] = (val.lower() == 'yes')
                elif key == 'page-url':
                    if page_urls:
                        page_urls[-1] = (val.lower() == 'yes')

        # build collection coverpage
        if collection_heading:
            results += heading1 % collection_heading
        for i, c_name in enumerate(pages):
            c_item = ''
            c_toc = ''
            c_url = ''
            c_name_anchor = c_name + '.top'
            if contents == 'list':  # list of <<AnchorLink()>>
                c_item = item_text % (str(i + 1), c_name_anchor, c_name)
                if page_texts[i]:
                    c_item = item_text % (str(i + 1), c_name_anchor, page_texts[i])
                if page_urls[i]:
                    c_url = http_link % c_name
            elif contents == 'contents':  # list of <<Contents()>>
                c_item = heading2 % (c_name)
                if page_texts[i]:
                    c_item = heading2 % (page_texts[i])
                c_toc = get_toc % (c_name, str(contents_depth))
                if page_urls[i]:
                    c_url = http_link % c_name

            results += c_item + c_toc + c_url

        # build collection
        top_link = anchor_link % ('collection.top', '[Top]')
        for i, c_name in enumerate(pages):
            c_anchor = anchor % (c_name + '.top')
            c_embed = embed % (','.join([str(i)] + pages))
            c_heading = ''
            if page_headings[i]:
                c_heading = heading1 % pages[i]
                if page_texts[i]:
                    c_heading = heading1 % page_texts[i]
            if results:
                # break before every page except a leading empty collection
                results += page_break + top_link + c_anchor
            results += c_heading + c_embed

        if not paged:  # add a final horizontal line
            results += page_break

        del lines

        if results:
            results = (anchor % 'collection.top') + results
        if 'raw' in args:
            results = msg_raw + results
            wikiizer = raw.Parser(results, self.request)
        else:
            wikiizer = wiki.Parser(results, self.request)
        wikiizer.format(formatter)