# Attachment 'collect-1.0.py' (recovered from a MoinMoin attachment-view dump)
# -*- coding: utf-8 -*-
"""
MoinMoin - collect parser, Version 1.0, 27.06.2014

A simple parser that reads key:value pairs from a dictionary to combine
pages into a collection for printing or saving as a booklet.

Keywords

Usage

History

Version 1.0 - 14.07.2014: initial version

@copyright: 2014 Ian Riley <ian@riley.asia>

Developed with inspiration and code from: keyval.py parser -

@copyright: 2006 by Matt Cooper <macooper@vt.edu>

license: GNU GPL, see COPYING for details.

"""
25
# Cache dependencies for MoinMoin's formatter cache.
Dependencies = ["time"]

from MoinMoin.parser import text_moin_wiki as wiki
from MoinMoin.parser import text as raw
from MoinMoin import wikiutil
31
class Parser:
    """MoinMoin parser that combines several wiki pages into one printable
    "collection" page.

    The parser body is a dict-style block of `` key:: value`` lines (each
    indented by at least one space).  Keys handled in format():
    heading, paged, contents, contents-depth, page, page-heading, page-url.
    """
    parsername = 'collect'

    def __init__(self, raw, request, **kw):
        """Store the parser body, the request and the parser arguments.

        raw     -- text between the parser markers (the key/value lines)
        request -- current MoinMoin request; its page supplies the base
                   name used to resolve relative page names
        kw      -- 'format_args' carries the arguments from the parser
                   invocation line (e.g. 'raw')
        """
        self.raw = raw
        self.request = request
        self.page_name = self.request.page.page_name
        self.args = kw.get('format_args', '')

    def format(self, formatter):
        """Parse the key/value body, build the collection wiki markup and
        render it through the wiki parser (or the raw text parser when
        'raw' is given as a format argument)."""
        eol = "\n"
        sep = ":: "
        space = " "
        vert = "|"
        hline = '------'
        # wiki markup templates
        heading1 = "= %s =" + eol
        heading2 = "== %s ==" + eol
        item = " 1.#%s <<AnchorLink(%s)>>" + eol   # NOTE(review): unused, kept from original -- confirm removable
        item_text = " 1.#%s <<AnchorLink(%s|%s)>>" + eol
        page_break = eol + "<<PageBreak>>" + eol
        toc = "<<TableOfContents(%s)>>" + eol      # NOTE(review): unused, kept from original -- confirm removable
        anchor = "<<Anchor(%s)>>" + eol
        anchor_link = "<<AnchorLink(%s|%s)>>" + eol
        get_toc = "<<GetTOC(%s, %s)>>" + eol
        embed = "<<EmbeddedPage(%s)>>" + eol  # change href URLs to anchors
        http_link = "` `<<HttpLink(%s)>>" + eol  # insert external link
        msg_raw = "## MoinMoin markup generated by collect parser" + eol

        # set option defaults
        collection_heading = ''
        paged = True
        contents = ''
        contents_depth = -1
        pages = []
        page_texts = []
        page_headings = []
        page_anchors = []
        page_urls = []

        args = self.args.split(space)

        results = ''
        lines = self.raw.split(eol)
        for line in lines:
            if line:
                # handle dict lines (" key:: value")
                if (line[0] == space) and (sep in line):
                    # split on the FIRST separator only, so values may
                    # themselves contain ":: " (original split() raised
                    # ValueError in that case)
                    key, val = (line.lstrip(space)).split(sep, 1)
                    if key == 'heading':
                        collection_heading = val
                    elif key == 'paged':
                        paged = (val.lower() == 'yes')
                        if not paged:
                            # unpaged output separates pages with a rule
                            page_break = hline + eol
                    elif key == 'contents':
                        contents = val.lower()
                        if contents == 'contents':
                            contents_depth = 0
                    elif key == 'contents-depth':
                        try:
                            contents_depth = int(val)
                        except ValueError:
                            contents_depth = -1
                    elif key == 'page':
                        page_name = val
                        page_text = ''
                        if vert in val:
                            # "PageName|display text"
                            page_name, page_text = page_name.split(vert, 1)
                        page_name = wikiutil.AbsPageName(self.page_name, page_name)
                        pages.append(page_name)
                        page_texts.append(page_text)
                        page_headings.append(False)
                        page_urls.append(False)
                        page_anchors.append(wikiutil.anchor_name_from_text(val))
                    elif key == 'page-heading':
                        # guard: ignore if no 'page::' has been seen yet
                        # (original raised IndexError)
                        if page_headings:
                            page_headings[-1] = (val.lower() == 'yes')
                    elif key == 'page-url':
                        if page_urls:
                            page_urls[-1] = (val.lower() == 'yes')

        # build collection coverpage
        if collection_heading:
            results += heading1 % collection_heading
        for i, c_name in enumerate(pages):
            # initialise every per-page fragment; the original left c_item
            # unassigned, raising NameError when 'contents' was neither
            # 'list' nor 'contents'
            c_item = ''
            c_url = ''
            c_toc = ''
            c_name_anchor = c_name + '.top'
            if contents == 'list':  # list of <<AnchorLink()>>
                c_item = item_text % (str(i+1), c_name_anchor, c_name)
                if page_texts[i]:
                    c_item = item_text % (str(i+1), c_name_anchor, page_texts[i])
                if page_urls[i]:
                    c_url = http_link % c_name
            elif contents == 'contents':  # list of <<Contents()>>
                c_item = heading2 % (c_name)
                if page_texts[i]:
                    c_item = heading2 % (page_texts[i])
                c_toc = get_toc % (c_name, str(contents_depth))
                if page_urls[i]:
                    c_url = http_link % c_name

            results += c_item + c_toc + c_url

        # build collection
        top_link = anchor_link % ('collection.top', '[Top]')
        for i, c_name in enumerate(pages):
            c_anchor = anchor % (c_name + '.top')
            c_embed = embed % (','.join([str(i)] + pages))
            c_heading = ''
            if page_headings[i]:
                c_heading = heading1 % pages[i]
                if page_texts[i]:
                    # display text, when given, replaces the page name
                    c_heading = heading1 % page_texts[i]

            results += page_break + c_anchor + top_link + c_heading + c_embed
        if not paged:  # add a final horizontal line
            results += page_break

        del lines

        if results:
            results = (anchor % 'collection.top') + results
            if 'raw' in args:
                # show the generated markup instead of rendering it
                results = msg_raw + results
                wikiizer = raw.Parser(results, self.request)
            else:
                wikiizer = wiki.Parser(results, self.request)
            wikiizer.format(formatter)
# (MoinMoin wiki footer text removed -- not part of the source file.)