build.py
  1  import markdown
  2  import os
  3  import sys
  4  import shutil
  5  
# Distributable Python wheels served by the console; maps a short package
# name to the wheel filename expected under PACKAGES_PATH.
packages = {
    "rns": "rns-1.0.3-py3-none-any.whl",
    "nomadnet": "nomadnet-0.9.1-py3-none-any.whl",
    "lxmf": "lxmf-0.9.3-py3-none-any.whl",
    "rnsh": "rnsh-0.1.5-py3-none-any.whl",
}

# Fallback <title> used when a page declares no "title" property.
DEFAULT_TITLE = "RNode Bootstrap Console"
# Markdown sources in, generated HTML out.
SOURCES_PATH="./source"
BUILD_PATH="./build"
# Location of the prebuilt wheels and the mirrored reticulum.network site.
PACKAGES_PATH = "../../dist_archive"
RNS_SOURCE_PATH = "../../Reticulum"
INPUT_ENCODING="utf-8"
OUTPUT_ENCODING="utf-8"

# LXMF destination substituted for the {LXMF_ADDRESS} placeholder in pages.
LXMF_ADDRESS = "8dd57a738226809646089335a6b03695"

# Opening HTML emitted before every page body; generate_html() fills in
# {ASSET_PATH}, {PAGE_TITLE} and {MENU}.
document_start = """
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="{ASSET_PATH}css/water.css?v=4">
<link rel="shortcut icon" type="image/x-icon" href="{ASSET_PATH}gfx/icon.png">
<meta charset="utf-8"/>
<title>{PAGE_TITLE}</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
<div id="load_overlay" style="background-color:#2a2a2f; position:absolute; top:0px; left:0px; width:100%; height:100%; z-index:2000;"></div>
<span class="logo">RNode Console</span>
{MENU}<hr>"""

# Closing HTML appended after every page body.
document_end = """</body></html>"""

# Site-wide navigation menu (Markdown); {CONTENT_PATH} is replaced with the
# page's relative path back to the site root before rendering.
menu_md = """<center markdown=\"1\"><span class="menu">[Start]({CONTENT_PATH}index.html) | [Replicate]({CONTENT_PATH}replicate.html) | [Software]({CONTENT_PATH}software.html) | [Learn]({CONTENT_PATH}learn.html) | [Help](help.html) | [Contribute]({CONTENT_PATH}contribute.html)</span></center>"""

# Meta-refresh stub written to /m.html, forwarding to the manual index.
manual_redirect = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=/m/index.html">
</head>
</html>
"""
# Meta-refresh stub written to /h.html, forwarding to the help page.
help_redirect = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=/help.html">
</head>
</html>
"""

# Optional extra URL locations: each entry maps a build subdirectory
# ("path") to a Markdown source ("target"). Empty by default.
url_maps = [
    # { "path": "", "target": "/.md"},    
]
 62  
 63  def scan_pages(base_path):
 64      files = [file for file in os.listdir(base_path) if os.path.isfile(os.path.join(base_path, file)) and file[:1] != "."]
 65      directories = [file for file in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, file)) and file[:1] != "."]
 66  
 67      page_sources = []
 68  
 69      for file in files:
 70          if file.endswith(".md"):
 71              page_sources.append(base_path+"/"+file)
 72  
 73      for directory in directories:
 74          page_sources.extend(scan_pages(base_path+"/"+directory))
 75  
 76      return page_sources
 77  
 78  def get_prop(md, prop):
 79      try:
 80          pt = "["+prop+"]: <> ("
 81          pp = md.find(pt)
 82          if pp != -1:
 83              ps = pp+len(pt)
 84              pe = md.find(")", ps)
 85              return md[ps:pe]
 86          else:
 87              return None
 88  
 89      except Exception as e:
 90          print("Error while extracting topic property: "+str(e))
 91          return None
 92  
 93  def list_topic(topic):
 94      base_path = SOURCES_PATH+"/"+topic
 95      files = [file for file in os.listdir(base_path) if os.path.isfile(os.path.join(base_path, file)) and file[:1] != "." and file != "index.md"]
 96      
 97      topic_entries = []
 98      for file in files:
 99          if file.endswith(".md"):
100              fp = base_path+"/"+file
101              f = open(fp, "rb")
102              link_path = fp.replace(SOURCES_PATH, ".").replace(".md", ".html")
103  
104              md = f.read().decode(INPUT_ENCODING)
105              topic_entries.append({
106                  "title": get_prop(md, "title"),
107                  "image": get_prop(md, "image"),
108                  "date": get_prop(md, "date"),
109                  "excerpt": get_prop(md, "excerpt"),
110                  "md": md,
111                  "file": link_path
112              })
113  
114      topic_entries.sort(key=lambda e: e["date"], reverse=True)
115      return topic_entries
116  
def render_topic(topic_entries):
    """Render topic entries as a sequence of HTML link cards."""
    cards = []
    for entry in topic_entries:
        cards.append(
            f"<a class=\"topic_link\" href=\"{entry['file']}\">"
            "<span class=\"topic\">"
            f"<img class=\"topic_image\" src=\"{entry['image']}\"/>"
            f"<span class=\"topic_title\">{entry['title']}</span>"
            f"<span class=\"topic_excerpt\">{entry['excerpt']}</span>"
            "</span>"
            "</a>"
        )
    return "".join(cards)
131  
def generate_html(f, root_path):
    """Render one Markdown source file into a complete HTML document string.

    f         -- open binary file object containing the Markdown source
    root_path -- relative prefix from the generated page back to the site
                 root (e.g. "../"), used for asset and menu links

    Expands {TOPIC:<name>} listings, per-package placeholders, the LXMF
    address and the optional {DATE} placeholder, then wraps the rendered
    body in the shared document template.
    """
    md = f.read().decode(INPUT_ENCODING)

    page_title = get_prop(md, "title")
    if page_title is None:
        page_title = DEFAULT_TITLE
    else:
        page_title += " | "+DEFAULT_TITLE

    # Replace an embedded "{TOPIC:<name>}" marker with a rendered index of
    # all pages in that topic directory.
    tt = "{TOPIC:"
    tp = md.find(tt)
    if tp != -1:
        ts = tp+len(tt)
        te = md.find("}", ts)
        topic = md[ts:te]

        rt = tt+topic+"}"
        tl = render_topic(list_topic(topic))
        print("Found topic: "+str(topic)+", rt "+str(rt))
        md = md.replace(rt, tl)

    menu_html = markdown.markdown(menu_md.replace("{CONTENT_PATH}", root_path), extensions=["md_in_html", "markdown.extensions.fenced_code", "sane_lists"]).replace("<p></p>", "")
    page_html = markdown.markdown(md, extensions=["md_in_html", "markdown.extensions.fenced_code"]).replace("{ASSET_PATH}", root_path)
    page_html = page_html.replace("{LXMF_ADDRESS}", LXMF_ADDRESS)
    # Expand per-package placeholders: zip download path, bare zip name and
    # the wheel's real filename.
    for pkg_name in packages:
        page_html = page_html.replace("{PKG_"+pkg_name+"}", "pkg/"+pkg_name+".zip")
        page_html = page_html.replace("{PKG_BASE_"+pkg_name+"}", pkg_name+".zip")
        page_html = page_html.replace("{PKG_NAME_"+pkg_name+"}", packages[pkg_name])

    page_date = get_prop(md, "date")
    if page_date is not None:
        page_html = page_html.replace("{DATE}", page_date)

    return document_start.replace("{ASSET_PATH}", root_path).replace("{MENU}", menu_html).replace("{PAGE_TITLE}", page_title) + page_html + document_end
166  
# Discover all Markdown page sources, then emit the short redirect stubs
# that forward /m.html and /h.html to the manual and help pages.
source_files = scan_pages(SOURCES_PATH)

# Context managers guarantee the stubs are flushed and closed even if a
# write fails (the previous open/close pairs leaked the handle on error).
with open(BUILD_PATH+"/m.html", "w") as mf:
    mf.write(manual_redirect)
with open(BUILD_PATH+"/h.html", "w") as mf:
    mf.write(help_redirect)
175  
def optimise_manual(path):
    """Shrink the bundled Reticulum manual so it fits on-device.

    Downscales and recompresses large images with ImageMagick's ``convert``,
    deletes files and directories that are not needed for offline browsing,
    then moves the manual into BUILD_PATH/m.
    """
    pm = 180  # target width in pixels for board/device images
    scale_imgs = [
        ("_images/board_rnodev2.png", pm),
        ("_images/board_rnode.png", pm),
        ("_images/board_heltec32v20.png", pm),
        ("_images/board_heltec32v30.png", pm),
        ("_images/board_heltec32v4.png", pm),
        ("_images/board_t3v21.png", pm),
        ("_images/board_t3v20.png", pm),
        ("_images/board_t3v10.png", pm),
        ("_images/board_t3s3.png", pm),
        ("_images/board_tbeam.png", pm),
        ("_images/board_tdeck.png", pm),
        ("_images/board_rak4631.png", pm),
        ("_images/board_tbeam_supreme.png", pm),
        ("_images/sideband_devices.webp", pm),
        ("_images/nomadnet_3.png", pm),
        ("_images/meshchat_1.webp", pm),
        ("_images/radio_is5ac.png", pm),
        ("_images/radio_rblhg5.png", pm),
        ("_static/rns_logo_512.png", 256),
        ("../images/bg_h_1.webp", pm),
    ]

    import subprocess
    for i, s in scale_imgs:
        input_file = path+"/"+i
        output_file = input_file  # rescale in place
        # Pass the argument vector directly instead of building a shell
        # string and shlex-splitting it: paths containing spaces would
        # otherwise be broken into separate arguments.
        resize = ["convert", input_file, "-quality", "25", "-resize", str(s), output_file]
        print(" ".join(resize))
        subprocess.call(resize, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    # Large or redundant files that the on-device manual does not need.
    remove_files = [
        "objects.inv",
        "Reticulum Manual.pdf",
        "Reticulum Manual.epub",
        "_static/styles/furo.css.map",
        "_static/scripts/furo.js.map",
        "_static/jquery-3.6.0.js",
        "_static/jquery.js",
        "static/underscore-1.13.1.js",
        "_static/_sphinx_javascript_frameworks_compat.js",
        "_static/scripts/furo.js.LICENSE.txt",
        "_static/styles/furo-extensions.css.map",
        "_images/board_rak4631.png",
        "_images/board_rnodev2.png",
        "_images/board_t114.png",
        "_images/board_t3s3.png",
        "_images/board_t3v10.png",
        "_images/board_t3v20.png",
        "_images/board_t3v21.png",
        "_images/board_tbeam.png",
        "_images/board_tdeck.png",
        "_images/board_techo.png",
        "_images/board_tbeam_supreme.png",
        "_images/board_opencomxl.png",
        "_images/board_heltec32v20.png",
        "_images/board_heltec32v30.png",
        # "_static/pygments.css",
        # "_static/language_data.js",
        # "_static/searchtools.js",
        # "searchindex.js",
    ]
    for file in remove_files:
        fp = path+"/"+file
        print("Removing file: "+str(fp))
        try:
            os.unlink(fp)
        except Exception as e:
            print("An error occurred while attempting to unlink "+str(fp)+": "+str(e))

    remove_dirs = [
        "_sources",
    ]
    for d in remove_dirs:
        fp = path+"/"+d
        print("Removing dir: "+str(fp))
        # Guarded like the file removals above so a missing directory does
        # not abort the whole build.
        try:
            shutil.rmtree(fp)
        except Exception as e:
            print("An error occurred while attempting to remove "+str(fp)+": "+str(e))

    shutil.move(path, BUILD_PATH+"/m")
260  
def fetch_reticulum_site():
    """Copy the mirrored reticulum.network site into the build output.

    Copies the site only if it is not already present, optimises the
    bundled manual, removes redundant assets, and rewrites asset paths in
    the generated HTML.
    """
    r_site_path = BUILD_PATH+"/r"
    if not os.path.isdir(r_site_path):
        shutil.copytree(PACKAGES_PATH+"/reticulum.network", r_site_path)
    if os.path.isdir(r_site_path+"/manual"):
        optimise_manual(r_site_path+"/manual")
    remove_files = [
        "gfx/reticulum_logo_512.png",
    ]
    for file in remove_files:
        fp = r_site_path+"/"+file
        print("Removing file: "+str(fp))
        # Guarded: on a re-run the copytree above is skipped, so this file
        # is already gone and an unguarded unlink would crash the build.
        try:
            os.unlink(fp)
        except Exception as e:
            print("An error occurred while attempting to unlink "+str(fp)+": "+str(e))
    replace_paths()
275  
def replace_paths():
    """Rewrite known asset references in every generated HTML file under BUILD_PATH."""
    # (search, replacement) pairs applied to each HTML file.
    repls = [
        ("gfx/reticulum_logo_512.png", "/m/_static/rns_logo_512.png")
    ]
    for root, dirs, files in os.walk(BUILD_PATH):
        for file in files:
            fpath = root+"/"+file
            if not fpath.endswith(".html"):
                continue
            print("Performing replacements in "+fpath)
            # Context managers ensure the handles are closed even when a
            # read/decode or write fails mid-way.
            with open(fpath, "rb") as f:
                html = f.read().decode("utf-8")
            for s, r in repls:
                html = html.replace(s, r)
            with open(fpath, "wb") as f:
                f.write(html.encode("utf-8"))
297  
298  
def remap_names():
    """Move build outputs whose root-relative path is longer than 31
    characters into the short top-level directory BUILD_PATH/d."""
    short_dir = BUILD_PATH+"/d"
    for root, dirs, files in os.walk(BUILD_PATH):
        for name in files:
            full_path = root+"/"+name
            rel_path = full_path.replace(BUILD_PATH, "")
            if len(rel_path) <= 31:
                continue
            print("Path "+rel_path+" is too long, remapping...")
            if not os.path.isdir(short_dir):
                os.makedirs(short_dir)
            shutil.move(full_path, short_dir+"/")
309  
310              
311  
def gz_all():
    """Gzip-compress every file under BUILD_PATH in place, removing the originals."""
    import gzip
    for root, dirs, files in os.walk(BUILD_PATH):
        for file in files:
            fpath = root+"/"+file
            print("Gzipping "+fpath+"...")
            # Context managers guarantee both handles are closed even if
            # compression fails; copyfileobj streams in chunks instead of
            # relying on line iteration over (possibly binary) data.
            with open(fpath, "rb") as f, gzip.open(fpath+".gz", "wb") as g:
                shutil.copyfileobj(f, g)
            os.unlink(fpath)
324  
# Bundle each distributable wheel into a single-file zip inside the build
# output so it can be downloaded from the device.
from zipfile import ZipFile
for pkg_name in packages:
    pkg_file = packages[pkg_name]
    pkg_full_path = PACKAGES_PATH+"/"+pkg_file
    if not os.path.isfile(pkg_full_path):
        print("Could not find "+pkg_full_path)
        exit(1)

    print("Including "+pkg_file)
    # "with" guarantees the archive is finalised and closed even on error
    # (the previous code left it open if write() raised).
    with ZipFile(BUILD_PATH+"/pkg/"+pkg_name+".zip", "w") as z:
        # Store the wheel under its bare filename, without the archive dir.
        z.write(pkg_full_path, pkg_full_path[len(PACKAGES_PATH+"/"):])
339  
# Emit additional copies of mapped pages at alternate URL locations.
for um in url_maps:
    with open(SOURCES_PATH+"/"+um["target"], "rb") as f:
        # Mapped pages always live one directory below the build root.
        root_path = "../"
        html = generate_html(f, root_path)

        print("Map path   : "+str(um["path"]))
        print("Map target : "+str(um["target"]))
        print("Mapped root path: "+str(root_path))

        # exist_ok makes the prior isdir() check redundant.
        os.makedirs(BUILD_PATH+"/"+um["path"], exist_ok=True)

        with open(BUILD_PATH+"/"+um["path"]+"/index.html", "wb") as wf:
            wf.write(html.encode(OUTPUT_ENCODING))
355  
# Render every Markdown source into its corresponding HTML output file.
for mdf in source_files:
    with open(mdf, "rb") as f:
        # Output path mirrors the source tree, with .md -> .html.
        of = BUILD_PATH+mdf.replace(SOURCES_PATH, "").replace(".md", ".html")
        # One "../" per directory level below the build root, so relative
        # asset and menu links resolve correctly from nested pages.
        root_path = "../"*(len(of.replace(BUILD_PATH+"/", "").split("/"))-1)
        html = generate_html(f, root_path)

        # exist_ok makes the prior isdir() check redundant.
        os.makedirs(os.path.dirname(of), exist_ok=True)

        with open(of, "wb") as wf:
            wf.write(html.encode(OUTPUT_ENCODING))
367  
fetch_reticulum_site()

# Optional post-processing steps, each skippable from the command line.
if "--no-gz" not in sys.argv:
    gz_all()

if "--no-remap" not in sys.argv:
    remap_names()