Now supports nested paths. Better section detection.

spf13 2013-08-13 19:39:24 -04:00
parent 6e27239485
commit b4bcc591e4
3 changed files with 72 additions and 56 deletions


@@ -19,18 +19,23 @@ import (
)

type Node struct {
    Url         string
    Permalink   template.HTML
    RSSlink     template.HTML
    Site        SiteInfo
    layout      string
    Data        map[string]interface{}
    Section     string
    Slug        string
    Title       string
    Description string
    Keywords    []string
    Date        time.Time
    UrlPath
}

type UrlPath struct {
    Url       string
    Permalink template.HTML
    Slug      string
    Section   string
    Path      string
}

func (n *Node) GetSection() string {
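The hunk above replaces Node's flat Url/Permalink/Slug/Section fields with an embedded UrlPath struct. Below is a minimal, self-contained sketch, using made-up stand-in types rather than Hugo's, of why this keeps existing call sites working: Go promotes the embedded struct's fields, so accesses like node.Section still compile.

package main

import "fmt"

// Illustrative stand-ins only; the field names mirror the diff above.
type urlPath struct {
    Url     string
    Slug    string
    Section string
    Path    string
}

type node struct {
    Title   string
    urlPath // embedded: its fields are promoted onto node
}

func main() {
    n := node{Title: "Post", urlPath: urlPath{Section: "blog", Path: "blog/2013"}}
    fmt.Println(n.Section) // "blog", reached through field promotion
    fmt.Println(n.Path)    // "blog/2013"
}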


@@ -81,34 +81,82 @@ func (p Pages) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Pages) Sort() { sort.Sort(p) }
func (p Pages) Limit(n int) Pages { return p[0:n] }

func initializePage(filename string) (page Page) {
    page = Page{contentType: "",
// TODO abstract further to support loading from more
// than just files on disk. Should load reader (file, []byte)
func NewPage(filename string) *Page {
    page := Page{contentType: "",
        File: File{FileName: filename, Extension: "html"},
        Node: Node{Keywords: make([]string, 10, 30)},
        Params: make(map[string]interface{}),
        Markup: "md"}
    page.Date, _ = time.Parse("20060102", "20080101")
    page.setSection()
    return page
    return &page
}

func (p *Page) setSection() {
    x := strings.Split(p.FileName, string(os.PathSeparator))
func (page *Page) Initalize() error {
    err := page.setUrlPath()
    if err != nil {
        return err
    }
    err = page.buildPageFromFile()
    if err != nil {
        return err
    }
    page.analyzePage()
    return nil
}

func (p *Page) setUrlPath() error {
    y := strings.TrimPrefix(p.FileName, p.Site.Config.GetAbsPath(p.Site.Config.ContentDir))
    x := strings.Split(y, string(os.PathSeparator))
    if len(x) <= 1 {
        return errors.New("Zero length page name")
    }
    p.Section = strings.Trim(x[1], "/\\")
    p.Path = strings.Trim(strings.Join(x[:len(x)-1], string(os.PathSeparator)), "/\\")
    return nil
}
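A standalone sketch of the derivation setUrlPath performs above: strip the absolute content directory off the file name, split the remainder on the path separator, take the first element as the section, and everything but the file name as the nested path. The directory names below are invented for the example and assume a Unix-style separator; this is not Hugo's API.

package main

import (
    "errors"
    "fmt"
    "os"
    "strings"
)

// deriveSectionAndPath mirrors the setUrlPath logic in standalone form.
func deriveSectionAndPath(fileName, contentDir string) (section, path string, err error) {
    y := strings.TrimPrefix(fileName, contentDir)
    x := strings.Split(y, string(os.PathSeparator))
    if len(x) <= 1 {
        return "", "", errors.New("Zero length page name")
    }
    section = strings.Trim(x[1], "/\\")
    path = strings.Trim(strings.Join(x[:len(x)-1], string(os.PathSeparator)), "/\\")
    return section, path, nil
}

func main() {
    // A source file nested two directories below the content dir.
    section, path, _ := deriveSectionAndPath("/site/content/blog/2013/post.md", "/site/content")
    fmt.Println(section) // blog
    fmt.Println(path)    // blog/2013
}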
// If Url is provided it is assumed to be the complete relative path
// and will override everything
// Otherwise path + slug is used if provided
// Lastly path + filename is used if provided
func (p *Page) setOutFile() {
    // Always use Url if it's specified
    if len(strings.TrimSpace(p.Url)) > 2 {
        p.OutFile = strings.TrimSpace(p.Url)
        return
    }

    if section := x[len(x)-2]; section != "content" {
        p.Section = section
    var outfile string
    if len(strings.TrimSpace(p.Slug)) > 0 {
        // Use Slug if provided
        if p.Site.Config.UglyUrls {
            outfile = p.Slug + "." + p.Extension
        } else {
            outfile = p.Slug + slash + "index." + p.Extension
        }
    } else {
        // Fall back to filename
        _, t := filepath.Split(p.FileName)
        if p.Site.Config.UglyUrls {
            outfile = replaceExtension(strings.TrimSpace(t), p.Extension)
        } else {
            file, _ := fileExt(strings.TrimSpace(t))
            outfile = file + slash + "index." + p.Extension
        }
    }

    p.OutFile = p.Path + string(os.PathSeparator) + strings.TrimSpace(outfile)
}
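A sketch of the precedence the new setOutFile applies: an explicit Url is used verbatim, otherwise the slug, otherwise the source file name, with UglyUrls choosing "name.ext" over "name/index.ext", and the nested path prefixed last. The function and variable names and the "/" separator are illustrative, and filepath.Ext stands in for the fileExt/replaceExtension helpers.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// outFileFor sketches the Url > slug > filename precedence shown above.
func outFileFor(url, slug, fileName, path, ext string, uglyUrls bool) string {
    if len(strings.TrimSpace(url)) > 2 {
        return strings.TrimSpace(url)
    }
    var outfile string
    if slug = strings.TrimSpace(slug); len(slug) > 0 {
        if uglyUrls {
            outfile = slug + "." + ext
        } else {
            outfile = slug + "/index." + ext
        }
    } else {
        _, t := filepath.Split(fileName)
        base := strings.TrimSuffix(t, filepath.Ext(t))
        if uglyUrls {
            outfile = base + "." + ext
        } else {
            outfile = base + "/index." + ext
        }
    }
    return path + "/" + outfile
}

func main() {
    fmt.Println(outFileFor("", "my-post", "blog/2013/post.md", "blog/2013", "html", false))
    // blog/2013/my-post/index.html
    fmt.Println(outFileFor("", "", "blog/2013/post.md", "blog/2013", "html", true))
    // blog/2013/post.html
}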
func (page *Page) Type() string {
    if page.contentType != "" {
        return page.contentType
    }
    page.setSection()
    page.setUrlPath()
    if x := page.GetSection(); x != "" {
        return x
    }

@@ -136,7 +184,7 @@ func ReadFrom(buf io.Reader, name string) (page *Page, err error) {
        return nil, errors.New("Zero length page name")
    }

    p := initializePage(name)
    p := NewPage(name)
    if err = p.parse(buf); err != nil {
        return

@@ -144,22 +192,7 @@ func ReadFrom(buf io.Reader, name string) (page *Page, err error) {
    p.analyzePage()
    return &p, nil
}

// TODO should return errors as well
// TODO new page should return just a page
// TODO initalize separately... load from reader (file, or []byte)
func NewPage(filename string) *Page {
    p := initializePage(filename)
    if err := p.buildPageFromFile(); err != nil {
        fmt.Println(err)
    }
    p.analyzePage()
    return &p
    return p, nil
}
func (p *Page) analyzePage() {
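The TODOs above point at loading pages from more than just files on disk. ReadFrom already accepts any io.Reader, so in-memory content can be fed in today. The wrapper below is only a sketch, assumed to sit in the same package as Page with "bytes" added to that file's imports; the helper name is made up.

// pageFromBytes is a hypothetical convenience wrapper, not part of Hugo:
// it hands an in-memory byte slice to the same parsing path ReadFrom uses
// for files, by wrapping it in a bytes.Reader.
func pageFromBytes(raw []byte, name string) (*Page, error) {
    return ReadFrom(bytes.NewReader(raw), name)
}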


@@ -263,7 +263,8 @@ func (s *Site) CreatePages() {
    page := NewPage(fileName)
    page.Site = s.Info
    page.Tmpl = s.Tmpl
    s.setOutFile(page)
    page.Initalize()
    page.setOutFile()
    if s.Config.BuildDrafts || !page.Draft {
        s.Pages = append(s.Pages, page)
    }
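The hunk above calls Initalize but drops the error it returns. Below is a sketch of the same per-file flow with that error checked; it is assumed to sit in the same package as Site, and the helper name and fileNames parameter are illustrative. The ordering matters: setOutFile builds on the Path that Initalize derives.

// createPagesChecked is a hypothetical variant of the loop shown above.
func (s *Site) createPagesChecked(fileNames []string) error {
    for _, fileName := range fileNames {
        page := NewPage(fileName)
        page.Site = s.Info
        page.Tmpl = s.Tmpl
        if err := page.Initalize(); err != nil {
            return err // setUrlPath or buildPageFromFile failed
        }
        page.setOutFile() // uses page.Path, so it must run after Initalize
        if s.Config.BuildDrafts || !page.Draft {
            s.Pages = append(s.Pages, page)
        }
    }
    return nil
}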
@@ -361,30 +362,7 @@ func (s *Site) RenderPages() error {
func (s *Site) WritePages() {
    for _, p := range s.Pages {
        s.WritePublic(p.Section+slash+p.OutFile, p.RenderedContent.Bytes())
    }
}

func (s *Site) setOutFile(p *Page) {
    if len(strings.TrimSpace(p.Slug)) > 0 {
        // Use Slug if provided
        if s.Config.UglyUrls {
            p.OutFile = strings.TrimSpace(p.Slug + "." + p.Extension)
        } else {
            p.OutFile = strings.TrimSpace(p.Slug + slash + "index.html")
        }
    } else if len(strings.TrimSpace(p.Url)) > 2 {
        // Use Url if provided & Slug missing
        p.OutFile = strings.TrimSpace(p.Url)
    } else {
        // Fall back to filename
        _, t := filepath.Split(p.FileName)
        if s.Config.UglyUrls {
            p.OutFile = replaceExtension(strings.TrimSpace(t), p.Extension)
        } else {
            file, _ := fileExt(strings.TrimSpace(t))
            p.OutFile = file + slash + "index." + p.Extension
        }
        s.WritePublic(p.OutFile, p.RenderedContent.Bytes())
    }
}