diff --git a/app/kgpz.go b/app/kgpz.go index 3a94be8..9507939 100644 --- a/app/kgpz.go +++ b/app/kgpz.go @@ -19,21 +19,12 @@ const ( PIECES_DIR = "XML/beitraege/" ) -type Library struct { - Agents *providers.AgentProvider - Places *providers.PlaceProvider - Works *providers.WorkProvider - Categories *providers.CategoryProvider - Issues *providers.IssueProvider - Pieces *providers.PieceProvider -} - type KGPZ struct { - lmu sync.Mutex - gmu sync.Mutex - Config *providers.ConfigProvider - Repo *providers.GitProvider - Library + lmu sync.Mutex + gmu sync.Mutex + Config *providers.ConfigProvider + Repo *providers.GitProvider + Library *providers.Library } func (k *KGPZ) Init() { @@ -53,17 +44,77 @@ func NewKGPZ(config *providers.ConfigProvider) *KGPZ { return &KGPZ{Config: config} } +func (k *KGPZ) Serialize() { + // TODO: this is error handling from hell + // There is no need to recreate the whole library if the paths haven't changed + // We do it to keep the old data if the new data is missing + + // Preventing pulling and serializing at the same time + k.gmu.Lock() + defer k.gmu.Unlock() + + issues, err := getXMLFiles(filepath.Join(k.Config.FolderPath, ISSUES_DIR)) + helpers.MaybePanic(err, "Error getting issues") + + pieces, err := getXMLFiles(filepath.Join(k.Config.FolderPath, PIECES_DIR)) + helpers.MaybePanic(err, "Error getting pieces") + + lib := providers.NewLibrary( + []string{filepath.Join(k.Config.FolderPath, AGENTS_PATH)}, + []string{filepath.Join(k.Config.FolderPath, PLACES_PATH)}, + []string{filepath.Join(k.Config.FolderPath, WORKS_PATH)}, + []string{filepath.Join(k.Config.FolderPath, CATEGORIES_PATH)}, + *issues, + *pieces) + + lib.Serialize() + + // TODO: is it necessary to lock here, since gmu lock prevents concurrent locking of the library? 
+ k.lmu.Lock() + defer k.lmu.Unlock() + + if k.Library == nil { + k.Library = lib + return + } + + if lib.Agents == nil { + lib.Agents = k.Library.Agents + } + + if lib.Places == nil { + lib.Places = k.Library.Places + } + + if lib.Works == nil { + lib.Works = k.Library.Works + } + + if lib.Categories == nil { + lib.Categories = k.Library.Categories + } + + if lib.Issues == nil { + lib.Issues = k.Library.Issues + } + + if lib.Pieces == nil { + lib.Pieces = k.Library.Pieces + } + + k.Library = lib +} + func (k *KGPZ) IsDebug() bool { return k.Config.Debug } func (k *KGPZ) Pull() { - // TODO: what happens if the application quits mid-pull? - // We need to make sure to exit gracefully go func() { k.gmu.Lock() - defer k.gmu.Unlock() + if k.Repo == nil { + k.gmu.Unlock() return } @@ -72,11 +123,13 @@ func (k *KGPZ) Pull() { helpers.LogOnErr(&k.Repo, err, "Error pulling repo") } + // Need to unlock here to prevent deadlock, since Serialize locks the same mutex + k.gmu.Unlock() + if changed { if k.IsDebug() { helpers.LogOnDebug(&k.Repo, "GitProvider changed") } - // Locking is handled in Serialize() k.Serialize() } }() @@ -99,161 +152,6 @@ func (k *KGPZ) initRepo() { } } -// This panics if the data cant be read, and there is no data read -func (k *KGPZ) Serialize() { - new := Library{} - - wg := sync.WaitGroup{} - wg.Add(6) - - go func() { - defer wg.Done() - new.Agents = k.InitAgents() - }() - - go func() { - defer wg.Done() - new.Places = k.InitPlaces() - }() - - go func() { - defer wg.Done() - new.Works = k.InitWorks() - }() - - go func() { - defer wg.Done() - new.Categories = k.InitCategories() - }() - - go func() { - defer wg.Done() - new.Issues = k.InitIssues() - }() - - go func() { - defer wg.Done() - new.Pieces = k.InitPieces() - }() - - wg.Wait() - - k.lmu.Lock() - defer k.lmu.Unlock() - k.Library = new -} - -// TODO: on error, we need to log the error, and use stale data to recover gracefully -// If Repo != nil we can try the last commit; if k != nil we can try 
the last data -func (k *KGPZ) InitAgents() *providers.AgentProvider { - ap := providers.NewAgentProvider([]string{filepath.Join(k.Config.FolderPath, AGENTS_PATH)}) - if err := ap.Load(); err != nil { - helpers.LogOnErr(&ap, err, "Error loading agents") - k.lmu.Lock() - ap.Items = k.Agents.Items - k.lmu.Unlock() - // TODO: mark as stale - } - - if k.Config.LogData { - helpers.LogOnDebug(&ap, "AgentProvider") - } - - return ap -} - -func (k *KGPZ) InitPlaces() *providers.PlaceProvider { - pp := providers.NewPlaceProvider([]string{filepath.Join(k.Config.FolderPath, PLACES_PATH)}) - if err := pp.Load(); err != nil { - helpers.LogOnErr(&pp, err, "Error loading places") - k.lmu.Lock() - pp.Items = k.Places.Items - k.lmu.Unlock() - // TODO: mark as stale - } - - if k.Config.LogData { - helpers.LogOnDebug(&pp, "PlaceProvider") - } - - return pp -} - -func (k *KGPZ) InitWorks() *providers.WorkProvider { - wp := providers.NewWorkProvider([]string{filepath.Join(k.Config.FolderPath, WORKS_PATH)}) - if err := wp.Load(); err != nil { - helpers.LogOnErr(&wp, err, "Error loading works") - k.lmu.Lock() - wp.Items = k.Works.Items - k.lmu.Unlock() - // TODO: mark as stale - } - - if k.Config.LogData { - helpers.LogOnDebug(&wp, "WorkProvider") - } - - return wp -} - -func (k *KGPZ) InitCategories() *providers.CategoryProvider { - cp := providers.NewCategoryProvider([]string{filepath.Join(k.Config.FolderPath, CATEGORIES_PATH)}) - if err := cp.Load(); err != nil { - helpers.LogOnErr(&cp, err, "Error loading categories") - k.lmu.Lock() - cp.Items = k.Categories.Items - k.lmu.Unlock() - } - - if k.Config.LogData { - helpers.LogOnDebug(&cp, "CategoryProvider") - } - - return cp -} - -func (k *KGPZ) InitIssues() *providers.IssueProvider { - files, err := getXMLFiles(filepath.Join(k.Config.FolderPath, ISSUES_DIR)) - - helpers.MaybePanic(err, "Error getting issues files") - - cp := providers.NewIssueProvider(*files) - if err := cp.Load(); err != nil { - helpers.LogOnErr(&cp, err, "Error 
loading issues") - k.lmu.Lock() - cp.Items = k.Issues.Items - k.lmu.Unlock() - // TODO: mark as stale - } - - if k.Config.LogData { - helpers.LogOnDebug(&cp, "IssueProvider") - } - - return cp -} - -func (k *KGPZ) InitPieces() *providers.PieceProvider { - files, err := getXMLFiles(filepath.Join(k.Config.FolderPath, PIECES_DIR)) - - helpers.MaybePanic(err, "Error getting pieces files") - - cp := providers.NewPieceProvider(*files) - if err := cp.Load(); err != nil { - helpers.LogOnErr(&cp, err, "Error loading pieces") - k.lmu.Lock() - cp.Items = k.Pieces.Items - k.lmu.Unlock() - // TODO: mark as stale - } - - if k.Config.LogData { - helpers.LogOnDebug(&cp, "PieceProvider") - } - - return cp -} - func (k *KGPZ) Shutdown() { k.Repo.Wait() } diff --git a/providers/xmlprovider.go b/providers/xmlprovider.go index 5aee31d..97a070c 100644 --- a/providers/xmlprovider.go +++ b/providers/xmlprovider.go @@ -19,7 +19,88 @@ type XMLProvider[T KGPZXML[T]] struct { Items T } -func (p *XMLProvider[T]) Load() error { +type Library struct { + Agents *AgentProvider + Places *PlaceProvider + Works *WorkProvider + Categories *CategoryProvider + Issues *IssueProvider + Pieces *PieceProvider +} + +func NewLibrary(agentpaths, placepaths, workpaths, categorypaths, issuepaths, piecepaths []string) *Library { + return &Library{ + Agents: NewAgentProvider(agentpaths), + Places: NewPlaceProvider(placepaths), + Works: NewWorkProvider(workpaths), + Categories: NewCategoryProvider(categorypaths), + Issues: NewIssueProvider(issuepaths), + Pieces: NewPieceProvider(piecepaths), + } +} + +func (l *Library) Serialize() { + wg := sync.WaitGroup{} + wg.Add(6) + + go func() { + defer wg.Done() + err := l.Agents.Serialize() + if err != nil { + l.Agents = nil + fmt.Println(err) + } + }() + + go func() { + defer wg.Done() + err := l.Places.Serialize() + if err != nil { + l.Places = nil + fmt.Println(err) + } + }() + + go func() { + defer wg.Done() + err := l.Works.Serialize() + if err != nil { + l.Works = nil 
+ fmt.Println(err) + } + }() + + go func() { + defer wg.Done() + err := l.Categories.Serialize() + if err != nil { + l.Categories = nil + fmt.Println(err) + } + }() + + go func() { + defer wg.Done() + err := l.Issues.Serialize() + if err != nil { + l.Issues = nil + fmt.Println(err) + } + }() + + go func() { + defer wg.Done() + err := l.Pieces.Serialize() + if err != nil { + l.Pieces = nil + fmt.Println(err) + } + }() + + wg.Wait() +} + +func (p *XMLProvider[T]) Serialize() error { // Introduce goroutine for every path, locking on append: var wg sync.WaitGroup for _, path := range p.paths { diff --git a/server/server.go b/server/server.go index dc0e8a2..caca617 100644 --- a/server/server.go +++ b/server/server.go @@ -7,6 +7,7 @@ import ( "github.com/Theodor-Springmann-Stiftung/kgpz_web/app" "github.com/Theodor-Springmann-Stiftung/kgpz_web/providers" + "github.com/Theodor-Springmann-Stiftung/kgpz_web/views" "github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2/middleware/logger" @@ -16,6 +17,8 @@ const ( REQUEST_TIMEOUT = 8 * time.Second SERVER_TIMEOUT = 8 * time.Second + + STATIC_PREFIX = "/assets" ) // INFO: Server is a meta-package that handles the current router, which it starts in a goroutine. @@ -59,6 +62,7 @@ func (s *Server) Start() { } srv.Use(recover.New()) + srv.Use(STATIC_PREFIX, static(&views.StaticFS, STATIC_PREFIX)) srv.Get("/", func(c *fiber.Ctx) error { return c.SendString("I'm a GET request!") diff --git a/server/services.go b/server/services.go new file mode 100644 index 0000000..6d186f9 --- /dev/null +++ b/server/services.go @@ -0,0 +1,19 @@ +package server + +import ( + "io/fs" + "net/http" + + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/filesystem" +) + +func static(fs *fs.FS, prefix string) fiber.Handler { + return filesystem.New(filesystem.Config{ + Root: http.FS(*fs), + PathPrefix: prefix, + Browse: false, + Index: "index.html", + MaxAge: 3600, + }) +}