diff --git a/cinera/cinera.c b/cinera/cinera.c index 7a12cb0..aae1a59 100644 --- a/cinera/cinera.c +++ b/cinera/cinera.c @@ -26,6 +26,7 @@ version CINERA_APP_VERSION = { .Patch = 6 }; +#include #include // NOTE(matt): varargs #include // NOTE(matt): printf, sprintf, vsprintf, fprintf, perror #include // NOTE(matt): calloc, malloc, free @@ -84,6 +85,7 @@ typedef uint64_t bool; #define DEBUG 0 #define DEBUG_MEM 0 +//#define DEBUG_THREADING // //// @@ -134,6 +136,170 @@ clock_t TIMING_START; #define MAX(A, B) A > B ? A : B #define Clamp(EndA, N, EndB) int Min = MIN(EndA, EndB); int Max = MAX(EndA, EndB); if(N < Min) { N = Min; } else if(N > Max) { N = Max; } +#ifdef DEBUG_THREADING +#define DebugPrint(...) fprintf(stderr, "%s[%s/%u]%s ", ColourStrings[CS_BLACK_BOLD], __FILE__, __LINE__, ColourStrings[CS_END]); fprintf(stderr, __VA_ARGS__) +#else +#define DebugPrint(...) +#endif + +char *JobTypeStrings[] = +{ + 0, + "DB FREE", + "DB INIT", + "DB SHUTDOWN", + "IO SHUTDOWN", +}; + +typedef enum +{ + JTI_NULL, + JTI_DB_FREE, + JTI_DB_INIT, + JTI_DB_SHUTDOWN, + JTI_IO_SHUTDOWN, +} job_type_id; + +typedef struct +{ + job_type_id Type; + void *InputData; + void *OutputDest; + pthread_cond_t Complete; + bool Valid; +} job; + +#define MAX_JOB_COUNT 4 + +typedef struct +{ + job Job[MAX_JOB_COUNT]; + int NextJobToTake; + pthread_mutex_t NextJobToTakeMutex; + int NextJobToFill; + pthread_mutex_t NextJobToFillMutex; + pthread_cond_t Populated; +} job_queue; + +job_queue JobQueue; + +void +PrintJob(job J) +{ + fprintf(stderr, "\"%s\"\n", JobTypeStrings[J.Type]); +#if 0 + switch(J.Type) + { + case JTI_FILE_IO_ADD: + { + printf("%.*s %d\n", MAX_CINERA_FILENAME_LENGTH, J.FileIOData.Filename, J.FileIOData.Filesize); + } break; + case JTI_FILE_IO_GET: + { + printf("%.*s\n", MAX_CINERA_FILENAME_LENGTH, J.FileIOData.Filename); + } break; + case JTI_UI: + { + printf("%.*s\n", MAX_USERINPUT_STRING_LENGTH, J.UIData.String); + } break; + default: + { + printf("\n"); + } break; + } +#endif +} 
+ +#ifdef DEBUG_THREADING +void +PrintJobQueue(job_queue *Q) +{ + fprintf(stderr, "\n\n"); + for(int i = 0; i < MAX_JOB_COUNT; ++i) + { + fprintf(stderr, "[%d] ", i); + PrintJob(Q->Job[i]); + } +} +#else +#define PrintJobQueue(...) +#endif + +job * +PushJob(job_queue *Q, job_type_id Type, void *InputData, void *OutputDest) +{ + // USAGE: OutputDestination may be either a fixed or variable size. For jobs whose output is a variable size, as + // documented in JobInfo, the caller is to pass an unallocated pointer. The job handler will allocate the + // necessary memory. The caller must then free this memory. + + pthread_mutex_lock(&Q->NextJobToFillMutex); + job *J = &Q->Job[Q->NextJobToFill]; + if(J->Type == JTI_NULL) + { + job Empty = {}; + *J = Empty; +#if 0 + switch(Type) + { + case JTI_FILE_IO_ADD: + { + database_cinera_entry FileIOJobData = *(database_cinera_entry *)InputData; + ClearCopyStringNoTerminate(J->FileIOData.Filename, sizeof(J->FileIOData.Filename), Wrap0i(FileIOJobData.Filename, MAX_CINERA_FILENAME_LENGTH)); + J->FileIOData.Filesize = FileIOJobData.Filesize; + } break; + case JTI_FILE_IO_GET: + { + ClearCopyStringNoTerminate(J->FileIOData.Filename, sizeof(J->FileIOData.Filename), Wrap0i(InputData, MAX_CINERA_FILENAME_LENGTH)); + } break; + case JTI_UI: + { + database_userinput_entry UIJobData = *(database_userinput_entry *)InputData; + ClearCopyStringNoTerminate(J->UIData.String, sizeof(J->UIData.String), Wrap0i(UIJobData.String, MAX_USERINPUT_STRING_LENGTH)); + } break; + } +#endif + Q->NextJobToFill = (Q->NextJobToFill + 1) % MAX_JOB_COUNT; + J->Type = Type; + J->OutputDest = OutputDest; + } + else + { + J = 0; + printf("Error: Job queue full"); + } + + pthread_cond_signal(&Q->Populated); + pthread_mutex_unlock(&Q->NextJobToFillMutex); + pthread_cond_init(&J->Complete, NULL); + //PrintJobQueue(Q); + return J; +} + +bool +QueueIsPopulated(job_queue *Q) +{ + job *J = &Q->Job[Q->NextJobToTake]; + bool Result = J->Type != JTI_NULL; + return Result; +} + 
+job * +TakeJob(job_queue *Q) +{ + //pthread_mutex_lock(&Q->NextJobToTakeMutex); + job *J = &Q->Job[Q->NextJobToTake]; + if(J->Type != JTI_NULL) + { + Q->NextJobToTake = (Q->NextJobToTake + 1) % MAX_JOB_COUNT; + } + else + { + printf("Error: Job queue empty"); + } + //pthread_mutex_unlock(&Q->NextJobToTakeMutex); + return J; +} + typedef int32_t hash32; typedef hash32 asset_hash; @@ -3173,7 +3339,7 @@ edit_type EditTypes[] = void LogUsage(buffer *Buffer) { -#if DEBUG +#if 0 // NOTE(matt): Stack-string char LogPath[256]; CopyString(LogPath, "%s/%s", CurrentProject->CacheDir, "buffers.log"); @@ -3288,8 +3454,8 @@ ClaimBuffer(buffer *Buffer, buffer_id ID, int Size) Buffer->Ptr = Buffer->Location; #if DEBUG float PercentageUsed = (float)(MemoryArena.Ptr - MemoryArena.Location) / MemoryArena.Size * 100; - printf(" ClaimBuffer(%s): %d\n" - " Total ClaimedMemory: %ld (%.2f%%, leaving %ld free)\n\n", Buffer->ID, Buffer->Size, MemoryArena.Ptr - MemoryArena.Location, PercentageUsed, MemoryArena.Size - (MemoryArena.Ptr - MemoryArena.Location)); + printf(" ClaimBuffer(%s): %ld\n" + " Total ClaimedMemory: %ld (%.2f%%, leaving %ld free)\n\n", BufferIDStrings[Buffer->ID], Buffer->Size, MemoryArena.Ptr - MemoryArena.Location, PercentageUsed, MemoryArena.Size - (MemoryArena.Ptr - MemoryArena.Location)); #endif return RC_SUCCESS; } @@ -3302,10 +3468,10 @@ DeclaimBuffer(buffer *Buffer) float PercentageUsed = (float)(Buffer->Ptr - Buffer->Location) / Buffer->Size * 100; #if DEBUG printf("DeclaimBuffer(%s)\n" - " Used: %ld / %d (%.2f%%)\n" + " Used: %li / %ld (%.2f%%)\n" "\n" " Total ClaimedMemory: %ld\n\n", - Buffer->ID, + BufferIDStrings[Buffer->ID], Buffer->Ptr - Buffer->Location, Buffer->Size, PercentageUsed, @@ -3342,8 +3508,8 @@ RewindBuffer(buffer *Buffer) #if DEBUG float PercentageUsed = (float)(Buffer->Ptr - Buffer->Location) / Buffer->Size * 100; printf("Rewinding %s\n" - " Used: %ld / %d (%.2f%%)\n\n", - Buffer->ID, + " Used: %ld / %ld (%.2f%%)\n\n", + 
BufferIDStrings[Buffer->ID], Buffer->Ptr - Buffer->Location, Buffer->Size, PercentageUsed); @@ -5095,11 +5261,19 @@ ReadSearchPageIntoBuffer(file *File, string *BaseDir, string *SearchLocation) return ReadFileIntoBuffer(File); } +string +GetGlobalSearchPageLocation(void) +{ + string Result = {}; + db_block_projects *ProjectsBlock = DB.Metadata.Signposts.ProjectsBlock.Ptr ? DB.Metadata.Signposts.ProjectsBlock.Ptr : LocateBlock(B_PROJ); + Result = Wrap0i(ProjectsBlock->GlobalSearchDir, sizeof(ProjectsBlock->GlobalSearchDir)); + return Result; +} + rc ReadGlobalSearchPageIntoBuffer(file *File) { - db_block_projects *ProjectsBlock = DB.Metadata.Signposts.ProjectsBlock.Ptr ? DB.Metadata.Signposts.ProjectsBlock.Ptr : LocateBlock(B_PROJ); - string SearchLocationL = Wrap0i(ProjectsBlock->GlobalSearchDir, sizeof(ProjectsBlock->GlobalSearchDir)); + string SearchLocationL = GetGlobalSearchPageLocation(); File->Path = ConstructHTMLIndexFilePath(0, &SearchLocationL, 0); return ReadFileIntoBuffer(File); } @@ -5673,23 +5847,33 @@ UpdateAssetInDB(asset *Asset) if(StoredAsset) { - StoredAsset->Associated = Asset->Associated; - StoredAsset->Variants = Asset->Variants; - StoredAsset->Width = Asset->Dimensions.Width; - StoredAsset->Height = Asset->Dimensions.Height; + bool Changed = FALSE; + if(StoredAsset->Associated != Asset->Associated || + StoredAsset->Variants != Asset->Variants || + StoredAsset->Width != Asset->Dimensions.Width || + StoredAsset->Height != Asset->Dimensions.Height) + { + StoredAsset->Associated = Asset->Associated; + StoredAsset->Variants = Asset->Variants; + StoredAsset->Width = Asset->Dimensions.Width; + StoredAsset->Height = Asset->Dimensions.Height; + + Changed = TRUE; + } if(StoredAsset->Hash != Asset->Hash) { + // NOTE(matt): Extra-db code start char OldChecksum[16]; ClearCopyString(OldChecksum, sizeof(OldChecksum), "%08x", StoredAsset->Hash); char NewChecksum[16]; ClearCopyString(NewChecksum, sizeof(NewChecksum), "%08x", Asset->Hash); + // NOTE(matt): 
Extra-db code end StoredAsset->Hash = Asset->Hash; - char *Ptr = (char *)Asset; - Ptr += sizeof(*Asset); + // NOTE(matt): Extra-db code start file AssetFile = {}; AssetFile.Path = ConstructAssetPath(&AssetFile, Wrap0i(StoredAsset->Filename, sizeof(StoredAsset->Filename)), StoredAsset->Type); ResolvePath(&AssetFile.Path); @@ -5712,14 +5896,20 @@ UpdateAssetInDB(asset *Asset) } FreeString(&Message); + // NOTE(matt): Extra-db code end + Changed = TRUE; + } + + if(Changed) + { DB.Metadata.File.Handle = fopen(DB.Metadata.File.Path, "w"); fwrite(DB.Metadata.File.Buffer.Location, DB.Metadata.File.Buffer.Size, 1, DB.Metadata.File.Handle); SetFileEditPosition(&DB.Metadata); CycleSignpostedFile(&DB.Metadata); - Asset->DeferredUpdate = FALSE; } + Asset->DeferredUpdate = FALSE; } else { @@ -7636,8 +7826,9 @@ enable syntax highlighting:")); fprintf(stderr, "\n"); } +#define PrintHelp() PrintHelp_(Args[0], ConfigPath) void -PrintHelp(char *BinaryLocation, char *DefaultConfigPath) +PrintHelp_(char *BinaryLocation, char *DefaultConfigPath) { // Options fprintf(stderr, @@ -7660,7 +7851,7 @@ PrintHelp(char *BinaryLocation, char *DefaultConfigPath) } void -PrintHelp_(char *BinaryLocation) +PrintHelp_Old(char *BinaryLocation) { #if AFE fprintf(stderr, @@ -8515,10 +8706,10 @@ typedef struct } neighbours; void -ExamineDB1(file File) +ExamineDB1(file *File) { database1 LocalDB; - LocalDB.Header = *(db_header1 *)File.Buffer.Location; + LocalDB.Header = *(db_header1 *)File->Buffer.Location; printf("Current:\n" "\tDBVersion: %d\n" "\tAppVersion: %d.%d.%d\n" @@ -8532,23 +8723,23 @@ ExamineDB1(file File) LocalDB.Header.EntryCount); - File.Buffer.Ptr = File.Buffer.Location + sizeof(LocalDB.Header); + File->Buffer.Ptr = File->Buffer.Location + sizeof(LocalDB.Header); for(int EntryIndex = 0; EntryIndex < LocalDB.Header.EntryCount; ++EntryIndex) { - LocalDB.Entry = *(db_entry1 *)File.Buffer.Ptr; + LocalDB.Entry = *(db_entry1 *)File->Buffer.Ptr; printf(" %3d\t%s%sSize: %4d\n", EntryIndex + 1, 
LocalDB.Entry.BaseFilename, StringLength(LocalDB.Entry.BaseFilename) > 8 ? "\t" : "\t\t", // NOTE(matt): Janktasm LocalDB.Entry.Size); - File.Buffer.Ptr += sizeof(LocalDB.Entry); + File->Buffer.Ptr += sizeof(LocalDB.Entry); } } void -ExamineDB2(file File) +ExamineDB2(file *File) { database2 LocalDB; - LocalDB.Header = *(db_header2 *)File.Buffer.Location; + LocalDB.Header = *(db_header2 *)File->Buffer.Location; printf("Current:\n" "\tDBVersion: %d\n" "\tAppVersion: %d.%d.%d\n" @@ -8568,23 +8759,23 @@ ExamineDB2(file File) LocalDB.Header.EntryCount); - File.Buffer.Ptr = File.Buffer.Location + sizeof(LocalDB.Header); + File->Buffer.Ptr = File->Buffer.Location + sizeof(LocalDB.Header); for(int EntryIndex = 0; EntryIndex < LocalDB.Header.EntryCount; ++EntryIndex) { - LocalDB.Entry = *(db_entry2 *)File.Buffer.Ptr; + LocalDB.Entry = *(db_entry2 *)File->Buffer.Ptr; printf(" %3d\t%s%sSize: %4d\n", EntryIndex + 1, LocalDB.Entry.BaseFilename, StringLength(LocalDB.Entry.BaseFilename) > 8 ? "\t" : "\t\t", // NOTE(matt): Janktasm LocalDB.Entry.Size); - File.Buffer.Ptr += sizeof(LocalDB.Entry); + File->Buffer.Ptr += sizeof(LocalDB.Entry); } } void -ExamineDB3(file File) +ExamineDB3(file *File) { database3 LocalDB; - LocalDB.Header = *(db_header3 *)File.Buffer.Location; + LocalDB.Header = *(db_header3 *)File->Buffer.Location; printf("Current:\n" "\tDBVersion: %d\n" "\tAppVersion: %d.%d.%d\n" @@ -8623,10 +8814,10 @@ ExamineDB3(file File) LocalDB.Header.EntryCount); - File.Buffer.Ptr = File.Buffer.Location + sizeof(LocalDB.Header); + File->Buffer.Ptr = File->Buffer.Location + sizeof(LocalDB.Header); for(int EntryIndex = 0; EntryIndex < LocalDB.Header.EntryCount; ++EntryIndex) { - LocalDB.Entry = *(db_entry3 *)File.Buffer.Ptr; + LocalDB.Entry = *(db_entry3 *)File->Buffer.Ptr; printf(" %3d\t%s%sSize: %4d\t%d\t%d\t%d\t%d\n" "\t %s\n", EntryIndex + 1, LocalDB.Entry.BaseFilename, @@ -8637,18 +8828,18 @@ ExamineDB3(file File) LocalDB.Entry.LinkOffsets.NextStart, 
LocalDB.Entry.LinkOffsets.NextEnd, LocalDB.Entry.Title); - File.Buffer.Ptr += sizeof(LocalDB.Entry); + File->Buffer.Ptr += sizeof(LocalDB.Entry); } } void -ExamineDB4(file File) +ExamineDB4(file *File) { #if AFE database4 LocalDB; - LocalDB.Header = *(db_header4 *)File.Buffer.Location; + LocalDB.Header = *(db_header4 *)File->Buffer.Location; - LocalDB.EntriesHeader = *(db_header_entries4 *)(File.Buffer.Location + sizeof(LocalDB.Header)); + LocalDB.EntriesHeader = *(db_header_entries4 *)(File->Buffer.Location + sizeof(LocalDB.Header)); printf("Current:\n" "\tDBVersion: %d\n" "\tAppVersion: %d.%d.%d\n" @@ -8693,10 +8884,10 @@ ExamineDB4(file File) LocalDB.EntriesHeader.Count); - File.Buffer.Ptr = File.Buffer.Location + sizeof(LocalDB.Header) + sizeof(LocalDB.EntriesHeader); + File->Buffer.Ptr = File->Buffer.Location + sizeof(LocalDB.Header) + sizeof(LocalDB.EntriesHeader); for(int EntryIndex = 0; EntryIndex < LocalDB.EntriesHeader.Count; ++EntryIndex) { - LocalDB.Entry = *(db_entry4 *)File.Buffer.Ptr; + LocalDB.Entry = *(db_entry4 *)File->Buffer.Ptr; printf(" %3d\t%s%sSize: %4d\t%d\t%d\t%d\t%d\n" "\t %s\n", EntryIndex, LocalDB.Entry.BaseFilename, @@ -8707,11 +8898,11 @@ ExamineDB4(file File) LocalDB.Entry.LinkOffsets.NextStart, LocalDB.Entry.LinkOffsets.NextEnd, LocalDB.Entry.Title); - File.Buffer.Ptr += sizeof(LocalDB.Entry); + File->Buffer.Ptr += sizeof(LocalDB.Entry); } - LocalDB.AssetsHeader = *(db_header_assets4 *)File.Buffer.Ptr; - File.Buffer.Ptr += sizeof(LocalDB.AssetsHeader); + LocalDB.AssetsHeader = *(db_header_assets4 *)File->Buffer.Ptr; + File->Buffer.Ptr += sizeof(LocalDB.AssetsHeader); printf( "\n" "Asset Root Directory: %s\n" "Asset Root URL: %s\n" @@ -8735,7 +8926,7 @@ ExamineDB4(file File) for(int AssetIndex = 0; AssetIndex < LocalDB.AssetsHeader.Count; ++AssetIndex) { - LocalDB.Asset = *(db_asset4*)File.Buffer.Ptr; + LocalDB.Asset = *(db_asset4*)File->Buffer.Ptr; printf("\n" "%s\n" "Type: %s\n" @@ -8746,11 +8937,11 @@ ExamineDB4(file File) 
LocalDB.Asset.Hash, LocalDB.Asset.LandmarkCount); - File.Buffer.Ptr += sizeof(LocalDB.Asset); + File->Buffer.Ptr += sizeof(LocalDB.Asset); for(int LandmarkIndex = 0; LandmarkIndex < LocalDB.Asset.LandmarkCount; ++LandmarkIndex) { - LocalDB.Landmark = *(db_landmark *)File.Buffer.Ptr; - File.Buffer.Ptr += sizeof(LocalDB.Landmark); + LocalDB.Landmark = *(db_landmark *)File->Buffer.Ptr; + File->Buffer.Ptr += sizeof(LocalDB.Landmark); printf(" %d•%s%d%s", LocalDB.Landmark.EntryIndex, ColourStrings[CS_MAGENTA], LocalDB.Landmark.Position, ColourStrings[CS_END]); } printf("\n"); @@ -8912,14 +9103,14 @@ PrintProjectsBlock(db_block_projects *B) } void -ExamineDB5(file File) +ExamineDB5(file *File) { - File.Buffer.Ptr = File.Buffer.Location; + File->Buffer.Ptr = File->Buffer.Location; database5 LocalDB; - if(File.Buffer.Size >= sizeof(LocalDB.Header)) + if(File->Buffer.Size >= sizeof(LocalDB.Header)) { - LocalDB.Header = *(db_header5 *)File.Buffer.Ptr; + LocalDB.Header = *(db_header5 *)File->Buffer.Ptr; fprintf(stderr, "\n"); PrintC(CS_BLUE_BOLD, "Versions"); fprintf(stderr, @@ -8941,24 +9132,24 @@ ExamineDB5(file File) LocalDB.Header.InitialAppVersion.Major, LocalDB.Header.InitialAppVersion.Minor, LocalDB.Header.InitialAppVersion.Patch, LocalDB.Header.InitialDBVersion, LocalDB.Header.InitialHMMLVersion.Major, LocalDB.Header.InitialHMMLVersion.Minor, LocalDB.Header.InitialHMMLVersion.Patch); - File.Buffer.Ptr += sizeof(db_header5); + File->Buffer.Ptr += sizeof(db_header5); for(int i = 0; i < LocalDB.Header.BlockCount; ++i) { - int Four = *(int *)File.Buffer.Ptr; + int Four = *(int *)File->Buffer.Ptr; if(Four == FOURCC("ASET")) { - db_block_assets *AssetsBlock = (db_block_assets *)File.Buffer.Ptr; - File.Buffer.Ptr = PrintAssetsBlock(AssetsBlock); + db_block_assets *AssetsBlock = (db_block_assets *)File->Buffer.Ptr; + File->Buffer.Ptr = PrintAssetsBlock(AssetsBlock); } else if(Four == FOURCC("PROJ")) { - db_block_projects *ProjectsBlock = (db_block_projects *)File.Buffer.Ptr; 
- File.Buffer.Ptr = PrintProjectsBlock(ProjectsBlock); + db_block_projects *ProjectsBlock = (db_block_projects *)File->Buffer.Ptr; + File->Buffer.Ptr = PrintProjectsBlock(ProjectsBlock); } else { fprintf(stderr, "\n" - "Invalid database file: %s", File.Path); + "Invalid database file: %s", File->Path); break; } } @@ -8968,41 +9159,36 @@ ExamineDB5(file File) } void -ExamineDB(void) +ExamineDB(file *DB) { - DB.Metadata.File.Buffer.ID = BID_DATABASE; - DB.Metadata.File = InitFile(0, &Config->DatabaseLocation, EXT_NULL); - ReadFileIntoBuffer(&DB.Metadata.File); // NOTE(matt): Could we actually catch errors (permissions?) here and bail? - - if(DB.Metadata.File.Buffer.Location) + if(DB->Buffer.Location) { - uint32_t FirstInt = *(uint32_t *)DB.Metadata.File.Buffer.Location; + uint32_t FirstInt = *(uint32_t *)DB->Buffer.Location; if(FirstInt != FOURCC("CNRA")) { switch(FirstInt) { - case 1: ExamineDB1(DB.Metadata.File); break; - case 2: ExamineDB2(DB.Metadata.File); break; - case 3: ExamineDB3(DB.Metadata.File); break; - default: printf("Invalid database file: %s\n", DB.Metadata.File.Path); break; + case 1: ExamineDB1(DB); break; + case 2: ExamineDB2(DB); break; + case 3: ExamineDB3(DB); break; + default: printf("Invalid database file: %s\n", DB->Path); break; } } else { - uint32_t SecondInt = *(uint32_t *)(DB.Metadata.File.Buffer.Location + sizeof(uint32_t)); + uint32_t SecondInt = *(uint32_t *)(DB->Buffer.Location + sizeof(uint32_t)); switch(SecondInt) { - case 4: ExamineDB4(DB.Metadata.File); break; - case 5: ExamineDB5(DB.Metadata.File); break; - default: printf("Invalid database file: %s\n", DB.Metadata.File.Path); break; + case 4: ExamineDB4(DB); break; + case 5: ExamineDB5(DB); break; + default: printf("Invalid database file: %s\n", DB->Path); break; } } } else { - fprintf(stderr, "Unable to open database file %s: %s\n", DB.Metadata.File.Path, strerror(errno)); + fprintf(stderr, "Unable to open database file %s: %s\n", DB->Path, strerror(errno)); } - 
FreeFile(&DB.Metadata.File); } #define HMMLCleanup() \ @@ -10224,9 +10410,9 @@ HMMLToBuffers(buffers *CollationBuffers, template *BespokeTemplate, string BaseF #if DEBUG printf( "================================================================================\n" - "%s\n" + "%.*s\n" "================================================================================\n", - Filename); + (int)BaseFilename.Length, BaseFilename.Base); #endif // NOTE(matt): Tree structure of "global" buffer dependencies // Master @@ -11228,7 +11414,7 @@ BuffersToHTML(config *C, project *Project, buffers *CollationBuffers, template * MEM_TEST_TOP("BuffersToHTML()"); #if DEBUG printf("\n\n --- Buffer Collation ---\n" - " %s\n\n\n", OutputPath ? OutputPath : Project->OutLocation); + " %s\n\n\n", OutputPath); #endif #if DEBUG_MEM @@ -15041,50 +15227,53 @@ InitProjectInDBPostamble(project_generations *OldAcc, project_generations *NewAc } void -InsertProjectIntoDB_TopLevel(project_generations *G, db_block_projects **Block, db_header_project **Child, project *P) +InsertProjectIntoDB(project_generations *G, db_block_projects **Block, db_header_project **Parent, db_header_project **Child, project *P) { + bool GotBlock = Block ? TRUE : FALSE; + bool GotParent = Parent ? 
TRUE : FALSE; + Assert(GotBlock ^ GotParent); + uint64_t Byte = 0; DB.Metadata.File.Handle = fopen(DB.Metadata.File.Path, "w"); - WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Block); - db_block_projects NewBlock = **Block; - ++NewBlock.Count; - fwrite(&NewBlock, sizeof(db_block_projects), 1, DB.Metadata.File.Handle); - Byte += sizeof(db_block_projects); + uint64_t PPos; + if(GotBlock) + { + PPos = (char *)*Block - DB.Metadata.File.Buffer.Location; + WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Block); + + db_block_projects NewBlock = **Block; + ++NewBlock.Count; + fwrite(&NewBlock, sizeof(db_block_projects), 1, DB.Metadata.File.Handle); + Byte += sizeof(db_block_projects); + } + else + { + PPos = (char *)*Parent - DB.Metadata.File.Buffer.Location; + WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Parent); + + db_header_project NewParent = **Parent; + ++NewParent.ChildCount; + fwrite(&NewParent, sizeof(db_header_project), 1, DB.Metadata.File.Handle); + Byte += sizeof(db_header_project); + } WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Child); SetFileEditPosition(&DB.Metadata); project_generations OldG = CopyAccumulator(G); InitProjectInDBRecursively(G, P); - uint64_t BlockPos = (char *)*Block - DB.Metadata.File.Buffer.Location; + *Child = InitProjectInDBPostamble(&OldG, G); - *Block = (db_block_projects *)(DB.Metadata.File.Buffer.Location + BlockPos); - FreeBook(&OldG.EntriesInGeneration); -} - -void -InsertProjectIntoDB(project_generations *G, db_header_project **Parent, db_header_project **Child, project *P) -{ - uint64_t Byte = 0; - - DB.Metadata.File.Handle = fopen(DB.Metadata.File.Path, "w"); - WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Parent); - - db_header_project NewParent = **Parent; - ++NewParent.ChildCount; - fwrite(&NewParent, sizeof(db_header_project), 1, DB.Metadata.File.Handle); - Byte += sizeof(db_header_project); - - WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Child); - SetFileEditPosition(&DB.Metadata); - - 
project_generations OldG = CopyAccumulator(G); - InitProjectInDBRecursively(G, P); - uint64_t PPos = (char *)*Parent - DB.Metadata.File.Buffer.Location; - *Child = InitProjectInDBPostamble(&OldG, G); - *Parent = (db_header_project *)(DB.Metadata.File.Buffer.Location + PPos); + if(GotBlock) + { + *Block = (db_block_projects *)(DB.Metadata.File.Buffer.Location + PPos); + } + else + { + *Parent = (db_header_project *)(DB.Metadata.File.Buffer.Location + PPos); + } FreeBook(&OldG.EntriesInGeneration); } @@ -15260,66 +15449,68 @@ DeleteProjectInterior(db_header_project **Child, project_generations *G, uint64_ } void -DeleteProject_TopLevel(db_block_projects **Parent, db_header_project **Child, project_generations *G) -{ - PrintFunctionName("DeleteProject_TopLevel()"); - // TODO(matt); Print out something sensible to inform real life users - // TODO(matt): - // - // 0:1 - // 1:4 - uint64_t PPos = (char *)*Parent - DB.Metadata.File.Buffer.Location; - uint64_t CPos = (char *)*Child - DB.Metadata.File.Buffer.Location; - - DB.Metadata.File.Handle = fopen(DB.Metadata.File.Path, "w"); - uint64_t Byte = 0; - - WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Parent); - - db_block_projects NewParent = **Parent; - --NewParent.Count; - - fwrite(&NewParent, sizeof(db_block_projects), 1, DB.Metadata.File.Handle); - Byte += sizeof(db_block_projects); - - DeleteProjectInterior(Child, G, &Byte); - - *Parent = (db_block_projects *)(DB.Metadata.File.Buffer.Location + PPos); - *Child = (db_header_project *)(DB.Metadata.File.Buffer.Location + CPos); -} - -void -DeleteProject(db_header_project **Parent, db_header_project **Child, project_generations *G) +DeleteProject(db_block_projects **Block, db_header_project **Parent, db_header_project **Child, project_generations *G) { PrintFunctionName("DeleteProject()"); + + bool GotBlock = Block ? TRUE : FALSE; + bool GotParent = Parent ? 
TRUE : FALSE; + Assert(GotBlock ^ GotParent); + // TODO(matt); Print out something sensible to inform real life users // TODO(matt): // // 0:1 // 1:4 - uint64_t PPos = (char *)*Parent - DB.Metadata.File.Buffer.Location; + uint64_t PPos; + if(GotBlock) + { + PPos = (char *)*Block - DB.Metadata.File.Buffer.Location; + } + else + { + PPos = (char *)*Parent - DB.Metadata.File.Buffer.Location; + } uint64_t CPos = (char *)*Child - DB.Metadata.File.Buffer.Location; //db_project_index Index = GetCurrentProjectIndex(G); DB.Metadata.File.Handle = fopen(DB.Metadata.File.Path, "w"); uint64_t Byte = 0; - WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Parent); + if(GotBlock) + { + WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Block); - db_header_project NewParent = **Parent; - --NewParent.ChildCount; + db_block_projects NewBlock = **Block; + --NewBlock.Count; + fwrite(&NewBlock, sizeof(db_block_projects), 1, DB.Metadata.File.Handle); + Byte += sizeof(db_block_projects); + } + else + { + WriteFromByteToPointer(&DB.Metadata.File, &Byte, *Parent); + + db_header_project NewParent = **Parent; + --NewParent.ChildCount; + fwrite(&NewParent, sizeof(db_header_project), 1, DB.Metadata.File.Handle); + Byte += sizeof(db_header_project); + } - fwrite(&NewParent, sizeof(db_header_project), 1, DB.Metadata.File.Handle); - Byte += sizeof(db_header_project); DeleteProjectInterior(Child, G, &Byte); - *Parent = (db_header_project *)(DB.Metadata.File.Buffer.Location + PPos); + if(GotBlock) + { + *Block = (db_block_projects *)(DB.Metadata.File.Buffer.Location + PPos); + } + else + { + *Parent = (db_header_project *)(DB.Metadata.File.Buffer.Location + PPos); + } *Child = (db_header_project *)(DB.Metadata.File.Buffer.Location + CPos); } -void SyncProjects(project_generations *G, project *C, db_header_project **SParent, db_header_project **SChild); -void SyncProjects_TopLevel(project_generations *G, project *C, db_block_projects **SParent, db_header_project **SChild); +void 
SyncProjects(project_generations *G, project *C, db_block_projects **Block, db_header_project **SParent, db_header_project **SChild); db_landmark * DetermineFirstLandmarkAndRangeOfProjectHierarchy(db_asset *A, @@ -15455,46 +15646,65 @@ ReorganiseProjectsInterior(project_generations *G, project *CChild, uint64_t Chi } bool -ReorganiseProjects(project_generations *G, project *CChild, db_header_project **SParent, uint64_t ChildIndex, db_header_project **SChild) +ReorganiseProjects(project_generations *G, project *CChild, + db_block_projects **Block, db_header_project **SParent, + uint64_t ChildIndex, db_header_project **SChild) { - uint64_t SParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; - uint64_t SChildPos = (char *)*SChild - DB.Metadata.File.Buffer.Location; + bool GotBlock = Block ? TRUE : FALSE; + bool GotParent = SParent ? TRUE : FALSE; + Assert(GotBlock ^ GotParent); - bool Result = ReorganiseProjectsInterior(G, CChild, ChildIndex, (*SParent)->ChildCount, *SChild); - - if(Result == TRUE) + uint64_t ParentPos; + uint64_t ChildCount; + if(GotBlock) { - *SParent = (db_header_project *)(DB.Metadata.File.Buffer.Location + SParentPos); - *SChild = (db_header_project *)(DB.Metadata.File.Buffer.Location + SChildPos); - AddEntryToGeneration(G, CChild); - SyncProjects(G, CChild, SParent, SChild); + ParentPos = (char *)*Block - DB.Metadata.File.Buffer.Location; + ChildCount = (*Block)->Count; + } + else + { + ParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; + ChildCount = (*SParent)->ChildCount; } - return Result; -} - -bool -ReorganiseProjects_TopLevel(project_generations *G, project *CChild, db_block_projects **SParent, uint64_t ChildIndex, db_header_project **SChild) -{ - uint64_t SParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; uint64_t SChildPos = (char *)*SChild - DB.Metadata.File.Buffer.Location; - bool Result = ReorganiseProjectsInterior(G, CChild, ChildIndex, (*SParent)->Count, *SChild); + bool Result = 
ReorganiseProjectsInterior(G, CChild, ChildIndex, ChildCount, *SChild); if(Result == TRUE) { - *SParent = (db_block_projects *)(DB.Metadata.File.Buffer.Location + SParentPos); *SChild = (db_header_project *)(DB.Metadata.File.Buffer.Location + SChildPos); AddEntryToGeneration(G, CChild); - SyncProjects_TopLevel(G, CChild, SParent, SChild); + + if(GotBlock) + { + *Block = (db_block_projects *)(DB.Metadata.File.Buffer.Location + ParentPos); + } + else + { + *SParent = (db_header_project *)(DB.Metadata.File.Buffer.Location + ParentPos); + } + SyncProjects(G, CChild, Block, SParent, SChild); } return Result; } void -SyncProjects_TopLevel(project_generations *G, project *C, db_block_projects **SParent, db_header_project **SChild) +SyncProjects(project_generations *G, project *C, db_block_projects **Block, db_header_project **SParent, db_header_project **SChild) { + bool GotBlock = Block ? TRUE : FALSE; + bool GotParent = SParent ? TRUE : FALSE; + Assert(GotBlock ^ GotParent); + uint64_t SChildPos = (char *)*SChild - DB.Metadata.File.Buffer.Location; - uint64_t SParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; + uint64_t ParentPos; + if(GotBlock) + { + ParentPos = (char *)*Block - DB.Metadata.File.Buffer.Location; + } + else + { + ParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; + } IncrementCurrentGeneration(G); @@ -15510,17 +15720,17 @@ SyncProjects_TopLevel(project_generations *G, project *C, db_block_projects **SP { Located = TRUE; AddEntryToGeneration(G, CChild); - SyncProjects(G, CChild, SChild, &SGrandChild); + SyncProjects(G, CChild, 0, SChild, &SGrandChild); } else { - Located = ReorganiseProjects(G, CChild, SChild, ci, &SGrandChild); + Located = ReorganiseProjects(G, CChild, 0, SChild, ci, &SGrandChild); } } if(!Located) { - InsertProjectIntoDB(G, SChild, &SGrandChild, CChild); + InsertProjectIntoDB(G, 0, SChild, &SGrandChild, CChild); } SGrandChild = SkipProjectAndChildren(SGrandChild); @@ -15529,64 +15739,7 @@ 
SyncProjects_TopLevel(project_generations *G, project *C, db_block_projects **SP uint64_t DeletionCount = (*SChild)->ChildCount - C->Child.ItemCount; for(int DeletionIndex = 0; DeletionIndex < DeletionCount; ++DeletionIndex) { - DeleteProject(SChild, &SGrandChild, G); - } - - if((*SChild)->ChildCount == 0 && (*SChild)->EntryCount == 0) - { - string BaseDir = Wrap0i((*SChild)->BaseDir, sizeof((*SChild)->BaseDir)); - string SearchLocation = Wrap0i((*SChild)->SearchLocation, sizeof((*SChild)->SearchLocation)); - string ProjectID = Wrap0i((*SChild)->ID, sizeof((*SChild)->ID)); - DeleteSearchPageFromFilesystem(BaseDir, SearchLocation, ProjectID); - DeleteLandmarksForSearch(C->Index); - DeleteStaleAssets(); - } - - DecrementCurrentGeneration(G); - *SParent = (db_block_projects *)(DB.Metadata.File.Buffer.Location + SParentPos); - *SChild = (db_header_project *)(DB.Metadata.File.Buffer.Location + SChildPos); -} - -void -SyncProjects(project_generations *G, project *C, db_header_project **SParent, db_header_project **SChild) -{ - uint64_t SChildPos = (char *)*SChild - DB.Metadata.File.Buffer.Location; - uint64_t SParentPos = (char *)*SParent - DB.Metadata.File.Buffer.Location; - - IncrementCurrentGeneration(G); - - db_header_project *SGrandChild = LocateFirstChildProject(*SChild); - - for(int ci = 0; ci < C->Child.ItemCount; ++ci) - { - bool Located = FALSE; - project *CChild = GetPlaceInBook(&C->Child, ci); - if(ci < (*SChild)->ChildCount) - { - if(ConfiguredAndStoredProjectIDsMatch(CChild, SGrandChild)) - { - Located = TRUE; - AddEntryToGeneration(G, CChild); - SyncProjects(G, CChild, SChild, &SGrandChild); - } - else - { - Located = ReorganiseProjects(G, CChild, SChild, ci, &SGrandChild); - } - } - - if(!Located) - { - InsertProjectIntoDB(G, SChild, &SGrandChild, CChild); - } - - SGrandChild = SkipProjectAndChildren(SGrandChild); - } - - uint64_t DeletionCount = (*SChild)->ChildCount - C->Child.ItemCount; - for(int DeletionIndex = 0; DeletionIndex < DeletionCount; 
++DeletionIndex) - { - DeleteProject(SChild, &SGrandChild, G); + DeleteProject(0, SChild, &SGrandChild, G); } if((*SChild)->ChildCount == 0 && (*SChild)->EntryCount == 0) @@ -15600,7 +15753,14 @@ SyncProjects(project_generations *G, project *C, db_header_project **SParent, db } DecrementCurrentGeneration(G); - *SParent = (db_header_project *)(DB.Metadata.File.Buffer.Location + SParentPos); + if(GotBlock) + { + *Block = (db_block_projects *)(DB.Metadata.File.Buffer.Location + ParentPos); + } + else + { + *SParent = (db_header_project *)(DB.Metadata.File.Buffer.Location + ParentPos); + } *SChild = (db_header_project *)(DB.Metadata.File.Buffer.Location + SChildPos); } @@ -15631,17 +15791,17 @@ SyncDB(config *C) { Located = TRUE; AddEntryToGeneration(&Accumulator, CChild); - SyncProjects_TopLevel(&Accumulator, CChild, &SParent, &SChild); + SyncProjects(&Accumulator, CChild, &SParent, 0, &SChild); } else { - Located = ReorganiseProjects_TopLevel(&Accumulator, CChild, &SParent, ci, &SChild); + Located = ReorganiseProjects(&Accumulator, CChild, &SParent, 0, ci, &SChild); } } if(!Located) { - InsertProjectIntoDB_TopLevel(&Accumulator, &SParent, &SChild, CChild); + InsertProjectIntoDB(&Accumulator, &SParent, 0, &SChild, CChild); } SChild = SkipProjectAndChildren(SChild); @@ -15650,7 +15810,7 @@ SyncDB(config *C) uint64_t DeletionCount = SParent->Count - C->Project.ItemCount; for(int DeletionIndex = 0; DeletionIndex < DeletionCount; ++DeletionIndex) { - DeleteProject_TopLevel(&SParent, &SChild, &Accumulator); + DeleteProject(&SParent, 0, &SChild, &Accumulator); } DeleteStaleAssets(); @@ -15997,6 +16157,13 @@ RemoveAndFreeWatchHandles(watch_handles *W) void DiscardAllAndFreeConfig(void) { +#if 0 + job *J = PushJob(&JobQueue, JTI_DB_FREE, 0, 0); + pthread_mutex_t Fake = {}; + pthread_mutex_init(&Fake, NULL); + pthread_cond_wait(&J->Complete, &Fake); +#endif + FreeSignpostedFile(&DB.Metadata); // NOTE(matt): This seems fine FreeFile(&DB.File); // NOTE(matt): This seems fine 
FreeAssets(&Assets); // NOTE(matt): This seems fine @@ -16274,7 +16441,7 @@ Exit(void) { PrintC(CS_SUCCESS, "\nExiting cleanly. Thank you for indexing with Cinera\n"); } - _exit(0); + _exit(RC_SUCCESS); } void @@ -16291,6 +16458,7 @@ Coda(int Sig) // NOTE(matt): I reckon this suffices because we are single-threaded. A multi-threaded system may require a volatile // sig_atomic_t Boolean GlobalRunning = FALSE; + PushJob(&JobQueue, JTI_DB_SHUTDOWN, 0, 0); } void @@ -16304,6 +16472,90 @@ InitInterruptHandler(void) sigaction(SIGINT, &CleanExit, 0); } +void * +ConsumeJobs(void *Data) +{ + void *Result = 0; + return Result; +} + +void * +DatabaseWorkerFunc(void *Data_) +{ + void *Result = 0; + while(GlobalRunning) + { + PrintFunctionName(); + PrintJobQueue(&JobQueue); + pthread_cond_wait(&JobQueue.Populated, &JobQueue.NextJobToTakeMutex); + + PrintJobQueue(&JobQueue); + //pthread_mutex_unlock(&JobQueue.NextJobToTakeMutex); + while(QueueIsPopulated(&JobQueue)) + { + job *Job = TakeJob(&JobQueue); +#if 1 + switch(Job->Type) + { + case JTI_DB_INIT: + { + InitDB(); + } break; + case JTI_DB_FREE: + { + FreeSignpostedFile(&DB.Metadata); // NOTE(matt): This seems fine + } break; + default: break; + } +#endif + Job->Type = JTI_NULL; + Job->OutputDest = 0; + pthread_cond_signal(&Job->Complete); + PrintJobQueue(&JobQueue); + } + } + + FreeSignpostedFile(&DB.Metadata); // NOTE(matt): This seems fine + DebugPrint("Exiting %s\n", __FUNCTION__); + pthread_exit(Result); +} + +typedef struct +{ + neighbourhood *Neighbourhood; + buffers *CollationBuffers; + template *BespokeTemplate; + string ConfigPathL; + memory_book *TokensList; +} file_io_worker_data; + +void * +FileIOWorkerFunc(void *Data_) +{ + Assert(Data_); + void *Result = 0; + file_io_worker_data Data = *(file_io_worker_data *)Data_; + while(GlobalRunning && MonitorFilesystem(Data.Neighbourhood, Data.CollationBuffers, Data.BespokeTemplate, Data.ConfigPathL, Data.TokensList) != RC_ARENA_FULL) + { + // TODO(matt): Refetch the 
quotes and rebuild player pages if needed + // + // Every sixty mins, redownload the quotes and, I suppose, SyncDBWithInput(). But here we still don't even know + // who the speaker is. To know, we'll probably have to store all quoted speakers in the project's .metadata. Maybe + // postpone this for now, but we will certainly need this to happen + // + // The most ideal solution is possibly that we store quote numbers in the Metadata->Entry, listen for and handle a + // REST PUT request from insobot when a quote changes (unless we're supposed to poll insobot for them?), and rebuild + // the player page(s) accordingly. + // + if(!(Mode & MODE_DRYRUN) && Config && Config->RespectingPrivacy && time(0) - LastPrivacyCheck > Config->PrivacyCheckInterval) + { + RecheckPrivacy(Data.Neighbourhood, Data.CollationBuffers, Data.BespokeTemplate); + } + sleep(GLOBAL_UPDATE_INTERVAL); + } + DebugPrint("Exiting %s\n", __FUNCTION__); + pthread_exit(Result); +} int main(int ArgC, char **Args) @@ -16327,11 +16579,10 @@ main(int ArgC, char **Args) case 'v': PrintVersions(); _exit(RC_SUCCESS); - break; case 'h': default: - PrintHelp(Args[0], ConfigPath); - return RC_SUCCESS; + PrintHelp(); + _exit(RC_SUCCESS); } } @@ -16392,8 +16643,13 @@ main(int ArgC, char **Args) { if(Mode & MODE_EXAMINE) { - // TODO(matt): Allow optionally passing a .db file as an argument? - ExamineDB(); + DB.Metadata.File.Buffer.ID = BID_DATABASE; + DB.Metadata.File = InitFile(0, &Config->DatabaseLocation, EXT_NULL); + ReadFileIntoBuffer(&DB.Metadata.File); // NOTE(matt): Could we actually catch errors (permissions?) here and bail? 
+
+            ExamineDB(&DB.Metadata.File);
+
+            FreeFile(&DB.Metadata.File);
 
             _exit(RC_SUCCESS);
         }
@@ -16403,6 +16659,8 @@
         }
         else
         {
+            // TODO(matt): By now I reckon we'd want to have created the database thread
+
 /*  */      MEM_TEST_MID("main()");
 /* +MEM */  Succeeding = InitAll(&Neighbourhood, &CollationBuffers, &BespokeTemplate);
 /*  */      MEM_TEST_MID("main()");
@@ -16412,7 +16670,7 @@
         {
             if(ArgC < 2)
             {
-                PrintHelp(Args[0], ConfigPath);
+                PrintHelp();
             }
             else
             {
@@ -16424,24 +16682,21 @@
             {
                 if(inotifyInstance != -1)
                 {
-                    while(GlobalRunning && MonitorFilesystem(&Neighbourhood, &CollationBuffers, &BespokeTemplate, ConfigPathL, &TokensList) != RC_ARENA_FULL)
-                    {
-                        // TODO(matt): Refetch the quotes and rebuild player pages if needed
-                        //
-                        // Every sixty mins, redownload the quotes and, I suppose, SyncDBWithInput(). But here we still don't even know
-                        // who the speaker is. To know, we'll probably have to store all quoted speakers in the project's .metadata. Maybe
-                        // postpone this for now, but we will certainly need this to happen
-                        //
-                        // The most ideal solution is possibly that we store quote numbers in the Metadata->Entry, listen for and handle a
-                        // REST PUT request from insobot when a quote changes (unless we're supposed to poll insobot for them?), and rebuild
-                        // the player page(s) accordingly.
-                        //
-                        if(!(Mode & MODE_DRYRUN) && Config && Config->RespectingPrivacy && time(0) - LastPrivacyCheck > Config->PrivacyCheckInterval)
-                        {
-                            RecheckPrivacy(&Neighbourhood, &CollationBuffers, &BespokeTemplate);
-                        }
-                        sleep(GLOBAL_UPDATE_INTERVAL);
-                    }
+                    pthread_t DatabaseWorker, FileIOWorker;
+                    pthread_create(&DatabaseWorker, 0, DatabaseWorkerFunc, 0);
+
+                    file_io_worker_data FileIOWorkerData = {};
+                    FileIOWorkerData.Neighbourhood = &Neighbourhood;
+                    FileIOWorkerData.CollationBuffers = &CollationBuffers;
+                    FileIOWorkerData.BespokeTemplate = &BespokeTemplate;
+                    FileIOWorkerData.ConfigPathL = ConfigPathL;
+                    FileIOWorkerData.TokensList = &TokensList;
+
+                    pthread_create(&FileIOWorker, 0, FileIOWorkerFunc, &FileIOWorkerData);
+
+                    pthread_join(DatabaseWorker, 0);
+                    pthread_join(FileIOWorker, 0);
+                    DebugPrint("Exited both threads\n");
                 }
                 else if(GlobalRunning)
                 {
@@ -16454,5 +16709,9 @@
             MEM_TEST_END("main()");
         }
     }
+    else
+    {
+        SystemError(0, 0, S_ERROR, "Config file path cannot be empty, aborting", 0);
+    }
     Exit();
 }