Compare commits

8 Commits

Author SHA1 Message Date
8c2cce59e4 Merge branch 'master' into write-async-await 2025-01-24 16:58:18 +08:00
baf50eeab0 Merge branch 'master' into write-async-await 2024-09-08 22:43:46 +08:00
dc1b97fed4 write how async await works 2024-08-25 16:32:50 +08:00
9bfe091024 Merge branch 'master' into write-async-await 2024-08-25 16:30:17 +08:00
3a4ada50c6 Merge branch 'master' into write-async-await 2024-08-23 20:49:21 +08:00
05a22a0b29 Merge branch 'refs/heads/master' into write-async-await
# Conflicts:
#	YaeBlog.Core/Extensions/WebApplicationBuilderExtensions.cs
#	YaeBlog/Components/App.razor
#	YaeBlog/Layout/BlogLayout.razor
#	YaeBlog/Layout/MainLayout.razor
#	YaeBlog/Layout/MainLayout.razor.css
#	YaeBlog/Pages/About.razor
#	YaeBlog/Pages/About.razor.css
#	YaeBlog/Pages/Archives.razor
#	YaeBlog/Pages/Archives.razor.css
#	YaeBlog/Pages/BlogIndex.razor
#	YaeBlog/Pages/BlogIndex.razor.css
#	YaeBlog/Pages/Index.razor
#	YaeBlog/Pages/NotFound.razor
#	YaeBlog/Pages/Tags.razor
#	YaeBlog/Pages/Tags.razor.css
2024-07-30 00:54:02 +08:00
6797028cc1 write 2024-07-20 20:06:43 +08:00
77e52fa11e add: bootstrap in blazor 2024-07-15 21:36:18 +08:00
403 changed files with 2323 additions and 4162 deletions

.gitattributes vendored

@@ -1,4 +1,2 @@
 *.png filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
-*.avif filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text

.gitea/workflows/build.yaml

@@ -7,7 +7,7 @@ jobs:
   Build-Blog-Image:
     runs-on: archlinux
     steps:
-      - uses: https://mirrors.rrricardo.top/actions/checkout.git@v4
+      - uses: https://git.rrricardo.top/actions/checkout@v4
         name: Check out code
         with:
           lfs: true
@@ -18,16 +18,12 @@ jobs:
       - name: Build docker image
         run: |
           cd YaeBlog
-          podman build . -t registry.cn-beijing.aliyuncs.com/jackfiled/blog:latest --build-arg COMMIT_ID=$(git rev-parse --short=10 HEAD)
+          docker build . -t registry.cn-beijing.aliyuncs.com/jackfiled/blog:latest
-      - name: Workaround to make sure podman login succeed
-        run: |
-          mkdir /root/.docker
       - name: Login aliyun docker registry
-        uses: https://mirrors.rrricardo.top/actions/podman-login.git@v1
+        uses: https://git.rrricardo.top/actions/login-action@v3
         with:
           registry: registry.cn-beijing.aliyuncs.com
           username: 初冬的朝阳
           password: ${{ secrets.ALIYUN_PASSWORD }}
-          auth_file_path: /etc/containers/auth.json
       - name: Push docker image
-        run: podman push registry.cn-beijing.aliyuncs.com/jackfiled/blog:latest
+        run: docker push registry.cn-beijing.aliyuncs.com/jackfiled/blog:latest

.gitignore vendored

@@ -484,4 +484,4 @@ $RECYCLE.BIN/
 *.swp
 # Tailwind auto-generated stylesheet
-*.g.css
+output.css

YaeBlog.sln Normal file

@@ -0,0 +1,41 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.0.31903.59
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "YaeBlog", "YaeBlog\YaeBlog.csproj", "{20438EFD-8DDE-43AF-92E2-76495C29233C}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = ".gitea", ".gitea", "{9B5AAA29-37D8-454A-8D8F-3E6B6BCF38E6}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "workflows", "workflows", "{ADBC3DA8-F65C-4B5D-A97A-DC351F8E6592}"
ProjectSection(SolutionItems) = preProject
.gitea\workflows\build.yaml = .gitea\workflows\build.yaml
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{753B998C-1B9E-498F-B949-845CE86C4075}"
ProjectSection(SolutionItems) = preProject
.editorconfig = .editorconfig
.gitattributes = .gitattributes
.gitignore = .gitignore
README.md = README.md
LICENSE = LICENSE
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{20438EFD-8DDE-43AF-92E2-76495C29233C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{20438EFD-8DDE-43AF-92E2-76495C29233C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{20438EFD-8DDE-43AF-92E2-76495C29233C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{20438EFD-8DDE-43AF-92E2-76495C29233C}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{ADBC3DA8-F65C-4B5D-A97A-DC351F8E6592} = {9B5AAA29-37D8-454A-8D8F-3E6B6BCF38E6}
EndGlobalSection
EndGlobal


@@ -1,14 +0,0 @@
<Solution>
<Folder Name="/.gitea/" />
<Folder Name="/.gitea/workflows/">
<File Path=".gitea/workflows/build.yaml" />
</Folder>
<Folder Name="/Solution Items/">
<File Path=".editorconfig" />
<File Path=".gitattributes" />
<File Path=".gitignore" />
<File Path="LICENSE" />
<File Path="README.md" />
</Folder>
<Project Path="YaeBlog/YaeBlog.csproj" />
</Solution>


@@ -7,4 +7,6 @@ public interface IEssayScanService
 public Task<BlogContents> ScanContents();
 public Task SaveBlogContent(BlogContent content, bool isDraft = true);
+public Task<ImageScanResult> ScanImages();
 }


@@ -1,21 +0,0 @@
using System.CommandLine.Binding;
using YaeBlog.Abstraction;
using YaeBlog.Services;
namespace YaeBlog.Commands.Binders;
public sealed class ImageCompressServiceBinder : BinderBase<ImageCompressService>
{
protected override ImageCompressService GetBoundValue(BindingContext bindingContext)
{
bindingContext.AddService(provider =>
{
IEssayScanService essayScanService = provider.GetRequiredService<IEssayScanService>();
ILogger<ImageCompressService> logger = provider.GetRequiredService<ILogger<ImageCompressService>>();
return new ImageCompressService(essayScanService, logger);
});
return bindingContext.GetRequiredService<ImageCompressService>();
}
}


@@ -1,6 +1,4 @@
 using System.CommandLine;
-using Microsoft.Extensions.Options;
-using YaeBlog.Abstraction;
 using YaeBlog.Commands.Binders;
 using YaeBlog.Components;
 using YaeBlog.Extensions;
@@ -21,7 +19,6 @@ public sealed class YaeBlogCommand
 AddNewCommand(_rootCommand);
 AddPublishCommand(_rootCommand);
 AddScanCommand(_rootCommand);
-AddCompressCommand(_rootCommand);
 }

 public Task<int> RunAsync(string[] args)
@@ -97,20 +94,22 @@ public sealed class YaeBlogCommand
 Argument<string> filenameArgument = new(name: "blog name", description: "The created blog filename.");
 newCommand.AddArgument(filenameArgument);

-newCommand.SetHandler(async (file, blogOption, _, essayScanService) =>
+newCommand.SetHandler(async (file, _, _, essayScanService) =>
 {
     BlogContents contents = await essayScanService.ScanContents();
-    if (contents.Posts.Any(content => content.BlogName == file))
+    if (contents.Posts.Any(content => content.FileName == file))
     {
         Console.WriteLine("There exists the same title blog in posts.");
         return;
     }

-    await essayScanService.SaveBlogContent(new BlogContent(
-        new FileInfo(Path.Combine(blogOption.Value.Root, "drafts", file + ".md")),
-        new MarkdownMetadata { Title = file, Date = DateTime.Now },
-        string.Empty, true, [], []));
+    await essayScanService.SaveBlogContent(new BlogContent
+    {
+        FileName = file,
+        FileContent = string.Empty,
+        Metadata = new MarkdownMetadata { Title = file, Date = DateTime.Now }
+    });

     Console.WriteLine($"Created new blog '{file}.");
 }, filenameArgument, new BlogOptionsBinder(), new LoggerBinder<EssayScanService>(),
@@ -127,15 +126,15 @@ public sealed class YaeBlogCommand
 BlogContents contents = await essyScanService.ScanContents();

 Console.WriteLine($"All {contents.Posts.Count} Posts:");
-foreach (BlogContent content in contents.Posts.OrderBy(x => x.BlogName))
+foreach (BlogContent content in contents.Posts.OrderBy(x => x.FileName))
 {
-    Console.WriteLine($" - {content.BlogName}");
+    Console.WriteLine($" - {content.FileName}");
 }

 Console.WriteLine($"All {contents.Drafts.Count} Drafts:");
-foreach (BlogContent content in contents.Drafts.OrderBy(x => x.BlogName))
+foreach (BlogContent content in contents.Drafts.OrderBy(x => x.FileName))
 {
-    Console.WriteLine($" - {content.BlogName}");
+    Console.WriteLine($" - {content.FileName}");
 }
 }, new BlogOptionsBinder(), new LoggerBinder<EssayScanService>(), new EssayScanServiceBinder());
 }
@@ -151,39 +150,32 @@ public sealed class YaeBlogCommand
 command.SetHandler(async (_, _, essayScanService, removeOptionValue) =>
 {
-    BlogContents contents = await essayScanService.ScanContents();
-    List<BlogImageInfo> unusedImages = (from content in contents
-        from image in content.Images
-        where image is { IsUsed: false }
-        select image).ToList();
+    ImageScanResult result = await essayScanService.ScanImages();

-    if (unusedImages.Count != 0)
+    if (result.UnusedImages.Count != 0)
     {
         Console.WriteLine("Found unused images:");
         Console.WriteLine("HINT: use '--rm' to remove unused images.");
     }

-    foreach (BlogImageInfo image in unusedImages)
+    foreach (FileInfo image in result.UnusedImages)
     {
-        Console.WriteLine($" - {image.File.FullName}");
+        Console.WriteLine($" - {image.FullName}");
     }

     if (removeOptionValue)
     {
-        foreach (BlogImageInfo image in unusedImages)
+        foreach (FileInfo image in result.UnusedImages)
         {
-            image.File.Delete();
+            image.Delete();
         }
     }

     Console.WriteLine("Used not existed images:");
-    foreach (BlogContent content in contents)
+    foreach (FileInfo image in result.NotFoundImages)
     {
-        foreach (FileInfo file in content.NotfoundImages)
-        {
-            Console.WriteLine($"- {file.Name} in {content.BlogName}");
-        }
+        Console.WriteLine($" - {image.FullName}");
     }
 }, new BlogOptionsBinder(), new LoggerBinder<EssayScanService>(), new EssayScanServiceBinder(), removeOption);
 }
@@ -201,7 +193,7 @@ public sealed class YaeBlogCommand
 BlogContents contents = await essayScanService.ScanContents();
 BlogContent? content = (from blog in contents.Drafts
-    where blog.BlogName == filename
+    where blog.FileName == filename
     select blog).FirstOrDefault();

 if (content is null)
@@ -210,17 +202,14 @@ public sealed class YaeBlogCommand
     return;
 }

-// 设置发布的时间
-content.Metadata.Date = DateTime.Now;
 // 将选中的博客文件复制到posts
 await essayScanService.SaveBlogContent(content, isDraft: false);

 // 复制图片文件夹
 DirectoryInfo sourceImageDirectory =
-    new(Path.Combine(blogOptions.Value.Root, "drafts", content.BlogName));
+    new(Path.Combine(blogOptions.Value.Root, "drafts", content.FileName));
 DirectoryInfo targetImageDirectory =
-    new(Path.Combine(blogOptions.Value.Root, "posts", content.BlogName));
+    new(Path.Combine(blogOptions.Value.Root, "posts", content.FileName));

 if (sourceImageDirectory.Exists)
 {
@@ -234,30 +223,9 @@ public sealed class YaeBlogCommand
 }

 // 删除原始的文件
-FileInfo sourceBlogFile = new(Path.Combine(blogOptions.Value.Root, "drafts", content.BlogName + ".md"));
+FileInfo sourceBlogFile = new(Path.Combine(blogOptions.Value.Root, "drafts", content.FileName + ".md"));
 sourceBlogFile.Delete();
 }, new BlogOptionsBinder(),
     new LoggerBinder<EssayScanService>(), new EssayScanServiceBinder(), filenameArgument);
 }

-private static void AddCompressCommand(RootCommand rootCommand)
-{
-    Command command = new("compress", "Compress png/jpeg image to webp image to reduce size.");
-    rootCommand.Add(command);
-
-    Option<bool> dryRunOption = new("--dry-run", description: "Dry run the compression task but not write.",
-        getDefaultValue: () => false);
-    command.AddOption(dryRunOption);
-
-    command.SetHandler(ImageCommandHandler,
-        new BlogOptionsBinder(), new LoggerBinder<EssayScanService>(), new LoggerBinder<ImageCompressService>(),
-        new EssayScanServiceBinder(), new ImageCompressServiceBinder(), dryRunOption);
-}
-
-private static async Task ImageCommandHandler(IOptions<BlogOptions> _, ILogger<EssayScanService> _1,
-    ILogger<ImageCompressService> _2,
-    IEssayScanService _3, ImageCompressService imageCompressService, bool dryRun)
-{
-    await imageCompressService.Compress(dryRun);
-}
 }


@@ -8,7 +8,7 @@
<link rel="stylesheet" href="YaeBlog.styles.css"/> <link rel="stylesheet" href="YaeBlog.styles.css"/>
<link rel="icon" href="images/favicon.ico"/> <link rel="icon" href="images/favicon.ico"/>
<link rel="stylesheet" href="globals.css"/> <link rel="stylesheet" href="globals.css"/>
<link rel="stylesheet" href="tailwind.g.css"/> <link rel="stylesheet" href="output.css"/>
<HeadOutlet/> <HeadOutlet/>
</head> </head>


@@ -7,15 +7,11 @@
<Anchor Address="https://dotnet.microsoft.com" Text="@DotnetVersion"/> <Anchor Address="https://dotnet.microsoft.com" Text="@DotnetVersion"/>
驱动。 驱动。
</p> </p>
<p class="text-md">
Build Commit #
<Anchor Address="@BuildCommitUrl" Text="@BuildCommitId"/>
</p>
</div> </div>
<div> <div>
<p class="text-md"> <p class="text-md">
<Anchor Address="https://beian.miit.gov.cn" Text="蜀ICP备2022004429号-1" NewPage="true"/> <a href="https://beian.miit.gov.cn" target="_blank" class="text-black">蜀ICP备2022004429号-1</a>
</p> </p>
</div> </div>
</div> </div>
@@ -23,8 +19,4 @@
 @code
 {
     private string DotnetVersion => $".NET {Environment.Version}";
-    private string BuildCommitId => Environment.GetEnvironmentVariable("COMMIT_ID") ?? "local_build";
-    private string BuildCommitUrl => $"https://git.rrricardo.top/jackfiled/YaeBlog/commit/{BuildCommitId}";
 }


@@ -1,8 +1,5 @@
 FROM mcr.microsoft.com/dotnet/aspnet:9.0

-ARG COMMIT_ID
-ENV COMMIT_ID=${COMMIT_ID}
 WORKDIR /app

 COPY bin/Release/net9.0/publish/ ./
 COPY source/ ./source/


@@ -1,12 +0,0 @@
namespace YaeBlog.Core.Exceptions;
public class BlogCommandException : Exception
{
public BlogCommandException(string message) : base(message)
{
}
public BlogCommandException(string message, Exception innerException) : base(message, innerException)
{
}
}


@@ -1,18 +0,0 @@
using AngleSharp.Dom;
namespace YaeBlog.Extensions;
public static class AngleSharpExtensions
{
public static IEnumerable<IElement> EnumerateParentElements(this IElement element)
{
IElement? e = element.ParentElement;
while (e is not null)
{
IElement c = e;
e = e.ParentElement;
yield return c;
}
}
}


@@ -1,20 +1,12 @@
 namespace YaeBlog.Models;

-/// <summary>
-/// 单个博客文件的所有数据和元数据
-/// </summary>
-/// <param name="BlogFile">博客文件</param>
-/// <param name="Metadata">文件中的MD元数据</param>
-/// <param name="Content">文件内容</param>
-/// <param name="IsDraft">是否为草稿</param>
-/// <param name="Images">博客中使用的文件</param>
-public record BlogContent(
-    FileInfo BlogFile,
-    MarkdownMetadata Metadata,
-    string Content,
-    bool IsDraft,
-    List<BlogImageInfo> Images,
-    List<FileInfo> NotfoundImages)
+public class BlogContent
 {
-    public string BlogName => BlogFile.Name.Split('.')[0];
+    public required string FileName { get; init; }
+
+    public required MarkdownMetadata Metadata { get; init; }
+
+    public required string FileContent { get; set; }
+
+    public bool IsDraft { get; set; } = false;
 }


@@ -1,15 +1,10 @@
-using System.Collections;
 using System.Collections.Concurrent;

 namespace YaeBlog.Models;

-public record BlogContents(ConcurrentBag<BlogContent> Drafts, ConcurrentBag<BlogContent> Posts)
-    : IEnumerable<BlogContent>
+public sealed class BlogContents(ConcurrentBag<BlogContent> drafts, ConcurrentBag<BlogContent> posts)
 {
-    IEnumerator<BlogContent> IEnumerable<BlogContent>.GetEnumerator()
-    {
-        return Posts.Concat(Drafts).GetEnumerator();
-    }
+    public ConcurrentBag<BlogContent> Drafts { get; } = drafts;

-    public IEnumerator GetEnumerator() => ((IEnumerable<BlogContent>)this).GetEnumerator();
+    public ConcurrentBag<BlogContent> Posts { get; } = posts;
 }


@@ -1,44 +0,0 @@
using System.Text;
namespace YaeBlog.Models;
public record BlogImageInfo(FileInfo File, long Width, long Height, string MineType, byte[] Content, bool IsUsed)
: IComparable<BlogImageInfo>
{
public int Size => Content.Length;
public override string ToString()
{
StringBuilder builder = new();
builder.AppendLine($"Blog image {File.Name}:");
builder.AppendLine($"\tWidth: {Width}; Height: {Height}");
builder.AppendLine($"\tSize: {FormatSize()}");
builder.AppendLine($"\tImage Format: {MineType}");
return builder.ToString();
}
public int CompareTo(BlogImageInfo? other)
{
if (other is null)
{
return -1;
}
return other.Size.CompareTo(Size);
}
private string FormatSize()
{
double size = Size;
if (size / 1024 > 3)
{
size /= 1024;
return size / 1024 > 3 ? $"{size / 1024}MB" : $"{size}KB";
}
return $"{size}B";
}
}


@@ -0,0 +1,3 @@
namespace YaeBlog.Models;
public record struct ImageScanResult(List<FileInfo> UnusedImages, List<FileInfo> NotFoundImages);


@@ -1,7 +1,6 @@
 using AngleSharp;
 using AngleSharp.Dom;
 using YaeBlog.Abstraction;
-using YaeBlog.Extensions;
 using YaeBlog.Models;

 namespace YaeBlog.Processors;
@@ -21,21 +20,20 @@ public sealed class EssayStylesPostRenderProcessor : IPostRenderProcessor
 ApplyGlobalCssStyles(document);
 BeatifyTable(document);
-BeatifyList(document);
-BeatifyInlineCode(document);

 return essay.WithNewHtmlContent(document.DocumentElement.OuterHtml);
 }

 private readonly Dictionary<string, string> _globalCssStyles = new()
 {
-    { "pre", "p-4 bg-gray-100 rounded-sm overflow-x-auto" },
+    { "pre", "p-4 bg-slate-300 rounded-sm overflow-x-auto" },
     { "h2", "text-3xl font-bold py-4" },
     { "h3", "text-2xl font-bold py-3" },
     { "h4", "text-xl font-bold py-2" },
     { "h5", "text-lg font-bold py-1" },
     { "p", "p-2" },
     { "img", "w-11/12 block mx-auto my-2 rounded-md shadow-md" },
+    { "ul", "list-disc pl-2" }
 };

 private void ApplyGlobalCssStyles(IDocument document)
@@ -101,45 +99,4 @@ public sealed class EssayStylesPostRenderProcessor : IPostRenderProcessor
         }
     }
 }
private static void BeatifyList(IDocument document)
{
foreach (IElement ulElement in from e in document.All
where e.LocalName == "ul"
select e)
{
// 首先给<ul>元素添加样式
ulElement.ClassList.Add("list-disc ml-10");
foreach (IElement liElement in from e in ulElement.Children
where e.LocalName == "li"
select e)
{
// 修改<li>元素中的<p>元素样式
// 默认的p-2间距有点太宽了
foreach (IElement pElement in from e in liElement.Children
where e.LocalName == "p"
select e)
{
pElement.ClassList.Remove("p-2");
pElement.ClassList.Add("p-1");
}
}
}
}
private static void BeatifyInlineCode(IDocument document)
{
// 选择不在<pre>元素内的<code>元素
// 即行内代码
IEnumerable<IElement> inlineCodes = from e in document.All
where e.LocalName == "code" && e.EnumerateParentElements().All(p => p.LocalName != "pre")
select e;
foreach (IElement e in inlineCodes)
{
e.ClassList.Add("bg-gray-100 inline p-1 rounded-xs");
}
}
 }


@@ -7,8 +7,7 @@ using YaeBlog.Models;
 namespace YaeBlog.Processors;

-public class ImagePostRenderProcessor(
-    ILogger<ImagePostRenderProcessor> logger,
+public class ImagePostRenderProcessor(ILogger<ImagePostRenderProcessor> logger,
     IOptions<BlogOptions> options)
     : IPostRenderProcessor
 {
@@ -30,27 +29,22 @@ public class ImagePostRenderProcessor(
 if (attr is not null)
 {
     logger.LogDebug("Found image link: '{}'", attr.Value);
-    attr.Value = GenerateImageLink(attr.Value, essay.FileName, essay.IsDraft);
+    attr.Value = GenerateImageLink(attr.Value, essay.FileName);
 }
 }

 return essay.WithNewHtmlContent(html.DocumentElement.OuterHtml);
 }

 public string Name => nameof(ImagePostRenderProcessor);

-private string GenerateImageLink(string filename, string essayFilename, bool isDraft)
+private string GenerateImageLink(string filename, string essayFilename)
 {
-    // 如果图片路径中没有包含文件名
-    // 则添加文件名
     if (!filename.Contains(essayFilename))
     {
         filename = Path.Combine(essayFilename, filename);
     }

-    filename = isDraft
-        ? Path.Combine(_options.Root, "drafts", filename)
-        : Path.Combine(_options.Root, "posts", filename);
+    filename = Path.Combine(_options.Root, "posts", filename);

     if (!Path.Exists(filename))
     {


@@ -1,7 +1,5 @@
 using System.Collections.Concurrent;
 using System.Text.RegularExpressions;
-using Imageflow.Bindings;
-using Imageflow.Fluent;
 using Microsoft.Extensions.Options;
 using YaeBlog.Abstraction;
 using YaeBlog.Core.Exceptions;
@@ -11,30 +9,17 @@ using YamlDotNet.Serialization;
 namespace YaeBlog.Services;

-public partial class EssayScanService : IEssayScanService
-{
-    private readonly BlogOptions _blogOptions;
-    private readonly ISerializer _yamlSerializer;
-    private readonly IDeserializer _yamlDeserializer;
-    private readonly ILogger<EssayScanService> _logger;
-
-    public EssayScanService(ISerializer yamlSerializer,
-        IDeserializer yamlDeserializer,
-        IOptions<BlogOptions> blogOptions,
-        ILogger<EssayScanService> logger)
-    {
-        _yamlSerializer = yamlSerializer;
-        _yamlDeserializer = yamlDeserializer;
-        _logger = logger;
-        _blogOptions = blogOptions.Value;
-        RootDirectory = ValidateRootDirectory();
-    }
-
-    private DirectoryInfo RootDirectory { get; }
+public partial class EssayScanService(
+    ISerializer yamlSerializer,
+    IDeserializer yamlDeserializer,
+    IOptions<BlogOptions> blogOptions,
+    ILogger<EssayScanService> logger) : IEssayScanService
+{
+    private readonly BlogOptions _blogOptions = blogOptions.Value;

     public async Task<BlogContents> ScanContents()
     {
-        ValidateDirectory(out DirectoryInfo drafts, out DirectoryInfo posts);
+        ValidateDirectory(_blogOptions.Root, out DirectoryInfo drafts, out DirectoryInfo posts);

         return new BlogContents(
             await ScanContentsInternal(drafts, true),
@@ -43,92 +28,82 @@ public partial class EssayScanService : IEssayScanService
 public async Task SaveBlogContent(BlogContent content, bool isDraft = true)
 {
-    ValidateDirectory(out DirectoryInfo drafts, out DirectoryInfo posts);
+    ValidateDirectory(_blogOptions.Root, out DirectoryInfo drafts, out DirectoryInfo posts);

     FileInfo targetFile = isDraft
-        ? new FileInfo(Path.Combine(drafts.FullName, content.BlogName + ".md"))
-        : new FileInfo(Path.Combine(posts.FullName, content.BlogName + ".md"));
+        ? new FileInfo(Path.Combine(drafts.FullName, content.FileName + ".md"))
+        : new FileInfo(Path.Combine(posts.FullName, content.FileName + ".md"));
+
+    if (!isDraft)
+    {
+        content.Metadata.Date = DateTime.Now;
+    }

     if (targetFile.Exists)
     {
-        _logger.LogWarning("Blog {} exists, overriding.", targetFile.Name);
+        logger.LogWarning("Blog {} exists, overriding.", targetFile.Name);
     }

     await using StreamWriter writer = targetFile.CreateText();
     await writer.WriteAsync("---\n");
-    await writer.WriteAsync(_yamlSerializer.Serialize(content.Metadata));
+    await writer.WriteAsync(yamlSerializer.Serialize(content.Metadata));
     await writer.WriteAsync("---\n");

-    if (string.IsNullOrEmpty(content.Content) && isDraft)
+    if (isDraft)
     {
-        // 如果博客为操作且内容为空
-        // 创建简介隔断符号
         await writer.WriteLineAsync("<!--more-->");
     }
     else
     {
-        await writer.WriteAsync(content.Content);
+        await writer.WriteAsync(content.FileContent);
     }
-
-    // 保存图片文件
-    await Task.WhenAll(from image in content.Images
-        select File.WriteAllBytesAsync(image.File.FullName, image.Content));
 }

-private record struct BlogResult(
-    FileInfo BlogFile,
-    string BlogContent,
-    List<BlogImageInfo> Images,
-    List<FileInfo> NotFoundImages);
-
 private async Task<ConcurrentBag<BlogContent>> ScanContentsInternal(DirectoryInfo directory, bool isDraft)
 {
-    // 扫描以md结尾且不是隐藏文件的文件
+    // 扫描以md结果的但是不是隐藏文件的文件
     IEnumerable<FileInfo> markdownFiles = from file in directory.EnumerateFiles()
         where file.Extension == ".md" && !file.Name.StartsWith('.')
         select file;

-    ConcurrentBag<BlogResult> fileContents = [];
+    ConcurrentBag<(string, string)> fileContents = [];
     await Parallel.ForEachAsync(markdownFiles, async (file, token) =>
     {
         using StreamReader reader = file.OpenText();
-        string blogName = file.Name.Split('.')[0];
-        string blogContent = await reader.ReadToEndAsync(token);
-        ImageResult imageResult =
-            await ScanImagePreBlog(directory, blogName,
-                blogContent);
-        fileContents.Add(new BlogResult(file, blogContent, imageResult.Images, imageResult.NotfoundImages));
+        fileContents.Add((file.Name, await reader.ReadToEndAsync(token)));
     });

     ConcurrentBag<BlogContent> contents = [];
     await Task.Run(() =>
     {
-        foreach (BlogResult blog in fileContents)
+        foreach ((string filename, string content) in fileContents)
         {
-            int endPos = blog.BlogContent.IndexOf("---", 4, StringComparison.Ordinal);
-            if (!blog.BlogContent.StartsWith("---") || endPos is -1 or 0)
+            int endPos = content.IndexOf("---", 4, StringComparison.Ordinal);
+            if (!content.StartsWith("---") || endPos is -1 or 0)
             {
-                _logger.LogWarning("Failed to parse metadata from {}, skipped.", blog.BlogFile.Name);
+                logger.LogWarning("Failed to parse metadata from {}, skipped.", filename);
                 return;
             }

-            string metadataString = blog.BlogContent[4..endPos];
+            string metadataString = content[4..endPos];

             try
             {
-                MarkdownMetadata metadata = _yamlDeserializer.Deserialize<MarkdownMetadata>(metadataString);
-                _logger.LogDebug("Scan metadata title: '{}' for {}.", metadata.Title, blog.BlogFile.Name);
-                contents.Add(new BlogContent(blog.BlogFile, metadata, blog.BlogContent[(endPos + 3)..], isDraft,
-                    blog.Images, blog.NotFoundImages));
+                MarkdownMetadata metadata = yamlDeserializer.Deserialize<MarkdownMetadata>(metadataString);
+                logger.LogDebug("Scan metadata title: '{}' for {}.", metadata.Title, filename);
+                contents.Add(new BlogContent
+                {
+                    FileName = filename[..^3], Metadata = metadata, FileContent = content[(endPos + 3)..],
+                    IsDraft = isDraft
+                });
             }
             catch (YamlException e)
             {
-                _logger.LogWarning("Failed to parser metadata from {} due to {}, skipping", blog.BlogFile.Name, e);
+                logger.LogWarning("Failed to parser metadata from {} due to {}, skipping", filename, e);
             }
         }
     });
@@ -136,96 +111,99 @@ public partial class EssayScanService : IEssayScanService
     return contents;
 }

-private record struct ImageResult(List<BlogImageInfo> Images, List<FileInfo> NotfoundImages);
-
-private async Task<ImageResult> ScanImagePreBlog(DirectoryInfo directory, string blogName, string content)
-{
-    MatchCollection matchResult = ImagePattern.Matches(content);
-    DirectoryInfo imageDirectory = new(Path.Combine(directory.FullName, blogName));
-    Dictionary<string, bool> usedImages = imageDirectory.Exists
-        ? imageDirectory.EnumerateFiles().ToDictionary(file => file.FullName, _ => false)
-        : [];
-    List<FileInfo> notFoundImages = [];
-
-    foreach (Match match in matchResult)
-    {
-        string imageName = match.Groups[1].Value;
-
-        // 判断md文件中的图片名称中是否包含文件夹名称
-        // 例如 blog-1/image.png 或者 image.png
-        // 如果不带文件夹名称
-        // 默认添加同博客名文件夹
-        FileInfo usedFile = imageName.Contains(blogName)
-            ? new FileInfo(Path.Combine(directory.FullName, imageName))
-            : new FileInfo(Path.Combine(directory.FullName, blogName, imageName));
-
-        if (usedImages.TryGetValue(usedFile.FullName, out _))
-        {
-            usedImages[usedFile.FullName] = true;
-        }
-        else
-        {
-            notFoundImages.Add(usedFile);
-        }
-    }
-
-    List<BlogImageInfo> images = (await Task.WhenAll((from pair in usedImages
-        select GetImageInfo(new FileInfo(pair.Key), pair.Value)).ToArray())).ToList();
-    return new ImageResult(images, notFoundImages);
-}
-
-private static async Task<BlogImageInfo> GetImageInfo(FileInfo file, bool isUsed)
-{
-    byte[] image = await File.ReadAllBytesAsync(file.FullName);
-
-    if (file.Extension is ".jpg" or ".jpeg" or ".png")
-    {
-        ImageInfo imageInfo =
-            await ImageJob.GetImageInfoAsync(MemorySource.Borrow(image), SourceLifetime.NowOwnedAndDisposedByTask);
-
-        return new BlogImageInfo(file, imageInfo.ImageWidth, imageInfo.ImageWidth, imageInfo.PreferredMimeType,
-            image, isUsed);
-    }
-
-    return new BlogImageInfo(file, 0, 0, file.Extension switch
-    {
-        "svg" => "image/svg",
-        "avif" => "image/avif",
-        _ => string.Empty
-    }, image, isUsed);
-}
+public async Task<ImageScanResult> ScanImages()
+{
+    BlogContents contents = await ScanContents();
+    ValidateDirectory(_blogOptions.Root, out DirectoryInfo drafts, out DirectoryInfo posts);
+
+    List<FileInfo> unusedFiles = [];
+    List<FileInfo> notFoundFiles = [];
+
+    ImageScanResult draftResult = await ScanUnusedImagesInternal(contents.Drafts, drafts);
+    ImageScanResult postResult = await ScanUnusedImagesInternal(contents.Posts, posts);
+
+    unusedFiles.AddRange(draftResult.UnusedImages);
+    notFoundFiles.AddRange(draftResult.NotFoundImages);
+    unusedFiles.AddRange(postResult.UnusedImages);
+    notFoundFiles.AddRange(postResult.NotFoundImages);
+
+    return new ImageScanResult(unusedFiles, notFoundFiles);
+}
+
+private static Task<ImageScanResult> ScanUnusedImagesInternal(IEnumerable<BlogContent> contents,
+    DirectoryInfo root)
+{
+    ConcurrentBag<FileInfo> unusedImage = [];
+    ConcurrentBag<FileInfo> notFoundImage = [];
+
+    Parallel.ForEach(contents, content =>
+    {
+        MatchCollection result = ImagePattern.Matches(content.FileContent);
+        DirectoryInfo imageDirectory = new(Path.Combine(root.FullName, content.FileName));
+
+        Dictionary<string, bool> usedDictionary;
+        if (imageDirectory.Exists)
+        {
+            usedDictionary = (from file in imageDirectory.EnumerateFiles()
+                select new KeyValuePair<string, bool>(file.FullName, false)).ToDictionary();
+        }
+        else
+        {
+            usedDictionary = [];
+        }
+
+        foreach (Match match in result)
+        {
+            string imageName = match.Groups[1].Value;
+            FileInfo usedFile = imageName.Contains(content.FileName)
+                ? new FileInfo(Path.Combine(root.FullName, imageName))
+                : new FileInfo(Path.Combine(root.FullName, content.FileName, imageName));
+
+            if (usedDictionary.TryGetValue(usedFile.FullName, out _))
+            {
+                usedDictionary[usedFile.FullName] = true;
+            }
+            else
+            {
+                notFoundImage.Add(usedFile);
+            }
+        }
+
+        foreach (KeyValuePair<string, bool> pair in usedDictionary.Where(p => !p.Value))
+        {
+            unusedImage.Add(new FileInfo(pair.Key));
+        }
+    });
+
+    return Task.FromResult(new ImageScanResult(unusedImage.ToList(), notFoundImage.ToList()));
+}
 [GeneratedRegex(@"\!\[.*?\]\((.*?)\)")]
 private static partial Regex ImagePattern { get; }

-private DirectoryInfo ValidateRootDirectory()
-{
-    DirectoryInfo rootDirectory = new(Path.Combine(Environment.CurrentDirectory, _blogOptions.Root));
-
-    if (!rootDirectory.Exists)
-    {
-        throw new BlogFileException($"'{_blogOptions.Root}' is not a directory.");
-    }
-
-    return rootDirectory;
-}
-
-private void ValidateDirectory(out DirectoryInfo drafts, out DirectoryInfo posts)
-{
-    if (RootDirectory.EnumerateDirectories().All(dir => dir.Name != "drafts"))
-    {
-        throw new BlogFileException($"'{_blogOptions.Root}/drafts' not exists.");
-    }
-
-    if (RootDirectory.EnumerateDirectories().All(dir => dir.Name != "posts"))
-    {
-        throw new BlogFileException($"'{_blogOptions.Root}/posts' not exists.");
-    }
-
-    drafts = new DirectoryInfo(Path.Combine(_blogOptions.Root, "drafts"));
-    posts = new DirectoryInfo(Path.Combine(_blogOptions.Root, "posts"));
-}
-}
+private void ValidateDirectory(string root, out DirectoryInfo drafts, out DirectoryInfo posts)
+{
+    root = Path.Combine(Environment.CurrentDirectory, root);
+    DirectoryInfo rootDirectory = new(root);
+
+    if (!rootDirectory.Exists)
+    {
+        throw new BlogFileException($"'{root}' is not a directory.");
+    }
+
+    if (rootDirectory.EnumerateDirectories().All(dir => dir.Name != "drafts"))
+    {
+        throw new BlogFileException($"'{root}/drafts' not exists.");
+    }
+
+    if (rootDirectory.EnumerateDirectories().All(dir => dir.Name != "posts"))
+    {
+        throw new BlogFileException($"'{root}/posts' not exists.");
+    }
+
+    drafts = new DirectoryInfo(Path.Combine(root, "drafts"));
+    posts = new DirectoryInfo(Path.Combine(root, "posts"));
+}
+}


@@ -1,119 +0,0 @@
using Imageflow.Fluent;
using YaeBlog.Abstraction;
using YaeBlog.Core.Exceptions;
using YaeBlog.Models;
namespace YaeBlog.Services;
public sealed class ImageCompressService(IEssayScanService essayScanService, ILogger<ImageCompressService> logger)
{
private record struct CompressResult(BlogImageInfo ImageInfo, byte[] CompressContent);
public async Task<List<BlogImageInfo>> ScanUsedImages()
{
BlogContents contents = await essayScanService.ScanContents();
List<BlogImageInfo> originalImages = (from content in contents.Posts.Concat(contents.Drafts)
from image in content.Images
where image.IsUsed
select image).ToList();
originalImages.Sort();
return originalImages;
}
public async Task Compress(bool dryRun)
{
BlogContents contents = await essayScanService.ScanContents();
// 筛选需要压缩的图片
// 即图片被博客使用且是jpeg/png格式
List<BlogContent> needCompressContents = (from content in contents
where content.Images.Any(i => i is { IsUsed: true } and { File.Extension: ".jpg" or ".jpeg" or ".png" })
select content).ToList();
if (needCompressContents.Count == 0)
{
return;
}
int uncompressedSize = 0;
int compressedSize = 0;
List<BlogContent> compressedContent = new(needCompressContents.Count);
foreach (BlogContent content in needCompressContents)
{
List<BlogImageInfo> uncompressedImages = (from image in content.Images
where image is { IsUsed: true } and { File.Extension: ".jpg" or ".jpeg" or ".png" }
select image).ToList();
uncompressedSize += uncompressedImages.Select(i => i.Size).Sum();
foreach (BlogImageInfo image in uncompressedImages)
{
logger.LogInformation("Uncompressed image: {} belonging to blog {}.", image.File.Name,
content.BlogName);
}
CompressResult[] compressedImages = (await Task.WhenAll(from image in uncompressedImages
select Task.Run(async () => new CompressResult(image, await ConvertToWebp(image))))).ToArray();
compressedSize += compressedImages.Select(i => i.CompressContent.Length).Sum();
// 直接在原有的图片列表上添加图片
List<BlogImageInfo> images = content.Images.Concat(from r in compressedImages
select r.ImageInfo with
{
File = new FileInfo(r.ImageInfo.File.FullName.Split('.')[0] + ".webp"),
Content = r.CompressContent,
MineType = "image/webp"
}).ToList();
// 修改文本
string blogContent = compressedImages.Aggregate(content.Content, (c, r) =>
{
string originalName = r.ImageInfo.File.Name;
string outputName = originalName.Split('.')[0] + ".webp";
return c.Replace(originalName, outputName);
});
compressedContent.Add(content with { Images = images, Content = blogContent });
}
logger.LogInformation("Compression ratio: {}%.", (double)compressedSize / uncompressedSize * 100.0);
if (dryRun is false)
{
await Task.WhenAll(from content in compressedContent
select essayScanService.SaveBlogContent(content, content.IsDraft));
}
}
private static async Task<byte[]> ConvertToWebp(BlogImageInfo image)
{
using ImageJob job = new();
BuildJobResult result = await job.Decode(MemorySource.Borrow(image.Content))
.Branch(f => f.EncodeToBytes(new WebPLosslessEncoder()))
.EncodeToBytes(new WebPLossyEncoder(75))
.Finish()
.InProcessAsync();
// 超过128KB的图片使用有损压缩
// 反之使用无损压缩
ArraySegment<byte>? losslessImage = result.TryGet(1)?.TryGetBytes();
ArraySegment<byte>? lossyImage = result.TryGet(2)?.TryGetBytes();
if (image.Size <= 128 * 1024 && losslessImage.HasValue)
{
return losslessImage.Value.ToArray();
}
if (lossyImage.HasValue)
{
return lossyImage.Value.ToArray();
}
throw new BlogCommandException($"Failed to convert {image.File.Name} to webp format: return value is null.");
}
}


@@ -41,14 +41,14 @@ public partial class RendererService(
 uint wordCount = GetWordCount(content);
 BlogEssay essay = new()
 {
-    Title = content.Metadata.Title ?? content.BlogName,
-    FileName = content.BlogName,
+    Title = content.Metadata.Title ?? content.FileName,
+    FileName = content.FileName,
     IsDraft = content.IsDraft,
     Description = GetDescription(content),
     WordCount = wordCount,
     ReadTime = CalculateReadTime(wordCount),
     PublishTime = content.Metadata.Date ?? DateTime.Now,
-    HtmlContent = content.Content
+    HtmlContent = content.FileContent
 };

 if (content.Metadata.Tags is not null)
@@ -156,17 +156,17 @@ public partial class RendererService(
 private string GetDescription(BlogContent content)
 {
     const string delimiter = "<!--more-->";
-    int pos = content.Content.IndexOf(delimiter, StringComparison.Ordinal);
+    int pos = content.FileContent.IndexOf(delimiter, StringComparison.Ordinal);
     bool breakSentence = false;

     if (pos == -1)
     {
         // 自动截取前50个字符
-        pos = content.Content.Length < 50 ? content.Content.Length : 50;
+        pos = content.FileContent.Length < 50 ? content.FileContent.Length : 50;
         breakSentence = true;
     }

-    string rawContent = content.Content[..pos];
+    string rawContent = content.FileContent[..pos];
     MatchCollection matches = DescriptionPattern.Matches(rawContent);
     StringBuilder builder = new();
@@ -182,18 +182,18 @@ public partial class RendererService(
 string description = builder.ToString();
-logger.LogDebug("Description of {} is {}.", content.BlogName,
+logger.LogDebug("Description of {} is {}.", content.FileName,
     description);

 return description;
 }

 private uint GetWordCount(BlogContent content)
 {
-    int count = (from c in content.Content
+    int count = (from c in content.FileContent
         where char.IsLetterOrDigit(c)
         select c).Count();

-    logger.LogDebug("Word count of {} is {}", content.BlogName,
+    logger.LogDebug("Word count of {} is {}", content.FileName,
         count);

     return (uint)count;
 }


@@ -1,9 +1,6 @@
 <Project Sdk="Microsoft.NET.Sdk.Web">

 <ItemGroup>
-    <PackageReference Include="ImageFlow.NativeRuntime.ubuntu-x86_64" Version="2.1.0-rc11" Condition="$([MSBuild]::IsOsPlatform('Linux'))"/>
-    <PackageReference Include="ImageFlow.NativeRuntime.osx-arm64" Version="2.1.0-rc11" Condition="$([MSBuild]::IsOsPlatform('OSX'))"/>
-    <PackageReference Include="ImageFlow.Net" Version="0.13.2"/>
     <PackageReference Include="System.CommandLine" Version="2.0.0-beta4.22272.1"/>
     <PackageReference Include="AngleSharp" Version="1.1.0"/>
     <PackageReference Include="Markdig" Version="0.38.0"/>
@@ -16,7 +13,7 @@
     <ImplicitUsings>enable</ImplicitUsings>
 </PropertyGroup>

-<Target Name="EnsurePnpmInstalled" BeforeTargets="BeforeBuild">
+<Target Name="EnsurePnpmInstalled" BeforeTargets="Build">
     <Message Importance="low" Text="Ensure pnpm is installed..."/>
     <Exec Command="pnpm --version" ContinueOnError="true">
         <Output TaskParameter="ExitCode" PropertyName="ErrorCode"/>
@@ -28,13 +25,9 @@
<Exec Command="pnpm install"/> <Exec Command="pnpm install"/>
</Target> </Target>
<Target Name="TailwindGenerate" AfterTargets="EnsurePnpmInstalled" BeforeTargets="BeforeBuild" Condition="'$(_IsPublishing)' == 'yes'"> <Target Name="TailwindGenerate" AfterTargets="EnsurePnpmInstalled">
<Message Importance="normal" Text="Generate css files using tailwind..."/> <Message Importance="normal" Text="Generate css files using tailwind..."/>
<Exec Command="pnpm tailwindcss -i wwwroot/tailwind.css -o $(IntermediateOutputPath)tailwind.g.css"/> <Exec Command="pnpm tailwind -i wwwroot/input.css -o wwwroot/output.css"/>
<ItemGroup>
<Content Include="$(IntermediateOutputPath)tailwind.g.css" Visible="false" TargetPath="wwwroot/tailwind.g.css"/>
</ItemGroup>
</Target> </Target>
</Project> </Project>


@@ -1,15 +1,12 @@
 {
-  "name": "yae-blog",
+  "name": "YaeBlog",
   "version": "1.0.0",
   "description": "",
-  "scripts": {
-    "dev": "tailwindcss -i wwwroot/tailwind.css -o wwwroot/tailwind.g.css -w"
-  },
+  "scripts": {},
   "keywords": [],
   "author": "",
   "license": "ISC",
   "devDependencies": {
-    "tailwindcss": "^4.0.0",
-    "@tailwindcss/cli": "^4.0.0"
+    "tailwindcss": "^3.4.16"
   }
 }

YaeBlog/pnpm-lock.yaml generated

File diff suppressed because it is too large.


@@ -0,0 +1,884 @@
---
title: async/await究竟是如何工作的
tags:
- dotnet
- 技术笔记
- 译文
---
### Translator's note
Writing asynchronous code that is both correct and fast has long been one of software engineering's hard problems, and the `async/await` model introduced by C# is without doubt a pioneer on that road. This post is a translation of the article "How async/await really works in C#" from the .NET developer blog; I hope that after reading it you will understand where the `async/await` programming model came from and how `.NET` implements it. The Chinese .NET developer blog has also translated [the article](https://devblogs.microsoft.com/dotnet-ch/async-await%e5%9c%a8-c%e8%af%ad%e8%a8%80%e4%b8%ad%e6%98%af%e5%a6%82%e4%bd%95%e5%b7%a5%e4%bd%9c%e7%9a%84/), which readers may consult as well.
---
A few weeks ago the [.NET developer blog](https://devblogs.microsoft.com/dotnet/) published an article titled [What is .NET, and why should you choose it?](https://devblogs.microsoft.com/dotnet/why-dotnet/). It gave a high-level overview of the whole `dotnet` ecosystem, summarized its components and the design decisions behind them, and promised a series of follow-up deep dives into the areas it touched on. This post is the first of that series: a deep dive into the history, the design decisions, and the implementation details of `async/await` in C# and .NET.
Support for `async/await` arrived roughly a decade ago. In that time it has dramatically changed how scalable .NET code is written, and it is both easy and very common to use the functionality it provides without understanding what is going on underneath. Take the following **synchronous** method as an example (it is called **synchronous** because the caller cannot do anything else until the whole operation completes and control is returned to it):
```csharp
// Synchronously copy all data from source to destination
public void CopyStreamToStream(Stream source, Stream destination)
{
var buffer = new byte[0x1000];
int numRead;
while ((numRead = source.Read(buffer, 0, buffer.Length)) != 0)
{
destination.Write(buffer, 0, numRead);
}
}
```
Starting from that method, you only need to add a couple of keywords and change a few method names to get an **asynchronous** method (so called because it returns control to its caller quickly, typically before all of its work has completed):
```csharp
// Asynchronously copy all data from source to destination
public async Task CopyStreamToStreamAsync(Stream source, Stream destination)
{
var buffer = new byte[0x1000];
int numRead;
while ((numRead = await source.ReadAsync(buffer, 0, buffer.Length)) != 0)
{
await destination.WriteAsync(buffer, 0, numRead);
}
}
```
Almost the same syntax, the same control-flow structure, but now the method no longer blocks while it runs: it has a completely different underlying execution model, and the C# compiler and the core libraries do all of the complicated work for you.
Even though it is common to use this kind of technology without knowing how it works, we firmly believe that understanding how things actually run helps us use them better. For `async/await` in particular, that understanding pays off whenever you need to dig deeper, for example when debugging code that has gone wrong or when improving the performance of code that is working correctly. In this post we will look closely at how `async/await` works at the language, compiler, and library level, so that you can make the most of these designs.
To understand all of this properly, we will go back to a time before `async/await` and look at how state-of-the-art asynchronous code was written without it. Frankly, it was not pretty.
### In the beginning
Back in the .NET Framework 1.0 days, the prevailing pattern for asynchronous programming was the **Asynchronous Programming Model**, also known as the `APM` pattern, the `Begin/End` pattern, or the `IAsyncResult` pattern. At a high level the pattern is simple. For a synchronous operation `DoStuff`:
```csharp
class Handler
{
public int DoStuff(string arg);
}
```
the model exposes two corresponding methods: a `BeginDoStuff` method and an `EndDoStuff` method:
```csharp
class Handler
{
public int DoStuff(string arg);
public IAsyncResult BeginDoStuff(string arg, AsyncCallback? callback, object? state);
public int EndDoStuff(IAsyncResult asyncResult);
}
```
`BeginDoStuff` accepts all of the parameters that `DoStuff` accepts, plus an `AsyncCallback` callback and an **opaque** state object `state`, both of which may be null. The Begin method is responsible for initiating the asynchronous operation and, if a callback was supplied, for ensuring it is invoked when the operation completes; the callback is therefore often referred to as the "continuation" of the initiated operation. The Begin method is also responsible for constructing an object that implements `IAsyncResult`, whose `AsyncState` property returns whatever was passed as the optional `state` argument:
```csharp
namespace System
{
public interface IAsyncResult
{
object? AsyncState { get; }
WaitHandle AsyncWaitHandle { get; }
bool IsCompleted { get; }
bool CompletedSynchronously { get; }
}
public delegate void AsyncCallback(IAsyncResult ar);
}
```
The `IAsyncResult` instance is returned from the Begin method and is also passed to the `AsyncCallback` when it is invoked. When the caller is ready to consume the result of the operation, it passes that `IAsyncResult` to the End method, which is responsible for ensuring the operation has finished, blocking until it has if it has not. The End method returns the operation's result, and any errors or exceptions raised while the operation was running are propagated out of it as well. So, for this synchronous code:
```csharp
try
{
int i = handler.DoStuff(arg);
Use(i);
}
catch (Exception e)
{
    ... // handle exceptions thrown by DoStuff and Use
}
```
we can rewrite it to run asynchronously using the Begin/End methods:
```csharp
try
{
handler.BeginDoStuff(arg, iar =>
{
try
{
Handler handler = (Handler)iar.AsyncState!;
int i = handler.EndDoStuff(iar);
Use(i);
}
catch (Exception e2)
{
            ... // handle exceptions thrown by EndDoStuff and Use
}
}, handler);
}
catch (Exception e)
{
    ... // handle exceptions thrown synchronously by BeginDoStuff
}
```
For developers used to languages with callback-based APIs, this code should look quite familiar.
But this is where things start to get complicated. For one thing, this code has a "stack dive" problem: a stack dive is what happens when code keeps calling deeper and deeper into the stack until it eventually overflows it. If the "asynchronous" operation completes synchronously, the Begin method invokes the callback synchronously, which means the call to the Begin method directly calls the callback. And operations that complete synchronously despite being "asynchronous" are very common: they only promise to *possibly* complete asynchronously, not to always do so. Consider an asynchronous network operation, such as reading from a socket. If each individual read only needs a small amount of data, say a few response-header bytes per request, the implementation may read a large chunk into a buffer up front. Rather than issuing an expensive system call for every small read, it reads a lot of data into a buffer once and serves subsequent reads from that buffer until it is exhausted, which reduces the number of costly system calls needed to talk to the socket. Such a buffer can sit behind any asynchronous call you make: the first operation completes asynchronously and fills the buffer, and then many subsequent "asynchronous" operations never touch the I/O at all, completing synchronously from the buffer, until it runs dry and is filled asynchronously again. So when the Begin method issues one of those calls, it may find that the operation completed synchronously and therefore invoke the callback synchronously. Now you have one stack frame for the caller of the Begin method and another for the callback. What happens if that callback turns around and calls the Begin method again? If Begin and the callback keep being invoked synchronously, you keep piling frames onto the stack, over and over, until the stack runs out of space.
This is not a hypothetical concern; it is easy to reproduce with the following code:
```csharp
using System.Net;
using System.Net.Sockets;
using Socket listener = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
listener.Bind(new IPEndPoint(IPAddress.Loopback, 0));
listener.Listen();
using Socket client = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
client.Connect(listener.LocalEndPoint!);
using Socket server = listener.Accept();
_ = server.SendAsync(new byte[100_000]);
var mres = new ManualResetEventSlim();
byte[] buffer = new byte[1];
var stream = new NetworkStream(client);
void ReadAgain()
{
stream.BeginRead(buffer, 0, 1, iar =>
{
if (stream.EndRead(iar) != 0)
{
ReadAgain(); // uh oh!
}
else
{
mres.Set();
}
}, null);
};
ReadAgain();
mres.Wait();
```
Here we set up a simple client socket and a simple server socket and connect them. The server sends 100,000 bytes to the client, which the client then "asynchronously" receives one byte at a time using Begin/End methods (note that this is terribly inefficient and should never be done outside of a teaching example). The callback passed to `BeginRead` finishes the read by calling `EndRead`, and if some data was read (meaning the transfer is not yet complete), it calls `BeginRead` again through a recursive call to the local `ReadAgain` function. It is worth pointing out that socket operations in .NET Core are much faster than they were in .NET Framework, and that they complete synchronously whenever the operating system can satisfy them synchronously (the kernel itself keeps a buffer backing socket receives). Run this code and it overflows the stack.
Because this problem is so easy to hit, the `APM` model built in a way to mitigate it. There are two obvious ways to do so:
1. Don't allow the `AsyncCallback` to be invoked synchronously. If the callback is always invoked asynchronously, even when the operation completes synchronously, the stack-dive risk disappears. But that hurts performance: operations that complete synchronously (or so quickly that the difference is unnoticeable) are very common, and forcing their callbacks through a queue adds measurable overhead.
2. Provide a mechanism that lets the caller, rather than the callback, perform the remaining work when the operation completes synchronously. That way no extra stack frame is introduced and the rest of the work proceeds without deepening the stack.
The `APM` model went with the second option. To support it, the `IAsyncResult` interface exposes two further members: `IsCompleted` and `CompletedSynchronously`. `IsCompleted` tells you whether the operation has completed; you can check it repeatedly until it flips from `false` to `true`. `CompletedSynchronously`, in contrast, never changes while the operation runs (or if it does, that is a bug waiting to bite); it is used to decide whether the remaining work should be done by the caller of the Begin method or by the `AsyncCallback`. If `CompletedSynchronously` is `false`, the operation is completing asynchronously and any continuation work should be handled by the callback; after all, if the work does not complete synchronously, the caller of Begin has no way to know when it is done (and if it simply called the End method, it would block until it was). If `CompletedSynchronously` is `true`, however, letting the callback handle the continuation risks a stack dive, because it would be performing that work deeper on the stack than where it started. Any implementation that cares about stack dives therefore has to examine `CompletedSynchronously`: when it is true, the caller of the Begin method performs the continuation and the callback does nothing. This is also why `CompletedSynchronously` must never change: the caller and the callback need to see the same value so that the continuation is executed exactly once, no matter how the race plays out.
That means our `DoStuff` example becomes:
```csharp
try
{
IAsyncResult ar = handler.BeginDoStuff(arg, iar =>
{
if (!iar.CompletedSynchronously)
{
try
{
Handler handler = (Handler)iar.AsyncState!;
int i = handler.EndDoStuff(iar);
Use(i);
}
catch (Exception e2)
{
... // handle exceptions from EndDoStuff and Use
}
}
}, handler);
if (ar.CompletedSynchronously)
{
int i = handler.EndDoStuff(ar);
Use(i);
}
}
catch (Exception e)
{
... // handle exceptions that emerge synchronously from BeginDoStuff and possibly EndDoStuff/Use
}
```
This is already ~~getting long-winded~~, and we have only looked at *consuming* the pattern; we have not touched on *implementing* it. While most developers never need to write the leaf operations (implementations like `Socket.BeginReceive/EndReceive` that talk to the operating system), many do need to compose them (performing several asynchronous operations as part of one "larger" one), and that means not only calling other Begin/End methods but also implementing your own Begin/End pair so that your composite operation can be consumed elsewhere. Also notice that the `DoStuff` example above contained no control flow. Introduce any, even a simple loop, and it immediately turns into ~~code only a masochist would write~~, while handing countless bloggers great material for yet another `CSDN` post.
So let's be that blogger for a moment and write out a complete example. At the beginning of this post I showed a `CopyStreamToStream` method that copies all the data from one stream to another (essentially what `Stream.CopyTo` does, but for the sake of the explanation let's pretend it doesn't exist):
```csharp
public void CopyStreamToStream(Stream source, Stream destination)
{
var buffer = new byte[0x1000];
int numRead;
while ((numRead = source.Read(buffer, 0, buffer.Length)) != 0)
{
destination.Write(buffer, 0, numRead);
}
}
```
Plainly put, we just keep reading from one stream and writing to the other until we can't read any more data from the source. Now let's write the asynchronous version of this operation using the `APM` model:
```csharp
public IAsyncResult BeginCopyStreamToStream(
Stream source, Stream destination,
AsyncCallback callback, object state)
{
var ar = new MyAsyncResult(state);
var buffer = new byte[0x1000];
Action<IAsyncResult?> readWriteLoop = null!;
readWriteLoop = iar =>
{
try
{
for (bool isRead = iar == null; ; isRead = !isRead)
{
if (isRead)
{
iar = source.BeginRead(buffer, 0, buffer.Length, static readResult =>
{
if (!readResult.CompletedSynchronously)
{
((Action<IAsyncResult?>)readResult.AsyncState!)(readResult);
}
}, readWriteLoop);
if (!iar.CompletedSynchronously)
{
return;
}
}
else
{
int numRead = source.EndRead(iar!);
if (numRead == 0)
{
ar.Complete(null);
callback?.Invoke(ar);
return;
}
iar = destination.BeginWrite(buffer, 0, numRead, writeResult =>
{
if (!writeResult.CompletedSynchronously)
{
try
{
destination.EndWrite(writeResult);
readWriteLoop(null);
}
catch (Exception e2)
{
ar.Complete(e);
callback?.Invoke(ar);
}
}
}, null);
if (!iar.CompletedSynchronously)
{
return;
}
destination.EndWrite(iar);
}
}
}
catch (Exception e)
{
ar.Complete(e);
callback?.Invoke(ar);
}
};
readWriteLoop(null);
return ar;
}
public void EndCopyStreamToStream(IAsyncResult asyncResult)
{
if (asyncResult is not MyAsyncResult ar)
{
throw new ArgumentException(null, nameof(asyncResult));
}
ar.Wait();
}
private sealed class MyAsyncResult : IAsyncResult
{
private bool _completed;
private int _completedSynchronously;
private ManualResetEvent? _event;
private Exception? _error;
public MyAsyncResult(object? state) => AsyncState = state;
public object? AsyncState { get; }
public void Complete(Exception? error)
{
lock (this)
{
_completed = true;
_error = error;
_event?.Set();
}
}
public void Wait()
{
WaitHandle? h = null;
lock (this)
{
if (_completed)
{
if (_error is not null)
{
throw _error;
}
return;
}
h = _event ??= new ManualResetEvent(false);
}
h.WaitOne();
if (_error is not null)
{
throw _error;
}
}
public WaitHandle AsyncWaitHandle
{
get
{
lock (this)
{
return _event ??= new ManualResetEvent(_completed);
}
}
}
public bool CompletedSynchronously
{
get
{
lock (this)
{
if (_completedSynchronously == 0)
{
_completedSynchronously = _completed ? 1 : -1;
}
return _completedSynchronously == 1;
}
}
}
public bool IsCompleted
{
get
{
lock (this)
{
return _completed;
}
}
}
}
```
~~Yowsers~~. And even after writing all that boilerplate, it is still not a great implementation. For example, the `IAsyncResult` implementation takes a lock on every access rather than going lock-free where it could; exceptions are stored raw instead of via `ExceptionDispatchInfo`, which would let them propagate with their original call-stack information; a fair amount is allocated per operation (for example, a delegate is allocated for every `BeginWrite` call); and so on. Now imagine having to do all of this every time you write a method: every time you want a reusable asynchronous method built on top of another asynchronous method, all of the work above falls to you. And if you want reusable code that works across multiple different `IAsyncResult`s, the way `Task.WhenAll` does in the `async/await` world, that's another level of difficulty entirely; every operation implements and exposes its own pattern-specific APIs, so you cannot write one piece of logic and simply reuse it (although some library authors eased the pain a little by providing an extra layer of abstraction over the callbacks).
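As an aside, the `ExceptionDispatchInfo` improvement mentioned above is a small, well-known technique; here is a minimal sketch (the type and member names are mine, not from the example above) of how an `IAsyncResult`-style completion object could capture a failure and rethrow it later without losing the original stack trace:

```csharp
using System.Runtime.ExceptionServices;

// Minimal sketch: store a failure as ExceptionDispatchInfo so that rethrowing it
// from the End method preserves the stack trace of the original throw site.
public sealed class CompletionSource
{
    private ExceptionDispatchInfo? _error;

    // Called by the asynchronous work when it fails.
    public void SetException(Exception e) => _error = ExceptionDispatchInfo.Capture(e);

    // Called by the End method; rethrows with the captured stack trace intact.
    public void ThrowIfFailed() => _error?.Throw();
}
```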
All of that complexity meant that very few people ever attempted this, and for those who did, bugs tended to follow them around. To be fair, this is not a knock on the `APM` pattern specifically; it is a drawback of every callback-based approach to asynchrony. We are so used to the power and convenience of the control-flow constructs in modern languages that callback-based approaches, which tear those constructs apart, understandably introduce a great deal of complexity. And no other mainstream language was offering a better alternative at the time.
We needed something better, something that kept all of the lessons learned from the `APM` pattern while avoiding its pitfalls. It is worth noting that the `APM` pattern is just that, a pattern: the runtime, the core libraries, and the compiler offered no assistance in consuming or implementing it.
### Event-based asynchronous pattern
.NET Framework 2.0 introduced a handful of APIs implementing a different pattern for asynchronous operations, one intended primarily for client application scenarios. This Event-based Asynchronous Pattern, or `EAP`, also comes as a pair of members: a method to initiate the asynchronous operation and an event that is raised when it completes. Our `DoStuff` example might therefore expose a set of members like this:
```csharp
class Handler
{
public int DoStuff(string arg);
public void DoStuffAsync(string arg, object? userToken);
public event DoStuffEventHandler? DoStuffCompleted;
}
public delegate void DoStuffEventHandler(object sender, DoStuffEventArgs e);
public class DoStuffEventArgs : AsyncCompletedEventArgs
{
public DoStuffEventArgs(int result, Exception? error, bool canceled, object? userToken) :
base(error, canceled, usertoken) => Result = result;
public int Result { get; }
}
```
You register the work to run on completion with the `DoStuffCompleted` event and then call the `DoStuffAsync` method, which initiates the asynchronous operation; once it completes, `DoStuffCompleted` is raised by the implementation. The registered handler then runs the continuation work, typically checking that the supplied `userToken` matches the one it expects, which is what makes it possible to hook multiple handlers up to the same event at the same time.
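For illustration, consuming such an API might look roughly like the following sketch, written against the hypothetical `Handler` class shown above (not a real library type):

```csharp
// Sketch: consuming the hypothetical EAP-style Handler shown above.
var handler = new Handler();
object token = new();

handler.DoStuffCompleted += (sender, e) =>
{
    // Only react to the operation we started.
    if (e.UserState != token)
    {
        return;
    }

    if (e.Error is not null)
    {
        Console.WriteLine($"DoStuff failed: {e.Error}");
    }
    else
    {
        Console.WriteLine($"DoStuff produced {e.Result}");
    }
};

handler.DoStuffAsync("some argument", token);
```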
This pattern made some use cases easier to write while making others considerably harder (recall the earlier `CopyStreamToStream` example). It never saw broad adoption: it appeared in effectively a single release of .NET Framework and then quietly faded away, leaving behind the APIs that had been added to support it, for example:
```csharp
class Handler
{
public int DoStuff(string arg);
public void DoStuffAsync(string arg, object? userToken);
public event DoStuffEventHandler? DoStuffCompleted;
}
public delegate void DoStuffEventHandler(object sender, DoStuffEventArgs e);
public class DoStuffEventArgs : AsyncCompletedEventArgs
{
public DoStuffEventArgs(int result, Exception? error, bool canceled, object? userToken) :
base(error, canceled, usertoken) => Result = result;
public int Result { get; }
}
```
This pattern did, however, advance the state of the art in one area the `APM` pattern had ignored entirely, and that advance is still with us in the model we use today: the [SynchronizationContext](https://github.com/dotnet/runtime/blob/967a59712996c2cdb8ce2f65fb3167afbd8b01f3/src/libraries/System.Private.CoreLib/src/System/Threading/SynchronizationContext.cs#L6).
`SynchronizationContext`, introduced in .NET Framework at the same time, is an abstraction over a general-purpose scheduler. In practice its most commonly used method is `Post`, which hands a work item to whatever scheduler the context represents. The base implementation, for instance, just represents the `ThreadPool`, so its `Post` essentially calls `ThreadPool.QueueUserWorkItem`, which invokes the supplied delegate with the supplied state on some pool thread. But the real cleverness of `SynchronizationContext` is not merely that it supports different schedulers; it is that it gives every application model a single abstraction through which to expose its own scheduling rules.
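As a rough sketch of that default behaviour (this is not the actual BCL source, just an approximation of what the base context does):

```csharp
// Rough sketch, not the actual BCL implementation: the base SynchronizationContext
// simply queues the callback to the thread pool.
public class DefaultLikeSynchronizationContext : SynchronizationContext
{
    public override void Post(SendOrPostCallback d, object? state) =>
        ThreadPool.QueueUserWorkItem(s => d(s), state);
}
```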
Consider a UI framework like Windows Forms. As with most UI frameworks on Windows, a control is associated with a particular thread, and that thread runs a message pump which executes the work that is allowed to interact with the control: only that thread may touch the control, and any other thread that wants to interact with it has to send a message to the pump. Windows Forms exposes this through methods such as `Control.BeginInvoke`, which arrange for the supplied delegate and arguments to be run by the thread associated with the control. So you can write code like this:
```csharp
private void button1_Click(object sender, EventArgs e)
{
ThreadPool.QueueUserWorkItem(_ =>
{
string message = ComputeMessage();
button1.BeginInvoke(() =>
{
button1.Text = message;
});
});
}
```
这段代码首先将`ComputeMessage`方法交给线程池中的一个线程运行(这样可以保证计算期间`UI`界面不会卡死),当计算完成之后,再把一个更新`button1`文本的委托交给与`button1`相关联的线程运行。简单而易于理解。`WPF`框架中也是类似的逻辑,只不过使用的是一个被称为`Dispatcher`的类型:
```csharp
private void button1_Click(object sender, RoutedEventArgs e)
{
ThreadPool.QueueUserWorkItem(_ =>
{
string message = ComputeMessage();
button1.Dispatcher.InvokeAsync(() =>
{
button1.Content = message;
});
});
}
```
`.NET MAUI`亦然。但是如果我想将这部分的逻辑封装到一个独立的辅助函数中,例如下面这种:
```csharp
// 调用ComputeMessage然后触发更新逻辑
internal static void ComputeMessageAndInvokeUpdate(Action<string> update) { ... }
```
这样我就可以直接:
```csharp
private void button1_Click(object sender, EventArgs e)
{
ComputeMessageAndInvokeUpdate(message => button1.Text = message);
}
```
但是`ComputeMessageAndInvokeUpdate`应该如何实现才能适配各种类型的应用程序呢?难道需要硬编码所有可能涉及的`UI`框架吗?这就是`SynchronizationContext`大显神威的地方,我们可以这样实现这个方法:
```csharp
internal static void ComputeMessageAndInvokeUpdate(Action<string> update)
{
SynchronizationContext? sc = SynchronizationContext.Current;
ThreadPool.QueueUserWorkItem(_ =>
{
string message = ComputeMessage();
if (sc is not null)
{
sc.Post(_ => update(message), null);
}
else
{
update(message);
}
});
}
```
在这个实现中,`SynchronizationContext`被当作“如何回到`UI`”这一调度逻辑的抽象。各个应用模型只需要在`SynchronizationContext.Current`属性上发布一个继承自`SynchronizationContext`的派生类,由它完成调度相关的工作即可。例如在`Windows Forms`中:
```csharp
public sealed class WindowsFormsSynchronizationContext : SynchronizationContext, IDisposable
{
public override void Post(SendOrPostCallback d, object? state) =>
_controlToSendTo?.BeginInvoke(d, new object?[] { state });
...
}
```
`WPF`中有:
```csharp
public sealed class DispatcherSynchronizationContext : SynchronizationContext
{
public override void Post(SendOrPostCallback d, Object state) =>
_dispatcher.BeginInvoke(_priority, d, state);
...
}
```
`ASP.NET`*曾经*也有过一个实现。尽管Web框架实际上并不关心是哪个线程在运行指定的工作但它非常关心这些工作和哪个请求相关因此该实现主要负责保证多个线程不会同时访问同一个`HttpContext`
```csharp
internal sealed class AspNetSynchronizationContext : AspNetSynchronizationContextBase
{
public override void Post(SendOrPostCallback callback, Object state) =>
_state.Helper.QueueAsynchronous(() => callback(state));
...
}
```
这个概念也并不局限于上面这些主流应用模型。例如[xunit](https://github.com/xunit/xunit)——一个流行的单元测试框架(`.NET`核心代码仓库也在使用)——就实现了多个自定义的`SynchronizationContext`比如限制并行运行的单元测试数量就可以用一个`SynchronizationContext`来实现:
```csharp
public class MaxConcurrencySyncContext : SynchronizationContext, IDisposable
{
public override void Post(SendOrPostCallback d, object? state)
{
var context = ExecutionContext.Capture();
workQueue.Enqueue((d, state, context));
workReady.Set();
}
}
```
`MaxConcurrencySyncContext`的`Post`方法只是把需要完成的工作压入其内部的工作队列,由它自己决定同时有多少工作可以并行运行。
那么同步上下文这个概念是如何同基于事件的异步范式关联起来的呢?`EAP`范式和同步上下文是在同一时期引入的,而`EAP`范式要求:当异步操作启动时,完成事件需要由当时捕获的`SynchronizationContext`进行调度。为了简化这个过程(也可能反而引入了多余的复杂性),`System.ComponentModel`命名空间中引入了一些帮助类型,具体来说是`AsyncOperation`和`AsyncOperationManager`。前者是一个由用户提供的状态对象和捕获到的`SynchronizationContext`组成的元组,后者是一个负责捕获`SynchronizationContext`并创建`AsyncOperation`对象的工厂。`EAP`范式的实现会使用上述帮助类,例如`Ping.SendAsync`会首先调用`AsyncOperationManager.CreateOperation`来捕获同步上下文,然后在异步操作完成时调用`AsyncOperation.PostOperationCompleted`,由它去调用捕获到的`SynchronizationContext.Post`方法。
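一个`EAP`风格的实现使用这两个帮助类的方式大致如下(示意代码,假设这些方法位于前文的`Handler`类型中,`OnDoStuffCompleted`是一个假设的、负责引发事件的私有方法):
```csharp
using System.ComponentModel;
using System.Threading;

public void DoStuffAsync(string arg, object? userToken)
{
    // 在调用线程上捕获当前同步上下文
    AsyncOperation operation = AsyncOperationManager.CreateOperation(userToken);

    ThreadPool.QueueUserWorkItem(_ =>
    {
        int result = 0;
        Exception? error = null;
        try { result = DoStuff(arg); }
        catch (Exception e) { error = e; }

        // 通过捕获到的上下文把完成事件调度回“正确”的线程
        operation.PostOperationCompleted(
            state => OnDoStuffCompleted((DoStuffEventArgs)state!),
            new DoStuffEventArgs(result, error, canceled: false, userToken));
    });
}
```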
`SynchronizationContext`还提供了其他一些后面会用到的小工具。这个类暴露了`OperationStarted`和`OperationCompleted`两个方法。这两个虚方法在基类中的实现都是空的,不做任何工作,但派生类可以重写它们来感知正在进行的操作。`EAP`的实现会在每个操作开始和结束时分别调用`OperationStarted`和`OperationCompleted`,方便当前的同步上下文跟踪尚未完成的工作。鉴于`EAP`范式中启动异步操作的方法通常不返回任何可以用来跟踪进度的对象,能够感知操作进度的同步上下文就显得很有价值了。
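举一个示意性的例子,下面的自定义同步上下文利用这两个虚方法统计仍在进行中的操作数量(只是一个草图,`CountingSynchronizationContext`、`_pending`等名称均为假设):
```csharp
using System.Threading;

// 统计“进行中”操作数量的同步上下文示意
public sealed class CountingSynchronizationContext : SynchronizationContext
{
    private int _pending;

    public int PendingOperations => Volatile.Read(ref _pending);

    public override void OperationStarted() => Interlocked.Increment(ref _pending);

    public override void OperationCompleted() => Interlocked.Decrement(ref _pending);
}
```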
综上所述,我们需要比`APM`范式更好的东西;`EAP`范式虽然引入了一些新概念,但没有解决我们面对的核心问题,我们仍然需要更好的方案。
### 进入Task时代
在.NET Framework 4.0中引入了`System.Threading.Tasks.Task`类型。`Task`代表某个异步操作的最终完成(在其他编程框架中可能被称为`promise`或者`future`)。当一个操作开始时,创建一个`Task`来代表它;当操作完成之后,结果就会被保存进这个`Task`。简单而明确。但`Task`相较于`IAsyncResult`最关键的能力,是它本身就内建了“后续操作”(continuation)的概念:你可以拿到任意一个`Task`,请求它在异步操作完成时异步地通知你,而不必关心这个任务当前处于已完成、未完成还是正在完成的状态。为什么这点非常重要?回想一下`APM`范式中存在的两个主要问题:
1. 你需要为每个操作编写一个自定义的`IAsyncResult`实现:库中没有任何开箱即用的`IAsyncResult`实现。
2. 你需要在调用开始方法之前就知道在操作结束的时候需要做什么。这让编写使用任意异步操作的组合代码或者通用运行时非常困难。
相对的,`Task`提供了一个通用的载体,让你在异步操作启动之后仍然可以“接触”它,并在任意时刻为它挂接后续操作,而不必在调用启动方法时就提供回调。任何异步操作的实现者都可以产出一个`Task`,任何异步操作的使用者都可以消费一个`Task`,这个过程中不需要自定义任何东西:`Task`成为了连接异步操作生产者和消费者的通用桥梁。这一点彻底改变了.NET。
现在让我们深入理解`Task`所带来的重要意义。与其直接去研究错综复杂的`Task`源代码,我们将尝试去实现一个`Task`的简单版本。这不会是一个完善的实现,只会完成基础的功能来让我们更好的理解什么是`Task`,即一个负责协调设置和存储完成信号的数据结构。
开始时`Task`中只有很少的字段:
```csharp
class MyTask
{
private bool _completed;
private Exception? _error;
private Action<MyTask>? _continuation;
private ExecutionContext? _ec;
...
}
```
我们首先需要一个字段`_completed`告诉我们任务是否完成,一个字段`_error`存储导致任务失败的错误;如果要实现泛型的`MyTask<TResult>`,还需要一个`private TResult _result`字段来存储操作完成之后的结果。到目前为止,这和`IAsyncResult`相关的实现非常类似(当然这不是巧合)。`_continuation`是实现中最重要的字段。在这个简单的实现中,我们只支持一个后续操作;在真正的`Task`实现中这是一个`object`类型的字段,既可以存放单个后续操作,也可以存放一个后续操作列表。这个委托会在任务完成的时候被调用。
让我们继续深入。如上所述,`Task`相较于之前的异步执行模型一个基础的优势是在异步操作开始之后再提供后续需要完成的工作。因此我们需要一个方法来实现这个功能:
```csharp
public void ContinueWith(Action<MyTask> action)
{
lock (this)
{
if (_completed)
{
ThreadPool.QueueUserWorkItem(_ => action(this));
}
else if (_continuation is not null)
{
throw new InvalidOperationException("Unlike Task, this implementation only supports a single continuation.");
}
else
{
_continuation = action;
_ec = ExecutionContext.Capture();
}
}
}
```
如果在调用`ContinueWith`的时候异步操作已经完成,那么该方法会直接把委托排入线程池执行;反之,它会把委托存储起来,等异步任务完成时再执行(该方法同时还捕获了一个被称为`ExecutionContext`的对象,会在后续调用委托时用到,我们稍后再介绍它)。
然后我们需要能够在异步过程完成的时候标记任务已经完成。我们将添加两个方法,一个负责标记任务成功完成,一个负责标记任务报错退出。
```csharp
public void SetResult() => Complete(null);
public void SetException(Exception error) => Complete(error);
private void Complete(Exception? error)
{
lock (this)
{
if (_completed)
{
throw new InvalidOperationException("Already completed");
}
_error = error;
_completed = true;
if (_continuation is not null)
{
ThreadPool.QueueUserWorkItem(_ =>
{
if (_ec is not null)
{
ExecutionContext.Run(_ec, _ => _continuation(this), null);
}
else
{
_continuation(this);
}
});
}
}
}
```
我们会存储可能出现的错误并把任务标记为已完成;如果之前已经注册了后续操作,我们也会把它排入队列执行。
最后我们还需要一个方法,把执行过程中发生的任何错误传播出来(如果是泛型版本,还要返回执行结果)。为了方便某些特定场景,我们允许这个方法阻塞直到异步操作完成,其实现方式是通过`ContinueWith`注册一个用于设置`ManualResetEventSlim`的后续操作。
```csharp
public void Wait()
{
ManualResetEventSlim? mres = null;
lock (this)
{
if (!_completed)
{
mres = new ManualResetEventSlim();
ContinueWith(_ => mres.Set());
}
}
mres?.Wait();
if (_error is not null)
{
ExceptionDispatchInfo.Throw(_error);
}
}
```
这就是一个基础的`Task`实现。当然需要指出的是实际的`Task`会复杂很多:
- 支持设置任意数量的后续工作;
- 支持配置后续工作的执行方式(例如后续工作是应该排入队列异步执行,还是作为任务完成过程的一部分被同步调用);
- 支持存储多个错误;
- 支持取消异步操作;
- 一系列的帮助函数(例如`Task.Run`创建一个代表在线程池上运行委托的`Task`)。
但是这些内容中没有什么奥秘,核心工作原理和我们自行实现的是一样的。
你可能会注意到,我们自行实现的`MyTask`直接公开了`SetResult/SetException`方法,而`Task`没有。这是因为`Task`把上述两个方法声明为`internal`,并由`System.Threading.Tasks.TaskCompletionSource`类型充当一个独立的“`Task`生产者”,负责创建任务并控制其完成。这样做并不是出于技术上的必要,而是为了把“控制完成”的能力从“消费`Task`”的接口中分离出来:只要不把`TaskCompletionSource`交出去,就不用担心自己创建的`Task`在不知情的情况下被别人完成。(`CancellationToken`和`CancellationTokenSource`也是出于同样的设计考虑:`CancellationToken`是一个包装`CancellationTokenSource`的结构体,只暴露了消费取消信号所需的成员,而没有产生取消信号的能力,从而保证只有持有`CancellationTokenSource`的一方可以发出取消。)
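下面用一个小例子说明`TaskCompletionSource`的典型用法:把一个基于回调的一次性操作包装成`Task`(示意代码,`AfterDelay`是假设的方法名,这里用`Timer`只是为了制造一个异步触发的回调):
```csharp
using System;
using System.Threading;
using System.Threading.Tasks;

public static Task<int> AfterDelay(int value, TimeSpan delay)
{
    var tcs = new TaskCompletionSource<int>();

    // 回调触发时由“生产者”一方设置结果,消费者只能看到 tcs.Task
    var timer = new Timer(_ => tcs.TrySetResult(value));
    timer.Change(delay, Timeout.InfiniteTimeSpan);

    return tcs.Task;
}
```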
当然,我们也可以像`Task`一样为自己的`MyTask`添加各种工具函数。例如添加一个`MyTask.WhenAll`:
```csharp
public static MyTask WhenAll(MyTask t1, MyTask t2)
{
var t = new MyTask();
int remaining = 2;
Exception? e = null;
Action<MyTask> continuation = completed =>
{
e ??= completed._error; // just store a single exception for simplicity
if (Interlocked.Decrement(ref remaining) == 0)
{
if (e is not null) t.SetException(e);
else t.SetResult();
}
};
t1.ContinueWith(continuation);
t2.ContinueWith(continuation);
return t;
}
```
然后是一个`MyTask.Run`的示例:
```csharp
public static MyTask Run(Action action)
{
var t = new MyTask();
ThreadPool.QueueUserWorkItem(_ =>
{
try
{
action();
t.SetResult();
}
catch (Exception e)
{
t.SetException(e);
}
});
return t;
}
```
还有一个简单的`MyTask.Delay`
```csharp
public static MyTask Delay(TimeSpan delay)
{
var t = new MyTask();
var timer = new Timer(_ => t.SetResult());
timer.Change(delay, Timeout.InfiniteTimeSpan);
return t;
}
```
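把这些辅助方法组合起来,就可以完全基于回调写出简单的异步流程。下面是一个示意用法(仅使用上文定义的`MyTask`成员):
```csharp
MyTask work = MyTask.WhenAll(
    MyTask.Run(() => Console.WriteLine("第一个后台任务")),
    MyTask.Delay(TimeSpan.FromSeconds(1)));

work.ContinueWith(t =>
{
    // 两个操作都结束后才会执行到这里
    Console.WriteLine("全部完成");
});

work.Wait(); // 仅为演示而阻塞等待
```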
`Task`横空出世之后,之前的各种异步编程范式都成为了过去式:凡是曾经以旧范式暴露异步操作的地方,现在都提供了返回`Task`的方法。
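对于仍然只暴露`APM`接口的旧代码,`Task.Factory.FromAsync`可以把一对`Begin/End`方法包装成`Task`,例如把`Stream.BeginRead/EndRead`包装成`Task<int>`(示意代码,`ReadAsTask`是假设的方法名):
```csharp
using System.IO;
using System.Threading.Tasks;

public static Task<int> ReadAsTask(Stream stream, byte[] buffer) =>
    Task<int>.Factory.FromAsync(
        stream.BeginRead,   // Begin 方法
        stream.EndRead,     // End 方法
        buffer, 0, buffer.Length,
        null);              // APM 的 state 参数
```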
### 添加ValueTask
直到现在,`Task`都是.NET异步编程中的主力军每次发布新版本或者社区发布新`API`时,返回`Task`或者`Task<TResult>`几乎是标准做法。但是`Task`是一个类,而每创建一个类的实例都需要分配一次内存。在大多数情况下,为一个生命周期较长的异步操作额外分配一个对象无关紧要,不会造成明显的性能影响。但正如之前所说,同步完成的异步操作十分常见。例如,`Stream.ReadAsync`会返回一个`Task<int>`,但如果你是在类似`BufferedStream`的实现上调用该方法,那么这次调用有很大概率会同步完成,因为大多数读取只需要从内存中的缓冲区取数据,而不需要真正发起`I/O`。在这种情况下还要分配一个额外的对象显然不划算(注意`APM`范式中也存在同样的问题)。对于返回非泛型`Task`的方法来说,可以返回一个预先分配好的已完成单例来缓解这个问题,`Task`也确实提供了`Task.CompletedTask`。但是对于泛型的`Task<TResult>`则不行,因为不可能为每个不同的`TResult`值都准备一个单例。那么我们该如何让同步完成的路径更快呢?
我们可以缓存一些常见的`Task<TResult>`。例如`Task<bool>`就非常常见,而且只需要缓存两个对象:结果为`true`的一个和结果为`false`的一个。同样地,虽然我们不会(也不可能)缓存数亿个`Task<int>`对象来覆盖所有可能的值,但鉴于较小的`Int32`值非常常见,我们可以缓存一小段范围的结果,例如从-1到8。对于任意其他类型来说,`default`也是一个常见的值,因此可以缓存一个结果为`default(TResult)`的`Task`。事实上,较新的.NET版本中的`Task.FromResult`辅助函数就会做类似的事情:如果存在可以复用的`Task<TResult>`单例就直接返回它,否则才创建一个新的`Task`对象。针对其他常见的值还可以设计别的缓存办法。还是以`Stream.ReadAsync`为例,这个方法常常会在同一个流上被反复调用,而且每次允许读取的最大字节数`count`往往相同;再考虑到实现通常能够读满这个`count`,这意味着`Stream.ReadAsync`会反复返回结果相同的`Task<int>`。为了避免这种情况下的重复分配,许多`Stream`的实现(例如`MemoryStream`)会缓存上一次成功返回的`Task<int>`对象:如果下一次读取仍然同步完成且返回了相同的数值,就直接返回上次的那个`Task<int>`。但即便如此,仍然有许多情况无法覆盖。有没有一种更优雅的方案,能在异步操作同步完成时彻底避免创建新对象,尤其是在性能非常重要的场景下?
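下面是一个模仿上述缓存思路的示意草图(`CachedTasks`是为演示而假设的类型,并非`Task.FromResult`的真实实现):
```csharp
using System.Linq;
using System.Threading.Tasks;

public static class CachedTasks
{
    private static readonly Task<bool> TrueTask = Task.FromResult(true);
    private static readonly Task<bool> FalseTask = Task.FromResult(false);

    // 预先创建结果为 -1 到 8 的 Task<int> 单例
    private static readonly Task<int>[] SmallInts =
        Enumerable.Range(-1, 10).Select(i => Task.FromResult(i)).ToArray();

    public static Task<bool> FromResult(bool value) => value ? TrueTask : FalseTask;

    public static Task<int> FromResult(int value) =>
        value is >= -1 and <= 8 ? SmallInts[value + 1] : Task.FromResult(value);
}
```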
这就是`ValueTask<TResult>`诞生的背景([这篇博客](https://devblogs.microsoft.com/dotnet/understanding-the-whys-whats-and-whens-of-valuetask/)详细讨论了`ValueTask<TResult>`的适用场景)。`ValueTask<TResult>`在诞生之初是`TResult`和`Task<TResult>`的可辨识联合discriminated union。说到底它要么是一个立刻可用的结果要么是一个对未来结果的承诺
```csharp
public readonly struct ValueTask<TResult>
{
private readonly Task<TResult>? _task;
private readonly TResult _result;
...
}
```
一个方法可以通过返回`ValueTask<TResult>`,在结果同步可知的情况下避免分配新的`Task<TResult>`对象,代价是返回类型更大、访问结果时多了一层间接。
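一个典型的使用方式大致如下(示意代码,`SimpleBufferedReader`、`ReadSlowAsync`等均为假设的类型和成员):
```csharp
using System.Threading.Tasks;

public sealed class SimpleBufferedReader
{
    private readonly byte[] _buffer;
    private int _position;

    public SimpleBufferedReader(byte[] buffer) => _buffer = buffer;

    public ValueTask<int> ReadByteAsync()
    {
        // 同步快路径:缓冲区里还有数据,直接包装结果,不分配 Task
        if (_position < _buffer.Length)
        {
            return new ValueTask<int>(_buffer[_position++]);
        }

        // 异步慢路径:回退到一个返回 Task<int> 的实现
        return new ValueTask<int>(ReadSlowAsync());
    }

    // 假设的慢路径:真实实现中这里会发起真正的异步 I/O这里用 -1 表示没有更多数据
    private Task<int> ReadSlowAsync() => Task.FromResult(-1);
}
```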
当然,实际中也存在对性能要求极高的场合,你甚至希望在操作异步完成时也避免分配`Task<TResult>`对象。例如`Socket`位于整个网络栈的最底层,对于大多数网络服务来说,`SendAsync`和`ReceiveAsync`都是绝对的热点路径,同步完成和异步完成都非常常见(鉴于内核中的缓冲区,大多数发送会同步完成,部分接收也会同步完成)。因此对于`Socket`这类基础设施,如果能够在同步完成和异步完成两种情况下都做到无分配,将非常有意义。
这就是`System.Threading.Tasks.Sources.IValueTaskSource<TResult>`产生的背景:
```csharp
public interface IValueTaskSource<out TResult>
{
ValueTaskSourceStatus GetStatus(short token);
void OnCompleted(Action<object?> continuation, object? state, short token, ValueTaskSourceOnCompletedFlags flags);
TResult GetResult(short token);
}
```
该接口允许开发者自行实现`ValueTask<TResult>`“背后”的对象,由这个对象提供查询状态的`GetStatus`、获取操作结果的`GetResult`以及挂接后续工作的`OnCompleted`。在这个接口出现之后,`ValueTask<TResult>`的定义也做了小小的调整:`Task<TResult>? _task`字段被一个`object? _obj`字段取代:
```csharp
public readonly struct ValueTask<TResult>
{
private readonly object? _obj;
private readonly TResult _result;
...
}
```
现在`_obj`字段就可以存储一个`IValueTaskSource<TResult>`对象了。相较于`Task<TResult>`一旦完成就永远保持完成状态,`IValueTaskSource<TResult>`的实现对状态有完全的控制权,可以在已完成和未完成之间来回切换。不过`ValueTask<TResult>`要求一个特定的实例只能被消费一次,消费之后不应再观察它的任何变化,这也是分析规则[CA2012](https://learn.microsoft.com/dotnet/fundamentals/code-analysis/quality-rules/ca2012)存在的意义。正是这一约束让`Socket`这样的类型可以把`IValueTaskSource<TResult>`对象池化、在多次调用之间复用。具体来说,`Socket`至多缓存两个这样的实例一个用于读取一个用于写入因为在99.999%的情况下,同一时刻最多只会有一个未完成的发送和一个未完成的接收。
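作为示意,下面给出一个把状态管理委托给`ManualResetValueTaskSourceCore<TResult>`的可复用`IValueTaskSource<int>`草图(`ReusableIntSource`、`StartOperation`等名称均为假设,真实的`Socket`实现要复杂得多):
```csharp
using System;
using System.Threading.Tasks;
using System.Threading.Tasks.Sources;

// 一个可以反复复用的 IValueTaskSource<int> 示意实现
public sealed class ReusableIntSource : IValueTaskSource<int>
{
    private ManualResetValueTaskSourceCore<int> _core;

    // 由“生产者”调用:开始一次新的操作并拿到对应的 ValueTask
    public ValueTask<int> StartOperation()
    {
        _core.Reset();
        return new ValueTask<int>(this, _core.Version);
    }

    // 由“生产者”在操作完成时调用
    public void Complete(int result) => _core.SetResult(result);

    // IValueTaskSource<int> 的三个成员直接转发给 _core
    public int GetResult(short token) => _core.GetResult(token);
    public ValueTaskSourceStatus GetStatus(short token) => _core.GetStatus(token);
    public void OnCompleted(Action<object?> continuation, object? state, short token,
        ValueTaskSourceOnCompletedFlags flags) =>
        _core.OnCompleted(continuation, state, token, flags);
}
```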
值得说明的是,我只提到了`ValueTask<TResult>`却没有提到`ValueTask`。如果只是为了在同步完成时避免内存分配,非泛型的`ValueTask`只会带来很少的收益,因为同样的情况下直接返回`Task.CompletedTask`即可。但如果还要考虑在异步完成时通过复用对象避免分配,非泛型版本就同样有价值了。因此,在引入`IValueTaskSource<TResult>`的同时,`IValueTaskSource`和`ValueTask`也被一并引入。
到目前为止,我们已经可以利用`Task`、`Task<TResult>`、`ValueTask`和`ValueTask<TResult>`表示各种各样的异步操作,并且无论操作完成与否都可以为其挂接后续操作。
但是这些后续操作仍然是回调方法,我们仍然陷入了基于回调的异步控制流程。该怎么办?
### 迭代器成为大救星
解决方案的先声实际上在`Task`诞生之前就已经出现了就在C# 2.0引入迭代器语法的时候。
你可能会问,迭代器不就是`IEnumerable<T>`那一套吗?那只是它的一种表现形式。迭代器是一种让编译器把你编写的方法自动转换成`IEnumerable<T>`或者`IEnumerator<T>`实现的语法。例如,我可以用迭代器语法编写一个产生斐波那契数列的可遍历对象:
```csharp
public static IEnumerable<int> Fib()
{
int prev = 0, next = 1;
yield return prev;
yield return next;
while (true)
{
int sum = prev + next;
yield return sum;
prev = next;
next = sum;
}
}
```
这个方法可以直接用`foreach`遍历,也可以和`System.Linq.Enumerable`中提供的各种方法组合,也可以直接用一个`IEnumerator<T>`对象遍历。
```csharp
foreach (int i in Fib())
{
if (i > 100) break;
Console.Write($"{i} ");
}
```
```csharp
foreach (int i in Fib().Take(12))
{
Console.Write($"{i} ");
}
```
```csharp
using IEnumerator<int> e = Fib().GetEnumerator();
while (e.MoveNext())
{
int i = e.Current;
if (i > 100) break;
Console.Write($"{i} ");
}
```

View File

@@ -1,369 +0,0 @@
---
title: High Performance Computing 25 SP NVIDIA
date: 2025-04-24T19:02:36.1077330+08:00
tags:
- 高性能计算
- 学习资料
---
Fxxk you, NVIDIA!
<!--more-->
CPU/GPU Parallelism:
Moore's Law gives you more and more transistors:
- CPU strategy: make the workload (one compute thread) run as fast as possible.
- GPU strategy: make the workload (as many threads as possible) run as fast as possible.
GPU Architecture:
- Massively Parallel
- Power Efficient
- Memory Bandwidth
- Commercially Viable Parallelism
- Not dependent on large caches for performance
![image-20250424192311202](./hpc-2025-cuda/image-20250424192311202.webp)
## Nvidia GPU Generations
- 2006: G80-based GeForce 8800
- 2008: GT200-based GeForce GTX 280
- 2010: Fermi
- 2012: Kepler
- 2014: Maxwell
- 2016: Pascal
- 2017: Volta
- 2021: Ampere
- 2022: Hopper
- 2024: Blackwell
#### 2006: G80 Terminology
SP: Streaming Processor, scalar ALU for a single CUDA thread
SPA: Stream Processor Array
SM: Streaming Multiprocessor, consisting of 8 SPs
TPC: Texture Processor Cluster: 2 SM + TEX
![image-20250424192825010](./hpc-2025-cuda/image-20250424192825010.webp)
Design goal: performance per millimeter
For GPUs, performance is throughput, so hide latency with computation not cache.
So this is single instruction multiple thread (SIMT).
**Thread Life Cycle**:
The grid is launched on the SPA and thread blocks are serially distributed to all the SMs.
![image-20250424193125125](./hpc-2025-cuda/image-20250424193125125.webp)
**SIMT Thread Execution**:
Groups of 32 threads are formed into warps. Threads in the same warp always execute the same instruction. Some threads may become inactive when the code path diverges, so the hardware **automatically handles divergence**.
Warps are the primitive unit of scheduling.
> SIMT execution is an implementation choice, as sharing control logic leaves more space for ALUs.
**SM Warp Scheduling**:
SM hardware implements zero-overhead warp scheduling:
- Warps whose next instruction has its operands ready for consumption are eligible for execution.
- Eligible warps are selected for execution on a prioritized scheduling policy.
> If 4 clock cycles are needed to dispatch the same instruction for all threads in a warp, one global memory access is needed every 4 instructions, and memory latency is 200 cycles, then 200 / (4 * 4) = 12.5 ≈ 13 warps are needed to fully tolerate the memory latency.
The SM warp scheduling uses a scoreboard and similar mechanisms.
**Granularity Consideration**:
Consider that in the G80 GPU, one SM can run 768 threads and up to 8 thread blocks. For tiled matrix multiplication, a 16 * 16 tile gives 256 threads per block, so one SM can hold 3 thread blocks (3 * 256 = 768), fully using its threads.
### 2008: GT200 Architecture
![image-20250424195111341](./hpc-2025-cuda/image-20250424195111341.webp)
### 2010: Fermi GF100 GPU
**Fermi SM**:
![image-20250424195221886](./hpc-2025-cuda/image-20250424195221886.webp)
There are 32 cores per SM and 512 cores in total, and Fermi introduces 64KB of configurable L1/shared memory.
Internal execution resources are decoupled, and dual-issue pipelines can select two warps.
Fermi also debuts the Parallel Thread eXecution (PTX) 2.0 ISA.
### 2012 Kepler GK 110
![image-20250424200022880](./hpc-2025-cuda/image-20250424200022880.webp)
### 2014 Maxwell
4 GPCs and 16 SMM.
![image-20250424200330783](./hpc-2025-cuda/image-20250424200330783.webp)
### 2016 Pascal
Nothing in particular to pay attention to.
### 2017 Volta
Volta first introduces the tensor core, a dedicated unit for matrix multiplication.
### 2021 Ampere
The GA100 SM:
![image-20250508183446257](./hpc-2025-cuda/image-20250508183446257.webp)
### 2022 Hopper
Introduce the GH200 Grace Hopper Superchip:
![image-20250508183528381](./hpc-2025-cuda/image-20250508183528381.webp)
A system contains a CPU and GPU which is linked by a NVLink technology.
And this system can scale out for machine learning.
![image-20250508183724162](./hpc-2025-cuda/image-20250508183724162.webp)
Memory access across the NVLink:
- GPU to local CPU
- GPU to peer GPU
- GPU to peer CPU
![image-20250508183931464](./hpc-2025-cuda/image-20250508183931464.webp)
These operations are handled by hardware-accelerated memory coherency. Previously, the CPU and GPU had separate page tables; now, so that the GPU can access memory on both sides, the CPU and GPU can share the same page table.
![image-20250508184155087](./hpc-2025-cuda/image-20250508184155087.webp)
### 2025 Blackwell
![image-20250508184455215](./hpc-2025-cuda/image-20250508184455215.webp)
### Compute Capability
A version number exposed to software that indicates the features and specifications supported by the hardware.
## G80 Memory Hierarchy
### Memory Space
Each thread can
- Read and write per-thread registers.
- Read and write per-thread local memory.
- Read and write per-block shared memory.
- Read and write per-grid global memory.
- Read only per-grid constant memory.
- Read only per-grid texture memory.
![image-20250508185236920](./hpc-2025-cuda/image-20250508185236920.webp)
Parallel Memory Sharing:
- Local memory is per-thread and mainly for auto variables and register spill.
- Shared memory is per-block and can be used for inter-thread communication.
- Global memory is per-application and can be used for inter-grid communication.
### SM Memory Architecture
![image-20250508185812302](./hpc-2025-cuda/image-20250508185812302.webp)
Threads in a block share data and results in memory and shared memory.
Shared memory is dynamically allocated to blocks which is one of the limiting resources.
### SM Register File
Register File (RF): there is a 32KB register file, or 8192 entries, for each SM in the G80 GPU.
The tex pipeline and the load/store pipeline can read and write the register file.
Registers are dynamically partitioned across all blocks assigned to the SM. Once assigned to a block, a register is **not** accessible by threads in other blocks, and each thread in a block only accesses registers assigned to itself.
For a matrix multiplication example:
- If one thread uses 10 registers and one block has 16x16 threads, each SM can contain three thread blocks, as one thread block needs 16 * 16 * 10 = 2,560 registers and 3 * 2,560 = 7,680 < 8,192.
- But if each thread needs 11 registers, one SM can only contain two blocks at once, as one block needs 16 * 16 * 11 = 2,816 registers and 3 * 2,816 = 8,448 > 8,192.
More on dynamic partitioning: dynamic partitioning gives more flexibility to compilers and programmers, allowing an SM to run either:
1. A smaller number of threads that require many registers each.
2. A large number of threads that require few registers each.
So there is a tradeoff between instruction level parallelism and thread level parallelism.
### Parallel Memory Architecture
In a parallel machine, many threads access memory, so memory is divided into banks to achieve high bandwidth.
Each bank can service one address per cycle; multiple simultaneous accesses to the same bank result in a bank conflict.
Shared memory bank conflicts:
- The fast cases:
  - All threads of a half-warp access different banks: there is no bank conflict.
  - All threads of a half-warp access the identical address: there is no bank conflict (resolved by broadcasting).
- The slow cases:
  - Multiple threads in the same half-warp access the same bank.
## Memory in Later Generations
### Fermi Architecture
**Unified Addressing Model** allows local, shared and global memory access using the same address space.
![image-20250508193756274](./hpc-2025-cuda/image-20250508193756274.webp)
**Configurable Caches** allow programmers to configure the size of the L1 cache and the shared memory.
The L1 cache works as a counterpart to shared memory:
- Shared memory improves memory access for algorithms with well defined memory access.
- L1 cache improves memory access for irregular algorithms where data addresses are not known beforehand.
### Pascal Architecture
**High Bandwidth Memory**: a technology which enables multiple layers of DRAM components to be integrated vertically on the package along with the GPU.
![image-20250508194350572](./hpc-2025-cuda/image-20250508194350572.webp)
**Unified Memory** provides a single and unified virtual address space for accessing all CPU and GPU memory in the system.
And the CUDA system software doesn't need to synchronize all managed memory allocations to the GPU before each kernel launch. This is enabled by **memory page faulting**.
## Advanced GPU Features
### GigaThread
Enable concurrent kernel execution:
![image-20250508195840957](./hpc-2025-cuda/image-20250508195840957.webp)
And provides dual **Streaming Data Transfer** engines to enable streaming data transfer, a.k.a direct memory access.
![image-20250508195938546](./hpc-2025-cuda/image-20250508195938546.webp)
### GPUDirect
![image-20250508200041910](./hpc-2025-cuda/image-20250508200041910.webp)
### GPU Boost
GPU Boost works through real time hardware monitoring as opposed to application based profiles. It attempts to find what is the appropriate GPU frequency and voltage for a given moment in time.
### SMX Architectural Details
Each unit contains four warp schedulers.
Scheduling functions:
- Register scoreboard for long latency operations.
- Inter-warp scheduling decisions.
- Thread block level scheduling.
### Improving Programmability
![image-20250515183524043](./hpc-2025-cuda/image-20250515183524043.webp)
**Dynamic Parallelism**: The ability to launch new grids from the GPU.
This enables data-dependent parallelism, dynamic work generation, and even batched and nested parallelism.
CPU-controlled work batching:
- CPU program limited by single point of control.
- Can run at most 10s of threads.
- CPU is fully consumed with controlling launches.
![](./hpc-2025-cuda/image-20250515184225475.webp)
Batching via dynamic parallelism:
- Move top-level loops to GPUs.
- Run thousands of independent tasks.
- Release CPU for other work.
![image-20250515184621914](./hpc-2025-cuda/image-20250515184621914.webp)
### Grid Management Unit
![image-20250515184714663](./hpc-2025-cuda/image-20250515184714663.webp)
Fermi Concurrency:
- Up to 16 grids can run at once.
- But CUDA streams multiplex into a single queue.
- Overlap only at stream edge.
Kepler Improved Concurrency:
- Up to 32 grids can run at once.
- One work queue per stream.
- Concurrency at full-stream level.
- No inter-stream dependencies.
This is called **Hyper-Q**.
Without Hyper-Q:
![image-20250515185019590](./hpc-2025-cuda/image-20250515185019590.webp)
With Hyper-Q:
![image-20250515185034758](./hpc-2025-cuda/image-20250515185034758.webp)
In pascal, **asynchronous concurrent computing** is introduced.
![image-20250515185801775](./hpc-2025-cuda/image-20250515185801775.webp)
### NVLink: High-Speed Node Network
![image-20250515185212184](./hpc-2025-cuda/image-20250515185212184.webp)
> The *consumer* prefix means the product is designed for gamers.
>
> The *big* prefix means the product is designed for HPC.
### Preemption
Pascal can actually preempt at the lowest level, the instruction level.
![image-20250515190244112](./hpc-2025-cuda/image-20250515190244112.webp)
### Tensor Core
Operates on a 4x4 matrix and performs: D = A x B + C.
![image-20250515190507199](./hpc-2025-cuda/image-20250515190507199.webp)
### GPU Multi-Process Scheduling
- Timeslice scheduling: single process throughput optimization.
- Multi process service: multi-process throughput optimization.
How about multi-process time slicing:
![image-20250515190703918](./hpc-2025-cuda/image-20250515190703918.webp)
Volta introduces the multi-process services:
![image-20250515191142384](./hpc-2025-cuda/image-20250515191142384.webp)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,10 +0,0 @@
---
title: High Performance Computing 25 SP Quantum Computing
date: 2025-06-12T19:26:24.6668760+08:00
tags:
- 高性能计算
- 学习资料
---
<!--more-->

View File

@@ -1,239 +0,0 @@
---
title: High Performance Computing 2025 SP Non Stored Program Computing
date: 2025-05-29T18:29:28.6155560+08:00
tags:
- 高性能计算
- 学习资料
---
No Von Neumann Machines.
<!--more-->
## Application Specified Integrated Circuits
Also known as **ASICs**, these pieces of hardware can work alone and are not von Neumann machines.
No stored program concept:
- Input data come in
- Pass through all circuit gates quickly
- Generate output results immediately
Advantages: performance is better.
Disadvantages: reusability is worse.
> The CPU and GPU are special kinds of ASIC.
Why we need ASIC in computing:
- Alternatives to Moore's law.
- High capacity and high speed.
![image-20250605185212740](./hpc-2025-non-stored-program-computing/image-20250605185212740.webp)
### Full Custom ASICs
All mask layers are customized in a full-custom ASICs.
Full-custom ASICs can always offer the highest performance and the lowest part cost (smallest die size) for a given design.
A typical example of full-custom ASICs is the CPU.
The advantages and disadvantages of full-custom ASICs are shown below.
| Advantages | Disadvantages |
| ------------------------------------------------------------ | -------------------------------------------------------- |
| Reducing the area | The design process takes a longer time |
| Enhancing the performance | Having more complexity in computer-aided design tool |
| Better ability of integrating with other analog components and other pre-designed components | Requiring higher investment and skilled human resources. |
### Semi Custom ASICs
All the logic cells are predesigned and some (or all) of the mask layers are customized.
There are two types of semi-custom ASICs:
- Standard cell based ASICs
- Gate-array based ASICs.
The standard-cell-based ASIC is also called a **cell-based ASIC (CBIC)**.
![image-20250815093113115](./hpc-2025-non-stored-program-computing/image-20250815093113115.png)
> The *gate* is used as a unit to measure the capacity of a semiconductor device in terms of logic elements.
The semi-custom ASICs is developed as:
- Programmable Logic Array(PLA)
- Complex Programmable Logical Device(CPLD)
- Programmable Array Logical
- Field Programmable Gate Array (FPGA)
#### Programmable Logical Device
An integrated circuit that can be programmed/reprogrammed with digital logic of a certain scale.
The basic idea of PLD is an array of **AND** gates and an array of **OR** gates. Each input feeds both a non-inverting buffer and an inverting buffer to produce the true and inverted forms of each variable. The AND outputs are called the product lines. Each product line is connected to one of the inputs of each OR gate.
Depending on the structure, the standard PLD can be divided into:
- Read Only Memory(ROM): A fixed array of AND gates and a programmable array of OR gates.
- Programmable Array Logic(PAL): A programmable array of AND gates feeding a fixed array of OR gates.
- Programmable Logic Array(PLA): A programmable array of AND gates feeding a programmable array of OR gates.
- Complex Programmable Logic Device(CPLD) and Field Programmable Gate Array(FPGA): complex enough to be called as *architecture*.
![image-20250817183832472](./hpc-2025-non-stored-program-computing/image-20250817183832472.png)
## Field Programming Gate Array
> Generally speaking, any such chip can be considered a special kind of ASIC. But in practice, we refer to a circuit with a fixed function as an ASIC, and a circuit whose function can be changed as an FPGA.
![image-20250612184120333](./hpc-2025-non-stored-program-computing/image-20250612184120333.webp)
### FPGA Architecture
![image-20250817184419856](./hpc-2025-non-stored-program-computing/image-20250817184419856.png)
#### Configurable Logic Block(CLB) Architecture
The CLB consists of:
- Look-up Table(LUT): implements the entries of a logic function's truth table.
And some FPGAs can use the LUTs to implement small random access memory(RAM).
- Carry and Control Logic: Implements fast arithmetic operation(adders/subtractors).
- Memory Elements: configurable flip-flops/latches (programmable clock edges, set, reset and clock enable). These memory elements can usually also be configured as shift registers.
##### Configuring LUTs
A LUT is a RAM with a data width of 1 bit whose content is programmed at power-up. The input signals connect to the select lines of MUXes to pick the truth-table value for any given input combination.
The below figure shows LUT working:
![image-20250817185111521](./hpc-2025-non-stored-program-computing/image-20250817185111521.png)
The configuration memory holds the output of truth table entries, so that when the FPGA is restarting it will run with the same *program*.
As the truth table entries are just bits, the program of an FPGA is called a **BITSTREAM**: we download a bitstream to an FPGA and all LUTs are configured using it to implement the Boolean logic.
##### LUT Based Ram
Using the input signals as an address, the LUT can be configured as a RAM. Normally LUT mode performs read operations; for write operations, address decoders generate clock signals to the latches.
![image-20250817185859510](./hpc-2025-non-stored-program-computing/image-20250817185859510.png)
#### Routing Architecture
The logic blocks are connected to each other through a programmable routing network. The routing network provides routing connections among logic blocks and I/O blocks to complete a user-designed circuit.
Horizontal and vertical wire segments are interconnected by programmable switches called programmable interconnect points (PIPs).
![image-20250817192006784](./hpc-2025-non-stored-program-computing/image-20250817192006784.png)
These PIPs are implemented using a transmission gate controlled by a memory bit from the configuration memory.
Several types of PIPs are used in the FPGA:
- Cross-point: connects vertical or horizontal wire segments allowing turns.
- Breakpoint: connects or isolates 2 wire segments.
- Decoded MUX: groups of cross-points connected to a single output configured by n configuration bits.
- Non-decoded MUX: n wire segments each with a configuration bit.
- Compound cross-point: consists of 6 breakpoint PIPs and can isolate two signal nets from each other.
![image-20250817194355228](./hpc-2025-non-stored-program-computing/image-20250817194355228.png)
#### Input/Output Architecture
The I/O pad and the surrounding supporting logic and circuitry are referred to as an input/output cell.
The programmable input/output cells consist of three parts:
- Bi-directional buffers
- Routing resources.
- Programmable I/O voltage and current levels.
![image-20250817195139631](./hpc-2025-non-stored-program-computing/image-20250817195139631.png)
#### Fine-grained and Coarse-grained Architecture
The fine-grained architecture:
- Each logic block can implement a very simple function.
- Very efficient in implementing systolic algorithms.
- Has a larger number of interconnects per logic block relative to the functionality it offers.
The coarse-grained architecture:
- Each logic block is relatively packed with more logic.
- Has their logic blocks packed with more functionality.
- Has fewer interconnections, which reduces the propagation delays encountered.
#### Interconnect Devices
FPGAs are based on an array of logic modules and uncommitted wires to route signals.
Three types of interconnect devices have been commonly used to connect these wires:
- Static random access memory (SRAM) based
- Anti-fuse based
- EEPROM based
### FPGA Design Flow
![image-20250817195714935](./hpc-2025-non-stored-program-computing/image-20250817195714935.png)
![image-20250817200350750](./hpc-2025-non-stored-program-computing/image-20250817200350750.png)
The FPGA configuration techniques contains:
- Full configuration and read back.
- Partial re-configuration and read back.
- Compressed configuration.
Based on partial reconfiguration, runtime reconfiguration has been developed: the area to be reconfigured is changed at run time.
#### Hardware Description Languages(HDL)
There are three languages targeting FPGAs:
- VHDL: VHSIC Hardware Description Language.
- Verilog
- OpenCL
The first two languages are typical HDLs:
| Verilog | VHDL |
| -------------------------------------- | ------------------------------- |
| Has fixed data types. | Has abstract data types. |
| Relatively easy to learn. | Relatively difficult to learn. |
| Good gate level timing. | Poor level gate timing. |
| Interpreted constructs. | Compiled constructs. |
| Limited design reusability. | Good design reusability. |
| Doesn't support structure replication. | Supports structure replication. |
| Limited design management. | Good design management. |
OpenCL is not a traditional hardware description language. OpenCL needs to turn thread parallelism into hardware parallelism, called **pipeline parallelism**.
The following figure shows how the OpenCL-FPGA compiler turns a vector-add function into a circuit.
![image-20250829210329225](./hpc-2025-non-stored-program-computing/image-20250829210329225.png)
The compiler generates three stages for this function:
1. In the first stage, two loading units are used.
2. In the second stage, one adding unit is used.
3. In the third stage, one storing unit is used.
Each cycle, thread `N` is clocked into the first stage, loading values from the arrays; meanwhile, thread `N - 1` is in the second stage, adding the values, and thread `N - 2` is in the third stage, storing the result into the target array.
So different from the CPU and GPU, the OpenCL on the FPGA has two levels of parallelism:
- Pipelining
- Replication of the kernels and having them run concurrently.

View File

@@ -1,99 +0,0 @@
---
title: High Performance Computing 2025 SP OpenCL Programming
date: 2025-05-29T18:29:14.8444660+08:00
tags:
- 高性能计算
- 学习资料
---
Open Computing Language.
<!--more-->
OpenCL is Open Computing Language.
- Open, royalty-free standard C-language extension.
- For parallel programming of heterogeneous systems using GPUs, CPUs , CBE, DSP and other processors including embedded mobile devices.
- Managed by Khronos Group.
![image-20250529185915068](./hpc-2025-opencl/image-20250529185915068.webp)
### Anatomy of OpenCL
- Platform Layer API
- Runtime API
- Language Specification
### Compilation Model
OpenCL uses dynamic/runtime compilation model like OpenGL.
1. The code is compiled to an IR.
2. The IR is compiled to a machine code for execution.
And in dynamic compilation, *step 1* is done usually once and the IR is stored. The app loads the IR and performs *step 2* during the app runtime.
### Execution Model
OpenCL program is divided into
- Kernel: basic unit of executable code.
- Host: collection of compute kernels and internal functions.
The host program invokes a kernel over an index space called an **NDRange**.
NDRange is *N-Dimensional Range*, and can be a 1, 2, 3-dimensional space.
A single kernel instance at a point of this index space is called **work item**. Work items are further grouped into **work groups**.
### OpenCL Memory Model
![image-20250529191215424](./hpc-2025-opencl/image-20250529191215424.webp)
Multiple distinct address spaces: Address can be collapsed depending on the device's memory subsystem.
Address space:
- Private: private to a work item.
- Local: local to a work group.
- Global: accessible by all work items in all work groups.
- Constant: read only global memory.
> Comparison with CUDA:
>
> ![image-20250529191414250](./hpc-2025-opencl/image-20250529191414250.webp)
Memory region for host and kernel:
![image-20250529191512490](./hpc-2025-opencl/image-20250529191512490.webp)
### Programming Model
#### Data Parallel Programming Model
1. Define N-Dimensional computation domain
2. Work-items can be grouped together as *work group*.
3. Execute multiple work-groups in parallel.
#### Task Parallel Programming Model
> Data parallel execution model must be implemented by all OpenCL computing devices, but task parallel programming is a choice for vendor.
Some computing devices such as CPUs can also execute task-parallel computing kernels.
- Executes as a single work item.
- A computing kernel written in OpenCL.
- A native function.
### OpenCL Framework
![image-20250529192022613](./hpc-2025-opencl/image-20250529192022613.webp)
The basic OpenCL program structure:
![image-20250529192056388](./hpc-2025-opencl/image-20250529192056388.webp)
**Contexts** are used to contain and manage the state of the *world*.
**Command-queue** coordinates execution of the kernels.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,78 +0,0 @@
---
title: High Performance Computing 25 SP Potpourri
date: 2025-06-12T18:45:49.2698190+08:00
tags:
- 高性能计算
- 学习资料
---
Potpourri has a good taste.
<!--more-->
## Heterogeneous System Architecture
![image-20250612185019968](./hpc-2025-potpourri/image-20250612185019968.webp)
The goals of the HSA:
- Enable power efficient performance.
- Improve programmability of heterogeneous processors.
- Increase the portability of code across processors and platforms.
- Increase the pervasiveness of heterogeneous solutions.
### The Runtime Stack
![image-20250612185221643](./hpc-2025-potpourri/image-20250612185221643.webp)
## Accelerated Processing Unit
A processor that combines the CPU and the GPU elements into a single architecture.
![image-20250612185743675](./hpc-2025-potpourri/image-20250612185743675.webp)
## Intel Xeon Phi
The goal:
- Leverage X86 architecture and existing X86 programming models.
- Dedicate much of the silicon to floating point ops.
- Cache coherent.
- Increase floating-point throughput.
- Strip expensive features.
The reality:
- 10s of x86-based cores.
- Very high-bandwidth local GDDR5 memory.
- The card runs a modified embedded Linux.
## Deep Learning: Deep Neural Networks
The network can be used as a computer.
## Tensor Processing Unit
A custom ASIC for the inference phase of neural networks (an AI accelerator).
### TPUv1 Architecture
![image-20250612191035632](./hpc-2025-potpourri/image-20250612191035632.webp)
### TPUv2 Architecture
![image-20250612191118473](./hpc-2025-potpourri/image-20250612191118473.webp)
Advantages of TPU:
- Allows making predictions very quickly and responding within a fraction of a second.
- Accelerate performance of linear computation, key of machine learning applications.
- Minimize the time to accuracy when you train large and complex network models.
Disadvantages of TPU:
- Linear algebra that requires heavy branching or that is not computed element-wise.
- Workloads not dominated by matrix multiplication are not likely to perform well on TPUs.
- Workloads that access memory using sparse techniques.
- Workloads that use highly precise arithmetic operations.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,41 +0,0 @@
---
title: High Performance Computing 2025 SP Programming CUDA
date: 2025-05-15T19:13:48.8893010+08:00
tags:
- 高性能计算
- 学习资料
---
Compute Unified Device Architecture
<!--more-->
## CUDA
General purpose programming model:
- The user kicks off batches of threads on the GPU.
![image-20250515195739382](./hpc-2025-program-cuda/image-20250515195739382.webp)
Compiling C-with-CUDA applications:
![image-20250515195907764](./hpc-2025-program-cuda/image-20250515195907764.webp)
### CUDA APIs
Areas:
- Device management
- Context management
- Memory management
- Code module management
- Execution control
- Texture reference management
- Interoperability with OpenGL and Direct3D
Two APIs:
- A low-level API called the CUDA driver API.
- A higher-level API called the C runtime for CUDA that is implemented on top of the CUDA driver API.

View File

@@ -1,12 +1,10 @@
--- ---
title: 2021年终总结 title: 2021年终总结
date: 2022-01-12T16:27:19.0000000 date: 2022-01-12 16:27:19
tags: tags:
- 杂谈 - 随笔
- 年终总结
--- ---
2021年已经过去2022年已经来临。每每一年开始的时候我都会展开一张纸或者新建一个文档思量着又是一年时光也该同诸大杂志一般写几句意味深长的话语怀念过去的时光也祝福未来的自己。可往往脑海中已是三万字的长篇落在笔头却又是一个字都没有了。 2021年已经过去2022年已经来临。每每一年开始的时候我都会展开一张纸或者新建一个文档思量着又是一年时光也该同诸大杂志一般写几句意味深长的话语怀念过去的时光也祝福未来的自己。可往往脑海中已是三万字的长篇落在笔头却又是一个字都没有了。
如今跨年的时候已经过去朋友圈中已经不见文案的踪影我也该重新提笔细说自己2021年中做过的种种。 如今跨年的时候已经过去朋友圈中已经不见文案的踪影我也该重新提笔细说自己2021年中做过的种种。
@@ -24,7 +22,7 @@ tags:
在前12年的学生生涯中我们都在期待着这一次的暑假以为在这个没有作业的假期里我们就可以充分的享受人间的美好。可是当时我们不知道这人间的烦恼可不止作业这一种无论是突如其来的疫情导致开学延期还是等待录取时的不安。 在前12年的学生生涯中我们都在期待着这一次的暑假以为在这个没有作业的假期里我们就可以充分的享受人间的美好。可是当时我们不知道这人间的烦恼可不止作业这一种无论是突如其来的疫情导致开学延期还是等待录取时的不安。
虽说在暑假时,拥有了自己的笔记本电脑,可是在高中三年屯下的游戏还是没有玩几个,看来我也是“喜加一”的受害者。虽然在高考后入坑了原神,但是假期间我并没有太过投入的玩。 虽说在暑假时,拥有了自己的笔记本电脑,可是在高中三年屯下的游戏还是没有玩几个,看来我也是“喜加一”的受害者。虽然在高考后入坑了原神,但是假期间我并没有太过投入的玩。
暑假下定决心要好好的学一学可是看着我gitee上暑假期间那稀疏的提交我就知道我又摸了一个暑假的鱼。 暑假下定决心要好好的学一学可是看着我gitee上暑假期间那稀疏的提交我就知道我又摸了一个暑假的鱼。
![gitee贡献](./2021-final/1.webp) ![gitee贡献](./2021-final/1.png)
即使我想写的很多项目都没有被扎实的推进下来但是学习的一些的C语言还是让我受益匪浅。 即使我想写的很多项目都没有被扎实的推进下来但是学习的一些的C语言还是让我受益匪浅。
现在看来,这个假期真是,**学也没有学好,耍也没有耍好**的典型。 现在看来,这个假期真是,**学也没有学好,耍也没有耍好**的典型。

BIN
YaeBlog/source/posts/2021-final/1.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
YaeBlog/source/posts/2021-final/1.webp (Stored with Git LFS)

Binary file not shown.

View File

@@ -1,13 +1,11 @@
--- ---
title: 2022年终总结 title: 2022年终总结
date: 2022-12-30T14:58:12.0000000
tags: tags:
- 杂谈 - 随笔
- 年终总结 date: 2022-12-30 14:58:12
--- ---
2022是困难的一年。我们需要为2023年做好准备。 2022是困难的一年。我们需要为2023年做好准备。
<!--more--> <!--more-->
@@ -58,11 +56,11 @@ tags:
小小的总结一下2022年可以算得上是一事无成的一年还搞砸了不少的事情。在写代码上进展有限成绩上大幅倒退说好的六级英语和大学物理竞赛都没有参加在年末应对疫情进展的时候更是把“不知所措”这个成语诠释的淋漓尽致。 小小的总结一下2022年可以算得上是一事无成的一年还搞砸了不少的事情。在写代码上进展有限成绩上大幅倒退说好的六级英语和大学物理竞赛都没有参加在年末应对疫情进展的时候更是把“不知所措”这个成语诠释的淋漓尽致。
![](./2022-final/2022-12-30-14-26-19-QQ_Image_1672381538441.webp) ![](./2022-final/2022-12-30-14-26-19-QQ_Image_1672381538441.jpg)
关于今年的人际交往和社会关系我愿意用QQ2022年年终总结中的一张截屏来总结这张图片透漏出一种无可救药的悲伤。 关于今年的人际交往和社会关系我愿意用QQ2022年年终总结中的一张截屏来总结这张图片透漏出一种无可救药的悲伤。
![](./2022-final/2022-12-30-14-28-12-QQ_Image_1672381543836.webp) ![](./2022-final/2022-12-30-14-28-12-QQ_Image_1672381543836.jpg)
## 展望 ## 展望

Binary file not shown.

Binary file not shown.

View File

@@ -1,11 +1,11 @@
--- ---
title: 2022年暑假碎碎念 title: 2022年暑假碎碎念
date: 2022-08-22T15:39:13.0000000
tags: tags:
- 杂谈 - 随笔
typora-root-url: 2022-summer-vacation
date: 2022-08-22 15:39:13
--- ---
在8个月的漫长寒假的最后两个月~~也就是俗称的暑假中~~,我都干了些什么? 在8个月的漫长寒假的最后两个月~~也就是俗称的暑假中~~,我都干了些什么?
<!--more--> <!--more-->
@@ -32,7 +32,7 @@ tags:
- 下定决定要参加下一学期的物理竞赛,但是在听了讲座之后直接决定开学再开始学习,~~我知道我在家没法学习,俗称开摆~~ - 下定决定要参加下一学期的物理竞赛,但是在听了讲座之后直接决定开学再开始学习,~~我知道我在家没法学习,俗称开摆~~
- 又捡起了`Blender`,并在[Github](https://github.com/tanjian1998/bupt_minecraft)上找到了伟大的前辈们在`Minecraft`里复刻的老校区,希望能用`Blender`渲染几张图当作桌面。 - 又捡起了`Blender`,并在[Github](https://github.com/tanjian1998/bupt_minecraft)上找到了伟大的前辈们在`Minecraft`里复刻的老校区,希望能用`Blender`渲染几张图当作桌面。
![唯一的一张成品](result1.webp) ![唯一的一张成品](result1.png)
> 在此感谢所有为此付出过汗水的前辈们,让我这个即将搬入老校区的萌新能提前一睹老校区的风采。 > 在此感谢所有为此付出过汗水的前辈们,让我这个即将搬入老校区的萌新能提前一睹老校区的风采。

BIN
YaeBlog/source/posts/2022-summer-vacation/result1.png (Stored with Git LFS) Normal file

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More